aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2009-06-17 21:16:55 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2009-06-17 21:16:55 -0400
commit4b337c5f245b6587ba844ac7bb13c313a2912f7b (patch)
tree999c6a6580b76a083c8efb9dabff709d1c49fcd0 /drivers
parent492b057c426e4aa747484958e18e9da29003985d (diff)
parent3fe0344faf7fdcb158bd5c1a9aec960a8d70c8e8 (diff)
Merge commit 'origin/master' into next
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/base/core.c57
-rw-r--r--drivers/base/dd.c6
-rw-r--r--drivers/base/firmware_class.c27
-rw-r--r--drivers/base/node.c4
-rw-r--r--drivers/base/platform.c7
-rw-r--r--drivers/base/sys.c8
-rw-r--r--drivers/block/aoe/aoechr.c7
-rw-r--r--drivers/block/pktcdvd.c9
-rw-r--r--drivers/block/xen-blkfront.c14
-rw-r--r--drivers/char/hvcs.c6
-rw-r--r--drivers/char/hw_random/Kconfig13
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/core.c1
-rw-r--r--drivers/char/hw_random/tx4939-rng.c184
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c4
-rw-r--r--drivers/char/misc.c15
-rw-r--r--drivers/char/raw.c6
-rw-r--r--drivers/char/vt.c13
-rw-r--r--drivers/clocksource/acpi_pm.c1
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c61
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c68
-rw-r--r--drivers/dma/Kconfig8
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/txx9dmac.c1354
-rw-r--r--drivers/dma/txx9dmac.h307
-rw-r--r--drivers/eisa/eisa.ids5
-rw-r--r--drivers/eisa/pci_eisa.c2
-rw-r--r--drivers/eisa/virtual_root.c2
-rw-r--r--drivers/firewire/Makefile8
-rw-r--r--drivers/firewire/core-card.c (renamed from drivers/firewire/fw-card.c)25
-rw-r--r--drivers/firewire/core-cdev.c (renamed from drivers/firewire/fw-cdev.c)13
-rw-r--r--drivers/firewire/core-device.c (renamed from drivers/firewire/fw-device.c)154
-rw-r--r--drivers/firewire/core-iso.c (renamed from drivers/firewire/fw-iso.c)6
-rw-r--r--drivers/firewire/core-topology.c (renamed from drivers/firewire/fw-topology.c)24
-rw-r--r--drivers/firewire/core-transaction.c (renamed from drivers/firewire/fw-transaction.c)42
-rw-r--r--drivers/firewire/core.h293
-rw-r--r--drivers/firewire/fw-device.h202
-rw-r--r--drivers/firewire/fw-topology.h77
-rw-r--r--drivers/firewire/fw-transaction.h446
-rw-r--r--drivers/firewire/ohci.c (renamed from drivers/firewire/fw-ohci.c)19
-rw-r--r--drivers/firewire/ohci.h (renamed from drivers/firewire/fw-ohci.h)6
-rw-r--r--drivers/firewire/sbp2.c (renamed from drivers/firewire/fw-sbp2.c)64
-rw-r--r--drivers/firmware/memmap.c16
-rw-r--r--drivers/gpu/drm/Kconfig13
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/drm_debugfs.c12
-rw-r--r--drivers/gpu/drm/drm_drv.c2
-rw-r--r--drivers/gpu/drm/drm_mm.c20
-rw-r--r--drivers/gpu/drm/drm_stub.c2
-rw-r--r--drivers/gpu/drm/drm_sysfs.c7
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c8
-rw-r--r--drivers/gpu/drm/radeon/Kconfig34
-rw-r--r--drivers/gpu/drm/radeon/Makefile12
-rw-r--r--drivers/gpu/drm/radeon/ObjectID.h578
-rw-r--r--drivers/gpu/drm/radeon/atom-bits.h48
-rw-r--r--drivers/gpu/drm/radeon/atom-names.h100
-rw-r--r--drivers/gpu/drm/radeon/atom-types.h42
-rw-r--r--drivers/gpu/drm/radeon/atom.c1215
-rw-r--r--drivers/gpu/drm/radeon/atom.h149
-rw-r--r--drivers/gpu/drm/radeon/atombios.h4785
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c695
-rw-r--r--drivers/gpu/drm/radeon/r100.c1524
-rw-r--r--drivers/gpu/drm/radeon/r300.c1116
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h52
-rw-r--r--drivers/gpu/drm/radeon/r420.c223
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h749
-rw-r--r--drivers/gpu/drm/radeon/r520.c234
-rw-r--r--drivers/gpu/drm/radeon/r600.c169
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h114
-rw-r--r--drivers/gpu/drm/radeon/radeon.h793
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c249
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h405
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c1298
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c133
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c390
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c833
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c2481
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c603
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c249
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c252
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c813
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c692
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c217
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c1708
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c825
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c387
-rw-r--r--drivers/gpu/drm/radeon/radeon_fixed.h50
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c233
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c287
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c209
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c158
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c295
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c1276
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c1288
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h398
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c511
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h45
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h3570
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c485
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c653
-rw-r--r--drivers/gpu/drm/radeon/rs400.c411
-rw-r--r--drivers/gpu/drm/radeon/rs600.c324
-rw-r--r--drivers/gpu/drm/radeon/rs690.c181
-rw-r--r--drivers/gpu/drm/radeon/rs780.c102
-rw-r--r--drivers/gpu/drm/radeon/rv515.c504
-rw-r--r--drivers/gpu/drm/radeon/rv770.c124
-rw-r--r--drivers/gpu/drm/ttm/Makefile8
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c150
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c1698
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c561
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c454
-rw-r--r--drivers/gpu/drm/ttm/ttm_global.c114
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c234
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c50
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c635
-rw-r--r--drivers/hid/usbhid/hiddev.c7
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/hp_accel.c20
-rw-r--r--drivers/hwmon/lis3lv02d.c187
-rw-r--r--drivers/hwmon/lis3lv02d.h29
-rw-r--r--drivers/hwmon/lis3lv02d_spi.c1
-rw-r--r--drivers/i2c/busses/Kconfig13
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-at91.c8
-rw-r--r--drivers/i2c/busses/i2c-au1550.c2
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c3
-rw-r--r--drivers/i2c/busses/i2c-highlander.c2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c2
-rw-r--r--drivers/i2c/busses/i2c-ocores.c8
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c10
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c8
-rw-r--r--drivers/i2c/busses/i2c-pxa.c9
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c4
-rw-r--r--drivers/i2c/busses/i2c-stu300.c1029
-rw-r--r--drivers/i2c/busses/i2c-versatile.c6
-rw-r--r--drivers/ide/ide-pm.c6
-rw-r--r--drivers/ide/ide-probe.c4
-rw-r--r--drivers/ide/ide_platform.c2
-rw-r--r--drivers/ieee1394/csr1212.c2
-rw-r--r--drivers/ieee1394/eth1394.c16
-rw-r--r--drivers/ieee1394/nodemgr.c5
-rw-r--r--drivers/ieee1394/sbp2.c8
-rw-r--r--drivers/infiniband/core/sysfs.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c8
-rw-r--r--drivers/input/input.c6
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/misc/pcspkr.c1
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c2
-rw-r--r--drivers/input/xen-kbdfront.c8
-rw-r--r--drivers/md/dm-ioctl.c1
-rw-r--r--drivers/media/Kconfig10
-rw-r--r--drivers/media/common/tuners/tuner-simple.c44
-rw-r--r--drivers/media/common/tuners/tuner-types.c59
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c58
-rw-r--r--drivers/media/common/tuners/xc5000.c264
-rw-r--r--drivers/media/dvb/b2c2/flexcop-common.h8
-rw-r--r--drivers/media/dvb/b2c2/flexcop-fe-tuner.c790
-rw-r--r--drivers/media/dvb/b2c2/flexcop-i2c.c2
-rw-r--r--drivers/media/dvb/b2c2/flexcop-misc.c20
-rw-r--r--drivers/media/dvb/bt8xx/bt878.c8
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c121
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c14
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c42
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.h4
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c10
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig1
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c94
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c31
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-common.c7
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h8
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h4
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c325
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.h1
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.c8
-rw-r--r--drivers/media/dvb/firewire/firedtv-1394.c4
-rw-r--r--drivers/media/dvb/firewire/firedtv-dvb.c2
-rw-r--r--drivers/media/dvb/firewire/firedtv-rc.c4
-rw-r--r--drivers/media/dvb/frontends/Kconfig22
-rw-r--r--drivers/media/dvb/frontends/Makefile4
-rw-r--r--drivers/media/dvb/frontends/af9013.c2
-rw-r--r--drivers/media/dvb/frontends/au8522_dig.c98
-rw-r--r--drivers/media/dvb/frontends/cx24116.c2
-rw-r--r--drivers/media/dvb/frontends/drx397xD.c4
-rw-r--r--drivers/media/dvb/frontends/isl6423.c308
-rw-r--r--drivers/media/dvb/frontends/isl6423.h63
-rw-r--r--drivers/media/dvb/frontends/lgdt3305.c17
-rw-r--r--drivers/media/dvb/frontends/lgs8gxx.c10
-rw-r--r--drivers/media/dvb/frontends/lnbp21.c2
-rw-r--r--drivers/media/dvb/frontends/mt312.c2
-rw-r--r--drivers/media/dvb/frontends/nxt200x.c6
-rw-r--r--drivers/media/dvb/frontends/or51132.c2
-rw-r--r--drivers/media/dvb/frontends/stv0900_priv.h2
-rw-r--r--drivers/media/dvb/frontends/stv090x.c4299
-rw-r--r--drivers/media/dvb/frontends/stv090x.h106
-rw-r--r--drivers/media/dvb/frontends/stv090x_priv.h269
-rw-r--r--drivers/media/dvb/frontends/stv090x_reg.h2373
-rw-r--r--drivers/media/dvb/frontends/stv6110x.c373
-rw-r--r--drivers/media/dvb/frontends/stv6110x.h71
-rw-r--r--drivers/media/dvb/frontends/stv6110x_priv.h75
-rw-r--r--drivers/media/dvb/frontends/stv6110x_reg.h82
-rw-r--r--drivers/media/dvb/frontends/tda10048.c312
-rw-r--r--drivers/media/dvb/frontends/tda10048.h21
-rw-r--r--drivers/media/dvb/siano/Makefile2
-rw-r--r--drivers/media/dvb/siano/sms-cards.c188
-rw-r--r--drivers/media/dvb/siano/sms-cards.h64
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c468
-rw-r--r--drivers/media/dvb/siano/smscoreapi.h488
-rw-r--r--drivers/media/dvb/siano/smsdvb.c372
-rw-r--r--drivers/media/dvb/siano/smsendian.c102
-rw-r--r--drivers/media/dvb/siano/smsendian.h32
-rw-r--r--drivers/media/dvb/siano/smsir.c301
-rw-r--r--drivers/media/dvb/siano/smsir.h93
-rw-r--r--drivers/media/dvb/siano/smssdio.c357
-rw-r--r--drivers/media/dvb/siano/smsusb.c75
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c124
-rw-r--r--drivers/media/dvb/ttpci/av7110_hw.c2
-rw-r--r--drivers/media/dvb/ttpci/av7110_v4l.c2
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c2
-rw-r--r--drivers/media/dvb/ttpci/budget.c85
-rw-r--r--drivers/media/radio/dsbr100.c109
-rw-r--r--drivers/media/radio/radio-mr800.c1
-rw-r--r--drivers/media/radio/radio-sf16fmi.c16
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c22
-rw-r--r--drivers/media/radio/radio-si470x.c1
-rw-r--r--drivers/media/video/Kconfig20
-rw-r--r--drivers/media/video/Makefile79
-rw-r--r--drivers/media/video/adv7343.c534
-rw-r--r--drivers/media/video/adv7343_regs.h185
-rw-r--r--drivers/media/video/au0828/au0828-cards.c4
-rw-r--r--drivers/media/video/au0828/au0828-core.c17
-rw-r--r--drivers/media/video/au0828/au0828-video.c8
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c14
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c21
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c6
-rw-r--r--drivers/media/video/cx18/cx18-audio.c44
-rw-r--r--drivers/media/video/cx18/cx18-av-core.c374
-rw-r--r--drivers/media/video/cx18/cx18-av-firmware.c82
-rw-r--r--drivers/media/video/cx18/cx18-av-vbi.c4
-rw-r--r--drivers/media/video/cx18/cx18-cards.c63
-rw-r--r--drivers/media/video/cx18/cx18-controls.c6
-rw-r--r--drivers/media/video/cx18/cx18-driver.c100
-rw-r--r--drivers/media/video/cx18/cx18-driver.h22
-rw-r--r--drivers/media/video/cx18/cx18-dvb.c54
-rw-r--r--drivers/media/video/cx18/cx18-fileops.c7
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.c114
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.h2
-rw-r--r--drivers/media/video/cx18/cx18-queue.c85
-rw-r--r--drivers/media/video/cx18/cx18-streams.c44
-rw-r--r--drivers/media/video/cx18/cx18-streams.h20
-rw-r--r--drivers/media/video/cx18/cx18-version.h2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c1
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c8
-rw-r--r--drivers/media/video/cx231xx/cx231xx-i2c.c32
-rw-r--r--drivers/media/video/cx231xx/cx231xx-input.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-vbi.c1
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h2
-rw-r--r--drivers/media/video/cx23885/cimax2.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c1
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c121
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c92
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c123
-rw-r--r--drivers/media/video/cx23885/cx23885-i2c.c12
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c14
-rw-r--r--drivers/media/video/cx23885/cx23885.h21
-rw-r--r--drivers/media/video/cx88/Makefile2
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c7
-rw-r--r--drivers/media/video/cx88/cx88-cards.c108
-rw-r--r--drivers/media/video/cx88/cx88-core.c27
-rw-r--r--drivers/media/video/cx88/cx88-dsp.c312
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c1
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c13
-rw-r--r--drivers/media/video/cx88/cx88-input.c6
-rw-r--r--drivers/media/video/cx88/cx88-tvaudio.c115
-rw-r--r--drivers/media/video/cx88/cx88-video.c16
-rw-r--r--drivers/media/video/cx88/cx88.h12
-rw-r--r--drivers/media/video/dabusb.c6
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c5
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c222
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c58
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c21
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c25
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c8
-rw-r--r--drivers/media/video/em28xx/em28xx-reg.h16
-rw-r--r--drivers/media/video/em28xx/em28xx.h9
-rw-r--r--drivers/media/video/gspca/finepix.c1
-rw-r--r--drivers/media/video/gspca/gspca.c199
-rw-r--r--drivers/media/video/gspca/gspca.h6
-rw-r--r--drivers/media/video/gspca/m5602/Makefile3
-rw-r--r--drivers/media/video/gspca/m5602/m5602_bridge.h26
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c44
-rw-r--r--drivers/media/video/gspca/m5602/m5602_mt9m111.c400
-rw-r--r--drivers/media/video/gspca/m5602/m5602_mt9m111.h805
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov7660.c227
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov7660.h279
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.c222
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.h57
-rw-r--r--drivers/media/video/gspca/m5602/m5602_po1030.c494
-rw-r--r--drivers/media/video/gspca/m5602/m5602_po1030.h439
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.c391
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.h93
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k83a.c473
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k83a.h280
-rw-r--r--drivers/media/video/gspca/m5602/m5602_sensor.h9
-rw-r--r--drivers/media/video/gspca/mr97310a.c8
-rw-r--r--drivers/media/video/gspca/ov519.c520
-rw-r--r--drivers/media/video/gspca/ov534.c277
-rw-r--r--drivers/media/video/gspca/sonixb.c2
-rw-r--r--drivers/media/video/gspca/sonixj.c66
-rw-r--r--drivers/media/video/gspca/spca500.c33
-rw-r--r--drivers/media/video/gspca/spca505.c14
-rw-r--r--drivers/media/video/gspca/spca508.c1934
-rw-r--r--drivers/media/video/gspca/spca561.c105
-rw-r--r--drivers/media/video/gspca/sq905.c1
-rw-r--r--drivers/media/video/gspca/sq905c.c1
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c76
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h10
-rw-r--r--drivers/media/video/gspca/sunplus.c33
-rw-r--r--drivers/media/video/gspca/t613.c2
-rw-r--r--drivers/media/video/gspca/vc032x.c22
-rw-r--r--drivers/media/video/gspca/zc3xx.c22
-rw-r--r--drivers/media/video/hexium_gemini.c2
-rw-r--r--drivers/media/video/hexium_orion.c2
-rw-r--r--drivers/media/video/ir-kbd-i2c.c222
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c9
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c36
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c2
-rw-r--r--drivers/media/video/mt9m001.c108
-rw-r--r--drivers/media/video/mt9m111.c73
-rw-r--r--drivers/media/video/mt9t031.c135
-rw-r--r--drivers/media/video/mt9v022.c138
-rw-r--r--drivers/media/video/mx1_camera.c50
-rw-r--r--drivers/media/video/mx3_camera.c46
-rw-r--r--drivers/media/video/mxb.c4
-rw-r--r--drivers/media/video/ov511.c45
-rw-r--r--drivers/media/video/ov511.h3
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-devattr.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-devattr.h23
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h3
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c74
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.c51
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-sysfs.c22
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c6
-rw-r--r--drivers/media/video/pwc/pwc-if.c6
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c2
-rw-r--r--drivers/media/video/pxa_camera.c126
-rw-r--r--drivers/media/video/s2255drv.c110
-rw-r--r--drivers/media/video/saa7134/Kconfig1
-rw-r--r--drivers/media/video/saa7134/Makefile3
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c450
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c18
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c26
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c14
-rw-r--r--drivers/media/video/saa7134/saa7134-i2c.c33
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c118
-rw-r--r--drivers/media/video/saa7134/saa7134-ts.c122
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c10
-rw-r--r--drivers/media/video/saa7134/saa7134.h29
-rw-r--r--drivers/media/video/se401.c882
-rw-r--r--drivers/media/video/se401.h7
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c27
-rw-r--r--drivers/media/video/soc_camera.c106
-rw-r--r--drivers/media/video/stk-webcam.c4
-rw-r--r--drivers/media/video/tda7432.c14
-rw-r--r--drivers/media/video/tea6415c.c1
-rw-r--r--drivers/media/video/tea6420.c1
-rw-r--r--drivers/media/video/ths7303.c151
-rw-r--r--drivers/media/video/tuner-core.c33
-rw-r--r--drivers/media/video/tveeprom.c6
-rw-r--r--drivers/media/video/tvp514x.c2
-rw-r--r--drivers/media/video/usbvideo/konicawc.c4
-rw-r--r--drivers/media/video/usbvideo/quickcam_messenger.c4
-rw-r--r--drivers/media/video/usbvision/usbvision-core.c14
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c4
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c35
-rw-r--r--drivers/media/video/uvc/uvc_driver.c68
-rw-r--r--drivers/media/video/uvc/uvc_queue.c14
-rw-r--r--drivers/media/video/uvc/uvc_status.c21
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c39
-rw-r--r--drivers/media/video/uvc/uvc_video.c17
-rw-r--r--drivers/media/video/uvc/uvcvideo.h5
-rw-r--r--drivers/media/video/v4l2-common.c4
-rw-r--r--drivers/media/video/v4l2-device.c31
-rw-r--r--drivers/media/video/videobuf-core.c6
-rw-r--r--drivers/media/video/videobuf-dma-contig.c108
-rw-r--r--drivers/media/video/videobuf-dma-sg.c19
-rw-r--r--drivers/media/video/vino.c6
-rw-r--r--drivers/media/video/zoran/zoran_card.c4
-rw-r--r--drivers/media/video/zr364xx.c6
-rw-r--r--drivers/mfd/htc-pasic3.c4
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/wm8400-core.c2
-rw-r--r--drivers/misc/c2port/core.c2
-rw-r--r--drivers/misc/sgi-gru/grufile.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c2
-rw-r--r--drivers/mtd/ubi/Kconfig13
-rw-r--r--drivers/mtd/ubi/Makefile2
-rw-r--r--drivers/mtd/ubi/build.c161
-rw-r--r--drivers/mtd/ubi/cdev.c32
-rw-r--r--drivers/mtd/ubi/eba.c99
-rw-r--r--drivers/mtd/ubi/gluebi.c378
-rw-r--r--drivers/mtd/ubi/io.c82
-rw-r--r--drivers/mtd/ubi/kapi.c117
-rw-r--r--drivers/mtd/ubi/ubi.h84
-rw-r--r--drivers/mtd/ubi/upd.c8
-rw-r--r--drivers/mtd/ubi/vmt.c65
-rw-r--r--drivers/mtd/ubi/wl.c179
-rw-r--r--drivers/net/Kconfig9
-rw-r--r--drivers/net/tun.c1
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig5
-rw-r--r--drivers/net/wireless/libertas/README12
-rw-r--r--drivers/net/wireless/libertas/if_spi.c8
-rw-r--r--drivers/net/wireless/libertas/if_spi.h3
-rw-r--r--drivers/net/wireless/libertas/if_usb.c8
-rw-r--r--drivers/net/xen-netfront.c10
-rw-r--r--drivers/parisc/eisa.c2
-rw-r--r--drivers/parisc/sba_iommu.c6
-rw-r--r--drivers/parport/parport_gsc.c4
-rw-r--r--drivers/pci/pci.c5
-rw-r--r--drivers/pci/pcie/portdrv_core.c2
-rw-r--r--drivers/pcmcia/ds.c20
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c9
-rw-r--r--drivers/s390/char/con3215.c16
-rw-r--r--drivers/s390/char/raw3270.c16
-rw-r--r--drivers/s390/char/tape_34xx.c2
-rw-r--r--drivers/s390/char/tape_3590.c2
-rw-r--r--drivers/s390/char/tape_core.c22
-rw-r--r--drivers/s390/char/vmlogrdr.c12
-rw-r--r--drivers/s390/char/vmur.c16
-rw-r--r--drivers/s390/net/claw.c52
-rw-r--r--drivers/s390/net/lcs.c20
-rw-r--r--drivers/s390/net/lcs.h4
-rw-r--r--drivers/s390/net/netiucv.c47
-rw-r--r--drivers/s390/scsi/zfcp_aux.c10
-rw-r--r--drivers/s390/scsi/zfcp_def.h18
-rw-r--r--drivers/s390/scsi/zfcp_erp.c2
-rw-r--r--drivers/s390/scsi/zfcp_ext.h6
-rw-r--r--drivers/s390/scsi/zfcp_fc.c185
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c3
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c15
-rw-r--r--drivers/scsi/aha1740.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c10
-rw-r--r--drivers/scsi/bnx2i/Kconfig1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c2
-rw-r--r--drivers/scsi/libsrp.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c27
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c35
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c661
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c264
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
-rw-r--r--drivers/scsi/ncr53c8xx.c2
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c7
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_transport_fc.c614
-rw-r--r--drivers/scsi/scsi_transport_spi.c18
-rw-r--r--drivers/serial/of_serial.c4
-rw-r--r--drivers/spi/spi_mpc83xx.c6
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/octeon/Kconfig12
-rw-r--r--drivers/staging/octeon/Makefile30
-rw-r--r--drivers/staging/octeon/cvmx-address.h274
-rw-r--r--drivers/staging/octeon/cvmx-asxx-defs.h475
-rw-r--r--drivers/staging/octeon/cvmx-cmd-queue.c306
-rw-r--r--drivers/staging/octeon/cvmx-cmd-queue.h617
-rw-r--r--drivers/staging/octeon/cvmx-config.h169
-rw-r--r--drivers/staging/octeon/cvmx-dbg-defs.h72
-rw-r--r--drivers/staging/octeon/cvmx-fau.h597
-rw-r--r--drivers/staging/octeon/cvmx-fpa-defs.h403
-rw-r--r--drivers/staging/octeon/cvmx-fpa.c183
-rw-r--r--drivers/staging/octeon/cvmx-fpa.h299
-rw-r--r--drivers/staging/octeon/cvmx-gmxx-defs.h2529
-rw-r--r--drivers/staging/octeon/cvmx-helper-board.c706
-rw-r--r--drivers/staging/octeon/cvmx-helper-board.h180
-rw-r--r--drivers/staging/octeon/cvmx-helper-fpa.c243
-rw-r--r--drivers/staging/octeon/cvmx-helper-fpa.h64
-rw-r--r--drivers/staging/octeon/cvmx-helper-loop.c85
-rw-r--r--drivers/staging/octeon/cvmx-helper-loop.h59
-rw-r--r--drivers/staging/octeon/cvmx-helper-npi.c113
-rw-r--r--drivers/staging/octeon/cvmx-helper-npi.h60
-rw-r--r--drivers/staging/octeon/cvmx-helper-rgmii.c525
-rw-r--r--drivers/staging/octeon/cvmx-helper-rgmii.h110
-rw-r--r--drivers/staging/octeon/cvmx-helper-sgmii.c550
-rw-r--r--drivers/staging/octeon/cvmx-helper-sgmii.h104
-rw-r--r--drivers/staging/octeon/cvmx-helper-spi.c195
-rw-r--r--drivers/staging/octeon/cvmx-helper-spi.h84
-rw-r--r--drivers/staging/octeon/cvmx-helper-util.c433
-rw-r--r--drivers/staging/octeon/cvmx-helper-util.h215
-rw-r--r--drivers/staging/octeon/cvmx-helper-xaui.c348
-rw-r--r--drivers/staging/octeon/cvmx-helper-xaui.h103
-rw-r--r--drivers/staging/octeon/cvmx-helper.c1058
-rw-r--r--drivers/staging/octeon/cvmx-helper.h227
-rw-r--r--drivers/staging/octeon/cvmx-interrupt-decodes.c371
-rw-r--r--drivers/staging/octeon/cvmx-interrupt-rsl.c140
-rw-r--r--drivers/staging/octeon/cvmx-ipd.h338
-rw-r--r--drivers/staging/octeon/cvmx-mdio.h506
-rw-r--r--drivers/staging/octeon/cvmx-packet.h65
-rw-r--r--drivers/staging/octeon/cvmx-pcsx-defs.h370
-rw-r--r--drivers/staging/octeon/cvmx-pcsxx-defs.h316
-rw-r--r--drivers/staging/octeon/cvmx-pip-defs.h1267
-rw-r--r--drivers/staging/octeon/cvmx-pip.h524
-rw-r--r--drivers/staging/octeon/cvmx-pko-defs.h1133
-rw-r--r--drivers/staging/octeon/cvmx-pko.c506
-rw-r--r--drivers/staging/octeon/cvmx-pko.h610
-rw-r--r--drivers/staging/octeon/cvmx-pow.h1982
-rw-r--r--drivers/staging/octeon/cvmx-scratch.h139
-rw-r--r--drivers/staging/octeon/cvmx-smix-defs.h178
-rw-r--r--drivers/staging/octeon/cvmx-spi.c667
-rw-r--r--drivers/staging/octeon/cvmx-spi.h269
-rw-r--r--drivers/staging/octeon/cvmx-spxx-defs.h347
-rw-r--r--drivers/staging/octeon/cvmx-srxx-defs.h126
-rw-r--r--drivers/staging/octeon/cvmx-stxx-defs.h292
-rw-r--r--drivers/staging/octeon/cvmx-wqe.h397
-rw-r--r--drivers/staging/octeon/ethernet-common.c328
-rw-r--r--drivers/staging/octeon/ethernet-common.h29
-rw-r--r--drivers/staging/octeon/ethernet-defines.h134
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c231
-rw-r--r--drivers/staging/octeon/ethernet-mdio.h46
-rw-r--r--drivers/staging/octeon/ethernet-mem.c198
-rw-r--r--drivers/staging/octeon/ethernet-mem.h29
-rw-r--r--drivers/staging/octeon/ethernet-proc.c256
-rw-r--r--drivers/staging/octeon/ethernet-proc.h29
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c397
-rw-r--r--drivers/staging/octeon/ethernet-rx.c505
-rw-r--r--drivers/staging/octeon/ethernet-rx.h33
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c129
-rw-r--r--drivers/staging/octeon/ethernet-spi.c323
-rw-r--r--drivers/staging/octeon/ethernet-tx.c634
-rw-r--r--drivers/staging/octeon/ethernet-tx.h32
-rw-r--r--drivers/staging/octeon/ethernet-util.h81
-rw-r--r--drivers/staging/octeon/ethernet-xaui.c127
-rw-r--r--drivers/staging/octeon/ethernet.c507
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h127
-rw-r--r--drivers/staging/uc2322/aten2011.c4
-rw-r--r--drivers/thermal/thermal_sys.c4
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/atm/ueagle-atm.c9
-rw-r--r--drivers/usb/class/cdc-acm.c71
-rw-r--r--drivers/usb/class/cdc-acm.h2
-rw-r--r--drivers/usb/class/usblp.c6
-rw-r--r--drivers/usb/class/usbtmc.c6
-rw-r--r--drivers/usb/core/Kconfig16
-rw-r--r--drivers/usb/core/Makefile4
-rw-r--r--drivers/usb/core/config.c192
-rw-r--r--drivers/usb/core/driver.c56
-rw-r--r--drivers/usb/core/endpoint.c160
-rw-r--r--drivers/usb/core/file.c13
-rw-r--r--drivers/usb/core/hcd-pci.c244
-rw-r--r--drivers/usb/core/hcd.c220
-rw-r--r--drivers/usb/core/hcd.h55
-rw-r--r--drivers/usb/core/hub.c134
-rw-r--r--drivers/usb/core/hub.h3
-rw-r--r--drivers/usb/core/message.c194
-rw-r--r--drivers/usb/core/sysfs.c12
-rw-r--r--drivers/usb/core/urb.c12
-rw-r--r--drivers/usb/core/usb.c87
-rw-r--r--drivers/usb/core/usb.h13
-rw-r--r--drivers/usb/gadget/Kconfig53
-rw-r--r--drivers/usb/gadget/Makefile8
-rw-r--r--drivers/usb/gadget/at91_udc.c10
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c20
-rw-r--r--drivers/usb/gadget/audio.c302
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c6
-rw-r--r--drivers/usb/gadget/f_audio.c707
-rw-r--r--drivers/usb/gadget/f_rndis.c4
-rw-r--r--drivers/usb/gadget/file_storage.c93
-rw-r--r--drivers/usb/gadget/fsl_mx3_udc.c95
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c (renamed from drivers/usb/gadget/fsl_usb2_udc.c)69
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h18
-rw-r--r--drivers/usb/gadget/gadget_chips.h8
-rw-r--r--drivers/usb/gadget/goku_udc.c6
-rw-r--r--drivers/usb/gadget/imx_udc.c14
-rw-r--r--drivers/usb/gadget/inode.c14
-rw-r--r--drivers/usb/gadget/langwell_udc.c3373
-rw-r--r--drivers/usb/gadget/langwell_udc.h228
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c71
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h2
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c3269
-rw-r--r--drivers/usb/gadget/u_audio.c319
-rw-r--r--drivers/usb/gadget/u_audio.h56
-rw-r--r--drivers/usb/gadget/u_serial.c1
-rw-r--r--drivers/usb/host/Kconfig20
-rw-r--r--drivers/usb/host/Makefile2
-rw-r--r--drivers/usb/host/ehci-au1xxx.c1
-rw-r--r--drivers/usb/host/ehci-fsl.c1
-rw-r--r--drivers/usb/host/ehci-hcd.c47
-rw-r--r--drivers/usb/host/ehci-hub.c4
-rw-r--r--drivers/usb/host/ehci-ixp4xx.c1
-rw-r--r--drivers/usb/host/ehci-orion.c3
-rw-r--r--drivers/usb/host/ehci-pci.c27
-rw-r--r--drivers/usb/host/ehci-ppc-of.c1
-rw-r--r--drivers/usb/host/ehci-ps3.c1
-rw-r--r--drivers/usb/host/ehci-q.c19
-rw-r--r--drivers/usb/host/ehci-sched.c8
-rw-r--r--drivers/usb/host/ehci.h1
-rw-r--r--drivers/usb/host/fhci-dbg.c2
-rw-r--r--drivers/usb/host/hwa-hc.c21
-rw-r--r--drivers/usb/host/ohci-dbg.c31
-rw-r--r--drivers/usb/host/ohci-hcd.c38
-rw-r--r--drivers/usb/host/ohci-pci.c24
-rw-r--r--drivers/usb/host/pci-quirks.c123
-rw-r--r--drivers/usb/host/r8a66597-hcd.c62
-rw-r--r--drivers/usb/host/r8a66597.h38
-rw-r--r--drivers/usb/host/uhci-hcd.c23
-rw-r--r--drivers/usb/host/uhci-q.c2
-rw-r--r--drivers/usb/host/xhci-dbg.c485
-rw-r--r--drivers/usb/host/xhci-ext-caps.h145
-rw-r--r--drivers/usb/host/xhci-hcd.c1274
-rw-r--r--drivers/usb/host/xhci-hub.c308
-rw-r--r--drivers/usb/host/xhci-mem.c769
-rw-r--r--drivers/usb/host/xhci-pci.c166
-rw-r--r--drivers/usb/host/xhci-ring.c1648
-rw-r--r--drivers/usb/host/xhci.h1157
-rw-r--r--drivers/usb/misc/iowarrior.c6
-rw-r--r--drivers/usb/misc/legousbtower.c6
-rw-r--r--drivers/usb/misc/sisusbvga/Kconfig2
-rw-r--r--drivers/usb/misc/usbtest.c39
-rw-r--r--drivers/usb/mon/mon_text.c2
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/blackfin.c11
-rw-r--r--drivers/usb/musb/cppi_dma.c34
-rw-r--r--drivers/usb/musb/cppi_dma.h6
-rw-r--r--drivers/usb/musb/davinci.c54
-rw-r--r--drivers/usb/musb/musb_core.c228
-rw-r--r--drivers/usb/musb/musb_core.h22
-rw-r--r--drivers/usb/musb/musb_gadget.c45
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c45
-rw-r--r--drivers/usb/musb/musb_host.c273
-rw-r--r--drivers/usb/musb/musb_host.h1
-rw-r--r--drivers/usb/musb/musb_virthub.c35
-rw-r--r--drivers/usb/musb/omap2430.c71
-rw-r--r--drivers/usb/musb/tusb6010.c70
-rw-r--r--drivers/usb/otg/Kconfig14
-rw-r--r--drivers/usb/otg/Makefile1
-rw-r--r--drivers/usb/otg/langwell_otg.c1915
-rw-r--r--drivers/usb/otg/nop-usb-xceiv.c25
-rw-r--r--drivers/usb/otg/twl4030-usb.c28
-rw-r--r--drivers/usb/serial/aircable.c5
-rw-r--r--drivers/usb/serial/belkin_sa.c7
-rw-r--r--drivers/usb/serial/bus.c27
-rw-r--r--drivers/usb/serial/cp210x.c6
-rw-r--r--drivers/usb/serial/cyberjack.c20
-rw-r--r--drivers/usb/serial/cypress_m8.c11
-rw-r--r--drivers/usb/serial/digi_acceleport.c20
-rw-r--r--drivers/usb/serial/empeg.c8
-rw-r--r--drivers/usb/serial/ftdi_sio.c179
-rw-r--r--drivers/usb/serial/ftdi_sio.h13
-rw-r--r--drivers/usb/serial/garmin_gps.c214
-rw-r--r--drivers/usb/serial/generic.c186
-rw-r--r--drivers/usb/serial/io_edgeport.c29
-rw-r--r--drivers/usb/serial/io_tables.h12
-rw-r--r--drivers/usb/serial/io_ti.c22
-rw-r--r--drivers/usb/serial/ipaq.c7
-rw-r--r--drivers/usb/serial/iuu_phoenix.c6
-rw-r--r--drivers/usb/serial/keyspan.c13
-rw-r--r--drivers/usb/serial/keyspan.h12
-rw-r--r--drivers/usb/serial/keyspan_pda.c4
-rw-r--r--drivers/usb/serial/kl5kusb105.c39
-rw-r--r--drivers/usb/serial/kobil_sct.c12
-rw-r--r--drivers/usb/serial/mct_u232.c13
-rw-r--r--drivers/usb/serial/mos7720.c9
-rw-r--r--drivers/usb/serial/mos7840.c358
-rw-r--r--drivers/usb/serial/omninet.c19
-rw-r--r--drivers/usb/serial/opticon.c14
-rw-r--r--drivers/usb/serial/option.c41
-rw-r--r--drivers/usb/serial/oti6858.c7
-rw-r--r--drivers/usb/serial/pl2303.c10
-rw-r--r--drivers/usb/serial/sierra.c185
-rw-r--r--drivers/usb/serial/spcp8x5.c5
-rw-r--r--drivers/usb/serial/symbolserial.c14
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c10
-rw-r--r--drivers/usb/serial/usb-serial.c53
-rw-r--r--drivers/usb/serial/usb_debug.c41
-rw-r--r--drivers/usb/serial/visor.c13
-rw-r--r--drivers/usb/serial/whiteheat.c6
-rw-r--r--drivers/usb/storage/initializers.c14
-rw-r--r--drivers/usb/storage/option_ms.c124
-rw-r--r--drivers/usb/storage/sierra_ms.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/video/Kconfig1
-rw-r--r--drivers/video/acornfb.c38
-rw-r--r--drivers/video/atmel_lcdfb.c2
-rw-r--r--drivers/video/aty/radeon_pm.c3
-rw-r--r--drivers/video/bf54x-lq043fb.c15
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c15
-rw-r--r--drivers/video/carminefb.c2
-rw-r--r--drivers/video/chipsfb.c1
-rw-r--r--drivers/video/efifb.c5
-rw-r--r--drivers/video/fbmem.c31
-rw-r--r--drivers/video/igafb.c8
-rw-r--r--drivers/video/intelfb/intelfbdrv.c5
-rw-r--r--drivers/video/logo/Makefile12
-rw-r--r--drivers/video/logo/logo.c15
-rw-r--r--drivers/video/mb862xx/mb862xxfb.c2
-rw-r--r--drivers/video/modedb.c8
-rw-r--r--drivers/video/offb.c8
-rw-r--r--drivers/video/pm2fb.c2
-rw-r--r--drivers/video/s1d13xxxfb.c341
-rw-r--r--drivers/video/s3c-fb.c53
-rw-r--r--drivers/video/s3c2410fb.c67
-rw-r--r--drivers/video/s3c2410fb.h5
-rw-r--r--drivers/video/sis/sis_main.c4
-rw-r--r--drivers/video/stifb.c2
-rw-r--r--drivers/video/tcx.c27
-rw-r--r--drivers/video/vesafb.c15
-rw-r--r--drivers/video/xen-fbfront.c8
-rw-r--r--drivers/vlynq/Kconfig20
-rw-r--r--drivers/vlynq/Makefile5
-rw-r--r--drivers/vlynq/vlynq.c814
730 files changed, 114224 insertions, 10761 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 00cf9553f740..a442c8f29fc1 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -104,6 +104,8 @@ source "drivers/auxdisplay/Kconfig"
104 104
105source "drivers/uio/Kconfig" 105source "drivers/uio/Kconfig"
106 106
107source "drivers/vlynq/Kconfig"
108
107source "drivers/xen/Kconfig" 109source "drivers/xen/Kconfig"
108 110
109source "drivers/staging/Kconfig" 111source "drivers/staging/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 9e7d4e56c85b..00b44f4ccf03 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -105,6 +105,7 @@ obj-$(CONFIG_PPC_PS3) += ps3/
105obj-$(CONFIG_OF) += of/ 105obj-$(CONFIG_OF) += of/
106obj-$(CONFIG_SSB) += ssb/ 106obj-$(CONFIG_SSB) += ssb/
107obj-$(CONFIG_VIRTIO) += virtio/ 107obj-$(CONFIG_VIRTIO) += virtio/
108obj-$(CONFIG_VLYNQ) += vlynq/
108obj-$(CONFIG_STAGING) += staging/ 109obj-$(CONFIG_STAGING) += staging/
109obj-y += platform/ 110obj-y += platform/
110obj-y += ieee802154/ 111obj-y += ieee802154/
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 1977d4beb89e..7ecb1938e590 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -22,6 +22,7 @@
22#include <linux/kallsyms.h> 22#include <linux/kallsyms.h>
23#include <linux/semaphore.h> 23#include <linux/semaphore.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/async.h>
25 26
26#include "base.h" 27#include "base.h"
27#include "power/power.h" 28#include "power/power.h"
@@ -161,10 +162,18 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
161 struct device *dev = to_dev(kobj); 162 struct device *dev = to_dev(kobj);
162 int retval = 0; 163 int retval = 0;
163 164
164 /* add the major/minor if present */ 165 /* add device node properties if present */
165 if (MAJOR(dev->devt)) { 166 if (MAJOR(dev->devt)) {
167 const char *tmp;
168 const char *name;
169
166 add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt)); 170 add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
167 add_uevent_var(env, "MINOR=%u", MINOR(dev->devt)); 171 add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
172 name = device_get_nodename(dev, &tmp);
173 if (name) {
174 add_uevent_var(env, "DEVNAME=%s", name);
175 kfree(tmp);
176 }
168 } 177 }
169 178
170 if (dev->type && dev->type->name) 179 if (dev->type && dev->type->name)
@@ -874,7 +883,7 @@ int device_add(struct device *dev)
874 * the name, and force the use of dev_name() 883 * the name, and force the use of dev_name()
875 */ 884 */
876 if (dev->init_name) { 885 if (dev->init_name) {
877 dev_set_name(dev, dev->init_name); 886 dev_set_name(dev, "%s", dev->init_name);
878 dev->init_name = NULL; 887 dev->init_name = NULL;
879 } 888 }
880 889
@@ -1128,6 +1137,47 @@ static struct device *next_device(struct klist_iter *i)
1128} 1137}
1129 1138
1130/** 1139/**
1140 * device_get_nodename - path of device node file
1141 * @dev: device
1142 * @tmp: possibly allocated string
1143 *
1144 * Return the relative path of a possible device node.
1145 * Non-default names may need to allocate a memory to compose
1146 * a name. This memory is returned in tmp and needs to be
1147 * freed by the caller.
1148 */
1149const char *device_get_nodename(struct device *dev, const char **tmp)
1150{
1151 char *s;
1152
1153 *tmp = NULL;
1154
1155 /* the device type may provide a specific name */
1156 if (dev->type && dev->type->nodename)
1157 *tmp = dev->type->nodename(dev);
1158 if (*tmp)
1159 return *tmp;
1160
1161 /* the class may provide a specific name */
1162 if (dev->class && dev->class->nodename)
1163 *tmp = dev->class->nodename(dev);
1164 if (*tmp)
1165 return *tmp;
1166
1167 /* return name without allocation, tmp == NULL */
1168 if (strchr(dev_name(dev), '!') == NULL)
1169 return dev_name(dev);
1170
1171 /* replace '!' in the name with '/' */
1172 *tmp = kstrdup(dev_name(dev), GFP_KERNEL);
1173 if (!*tmp)
1174 return NULL;
1175 while ((s = strchr(*tmp, '!')))
1176 s[0] = '/';
1177 return *tmp;
1178}
1179
1180/**
1131 * device_for_each_child - device child iterator. 1181 * device_for_each_child - device child iterator.
1132 * @parent: parent struct device. 1182 * @parent: parent struct device.
1133 * @data: data for the callback. 1183 * @data: data for the callback.
@@ -1271,7 +1321,7 @@ struct device *__root_device_register(const char *name, struct module *owner)
1271 if (!root) 1321 if (!root)
1272 return ERR_PTR(err); 1322 return ERR_PTR(err);
1273 1323
1274 err = dev_set_name(&root->dev, name); 1324 err = dev_set_name(&root->dev, "%s", name);
1275 if (err) { 1325 if (err) {
1276 kfree(root); 1326 kfree(root);
1277 return ERR_PTR(err); 1327 return ERR_PTR(err);
@@ -1665,4 +1715,5 @@ void device_shutdown(void)
1665 kobject_put(sysfs_dev_char_kobj); 1715 kobject_put(sysfs_dev_char_kobj);
1666 kobject_put(sysfs_dev_block_kobj); 1716 kobject_put(sysfs_dev_block_kobj);
1667 kobject_put(dev_kobj); 1717 kobject_put(dev_kobj);
1718 async_synchronize_full();
1668} 1719}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 742cbe6b042b..f0106875f01d 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -226,7 +226,7 @@ static int __device_attach(struct device_driver *drv, void *data)
226 * pair is found, break out and return. 226 * pair is found, break out and return.
227 * 227 *
228 * Returns 1 if the device was bound to a driver; 228 * Returns 1 if the device was bound to a driver;
229 * 0 if no matching device was found; 229 * 0 if no matching driver was found;
230 * -ENODEV if the device is not registered. 230 * -ENODEV if the device is not registered.
231 * 231 *
232 * When called for a USB interface, @dev->parent->sem must be held. 232 * When called for a USB interface, @dev->parent->sem must be held.
@@ -320,6 +320,10 @@ static void __device_release_driver(struct device *dev)
320 devres_release_all(dev); 320 devres_release_all(dev);
321 dev->driver = NULL; 321 dev->driver = NULL;
322 klist_remove(&dev->p->knode_driver); 322 klist_remove(&dev->p->knode_driver);
323 if (dev->bus)
324 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
325 BUS_NOTIFY_UNBOUND_DRIVER,
326 dev);
323 } 327 }
324} 328}
325 329
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 8a267c427629..ddeb819c8f87 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -40,7 +40,7 @@ static int loading_timeout = 60; /* In seconds */
40static DEFINE_MUTEX(fw_lock); 40static DEFINE_MUTEX(fw_lock);
41 41
42struct firmware_priv { 42struct firmware_priv {
43 char fw_id[FIRMWARE_NAME_MAX]; 43 char *fw_id;
44 struct completion completion; 44 struct completion completion;
45 struct bin_attribute attr_data; 45 struct bin_attribute attr_data;
46 struct firmware *fw; 46 struct firmware *fw;
@@ -355,8 +355,9 @@ static void fw_dev_release(struct device *dev)
355 for (i = 0; i < fw_priv->nr_pages; i++) 355 for (i = 0; i < fw_priv->nr_pages; i++)
356 __free_page(fw_priv->pages[i]); 356 __free_page(fw_priv->pages[i]);
357 kfree(fw_priv->pages); 357 kfree(fw_priv->pages);
358 kfree(fw_priv->fw_id);
358 kfree(fw_priv); 359 kfree(fw_priv);
359 kfree(dev); 360 put_device(dev);
360 361
361 module_put(THIS_MODULE); 362 module_put(THIS_MODULE);
362} 363}
@@ -386,13 +387,19 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
386 387
387 init_completion(&fw_priv->completion); 388 init_completion(&fw_priv->completion);
388 fw_priv->attr_data = firmware_attr_data_tmpl; 389 fw_priv->attr_data = firmware_attr_data_tmpl;
389 strlcpy(fw_priv->fw_id, fw_name, FIRMWARE_NAME_MAX); 390 fw_priv->fw_id = kstrdup(fw_name, GFP_KERNEL);
391 if (!fw_priv->fw_id) {
392 dev_err(device, "%s: Firmware name allocation failed\n",
393 __func__);
394 retval = -ENOMEM;
395 goto error_kfree;
396 }
390 397
391 fw_priv->timeout.function = firmware_class_timeout; 398 fw_priv->timeout.function = firmware_class_timeout;
392 fw_priv->timeout.data = (u_long) fw_priv; 399 fw_priv->timeout.data = (u_long) fw_priv;
393 init_timer(&fw_priv->timeout); 400 init_timer(&fw_priv->timeout);
394 401
395 dev_set_name(f_dev, dev_name(device)); 402 dev_set_name(f_dev, "%s", dev_name(device));
396 f_dev->parent = device; 403 f_dev->parent = device;
397 f_dev->class = &firmware_class; 404 f_dev->class = &firmware_class;
398 dev_set_drvdata(f_dev, fw_priv); 405 dev_set_drvdata(f_dev, fw_priv);
@@ -400,14 +407,17 @@ static int fw_register_device(struct device **dev_p, const char *fw_name,
400 retval = device_register(f_dev); 407 retval = device_register(f_dev);
401 if (retval) { 408 if (retval) {
402 dev_err(device, "%s: device_register failed\n", __func__); 409 dev_err(device, "%s: device_register failed\n", __func__);
403 goto error_kfree; 410 put_device(f_dev);
411 goto error_kfree_fw_id;
404 } 412 }
405 *dev_p = f_dev; 413 *dev_p = f_dev;
406 return 0; 414 return 0;
407 415
416error_kfree_fw_id:
417 kfree(fw_priv->fw_id);
408error_kfree: 418error_kfree:
409 kfree(fw_priv);
410 kfree(f_dev); 419 kfree(f_dev);
420 kfree(fw_priv);
411 return retval; 421 return retval;
412} 422}
413 423
@@ -615,8 +625,9 @@ request_firmware_work_func(void *arg)
615 * @cont: function will be called asynchronously when the firmware 625 * @cont: function will be called asynchronously when the firmware
616 * request is over. 626 * request is over.
617 * 627 *
618 * Asynchronous variant of request_firmware() for contexts where 628 * Asynchronous variant of request_firmware() for user contexts where
619 * it is not possible to sleep. 629 * it is not possible to sleep for long time. It can't be called
630 * in atomic contexts.
620 **/ 631 **/
621int 632int
622request_firmware_nowait( 633request_firmware_nowait(
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 40b809742a1c..91d4087b4039 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -72,10 +72,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
72 "Node %d Inactive(anon): %8lu kB\n" 72 "Node %d Inactive(anon): %8lu kB\n"
73 "Node %d Active(file): %8lu kB\n" 73 "Node %d Active(file): %8lu kB\n"
74 "Node %d Inactive(file): %8lu kB\n" 74 "Node %d Inactive(file): %8lu kB\n"
75#ifdef CONFIG_UNEVICTABLE_LRU
76 "Node %d Unevictable: %8lu kB\n" 75 "Node %d Unevictable: %8lu kB\n"
77 "Node %d Mlocked: %8lu kB\n" 76 "Node %d Mlocked: %8lu kB\n"
78#endif
79#ifdef CONFIG_HIGHMEM 77#ifdef CONFIG_HIGHMEM
80 "Node %d HighTotal: %8lu kB\n" 78 "Node %d HighTotal: %8lu kB\n"
81 "Node %d HighFree: %8lu kB\n" 79 "Node %d HighFree: %8lu kB\n"
@@ -105,10 +103,8 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
105 nid, K(node_page_state(nid, NR_INACTIVE_ANON)), 103 nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
106 nid, K(node_page_state(nid, NR_ACTIVE_FILE)), 104 nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
107 nid, K(node_page_state(nid, NR_INACTIVE_FILE)), 105 nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
108#ifdef CONFIG_UNEVICTABLE_LRU
109 nid, K(node_page_state(nid, NR_UNEVICTABLE)), 106 nid, K(node_page_state(nid, NR_UNEVICTABLE)),
110 nid, K(node_page_state(nid, NR_MLOCK)), 107 nid, K(node_page_state(nid, NR_MLOCK)),
111#endif
112#ifdef CONFIG_HIGHMEM 108#ifdef CONFIG_HIGHMEM
113 nid, K(i.totalhigh), 109 nid, K(i.totalhigh),
114 nid, K(i.freehigh), 110 nid, K(i.freehigh),
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ead3f64c41d0..81cb01bfc356 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -69,7 +69,8 @@ EXPORT_SYMBOL_GPL(platform_get_irq);
69 * @name: resource name 69 * @name: resource name
70 */ 70 */
71struct resource *platform_get_resource_byname(struct platform_device *dev, 71struct resource *platform_get_resource_byname(struct platform_device *dev,
72 unsigned int type, char *name) 72 unsigned int type,
73 const char *name)
73{ 74{
74 int i; 75 int i;
75 76
@@ -88,7 +89,7 @@ EXPORT_SYMBOL_GPL(platform_get_resource_byname);
88 * @dev: platform device 89 * @dev: platform device
89 * @name: IRQ name 90 * @name: IRQ name
90 */ 91 */
91int platform_get_irq_byname(struct platform_device *dev, char *name) 92int platform_get_irq_byname(struct platform_device *dev, const char *name)
92{ 93{
93 struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ, 94 struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ,
94 name); 95 name);
@@ -244,7 +245,7 @@ int platform_device_add(struct platform_device *pdev)
244 if (pdev->id != -1) 245 if (pdev->id != -1)
245 dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id); 246 dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
246 else 247 else
247 dev_set_name(&pdev->dev, pdev->name); 248 dev_set_name(&pdev->dev, "%s", pdev->name);
248 249
249 for (i = 0; i < pdev->num_resources; i++) { 250 for (i = 0; i < pdev->num_resources; i++) {
250 struct resource *p, *r = &pdev->resource[i]; 251 struct resource *p, *r = &pdev->resource[i];
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 9742a78c9fe4..79a9ae5238ac 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -131,6 +131,8 @@ static struct kset *system_kset;
131 131
132int sysdev_class_register(struct sysdev_class *cls) 132int sysdev_class_register(struct sysdev_class *cls)
133{ 133{
134 int retval;
135
134 pr_debug("Registering sysdev class '%s'\n", cls->name); 136 pr_debug("Registering sysdev class '%s'\n", cls->name);
135 137
136 INIT_LIST_HEAD(&cls->drivers); 138 INIT_LIST_HEAD(&cls->drivers);
@@ -138,7 +140,11 @@ int sysdev_class_register(struct sysdev_class *cls)
138 cls->kset.kobj.parent = &system_kset->kobj; 140 cls->kset.kobj.parent = &system_kset->kobj;
139 cls->kset.kobj.ktype = &ktype_sysdev_class; 141 cls->kset.kobj.ktype = &ktype_sysdev_class;
140 cls->kset.kobj.kset = system_kset; 142 cls->kset.kobj.kset = system_kset;
141 kobject_set_name(&cls->kset.kobj, cls->name); 143
144 retval = kobject_set_name(&cls->kset.kobj, "%s", cls->name);
145 if (retval)
146 return retval;
147
142 return kset_register(&cls->kset); 148 return kset_register(&cls->kset);
143} 149}
144 150
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 200efc4d2c1e..19888354188f 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -266,6 +266,11 @@ static const struct file_operations aoe_fops = {
266 .owner = THIS_MODULE, 266 .owner = THIS_MODULE,
267}; 267};
268 268
269static char *aoe_nodename(struct device *dev)
270{
271 return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
272}
273
269int __init 274int __init
270aoechr_init(void) 275aoechr_init(void)
271{ 276{
@@ -283,6 +288,8 @@ aoechr_init(void)
283 unregister_chrdev(AOE_MAJOR, "aoechr"); 288 unregister_chrdev(AOE_MAJOR, "aoechr");
284 return PTR_ERR(aoe_class); 289 return PTR_ERR(aoe_class);
285 } 290 }
291 aoe_class->nodename = aoe_nodename;
292
286 for (i = 0; i < ARRAY_SIZE(chardevs); ++i) 293 for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
287 device_create(aoe_class, NULL, 294 device_create(aoe_class, NULL,
288 MKDEV(AOE_MAJOR, chardevs[i].minor), NULL, 295 MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d57f11759480..83650e00632d 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -430,7 +430,7 @@ static void pkt_sysfs_cleanup(void)
430/******************************************************************** 430/********************************************************************
431 entries in debugfs 431 entries in debugfs
432 432
433 /debugfs/pktcdvd[0-7]/ 433 /sys/kernel/debug/pktcdvd[0-7]/
434 info 434 info
435 435
436 *******************************************************************/ 436 *******************************************************************/
@@ -2855,6 +2855,11 @@ static struct block_device_operations pktcdvd_ops = {
2855 .media_changed = pkt_media_changed, 2855 .media_changed = pkt_media_changed,
2856}; 2856};
2857 2857
2858static char *pktcdvd_nodename(struct gendisk *gd)
2859{
2860 return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
2861}
2862
2858/* 2863/*
2859 * Set up mapping from pktcdvd device to CD-ROM device. 2864 * Set up mapping from pktcdvd device to CD-ROM device.
2860 */ 2865 */
@@ -2907,6 +2912,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
2907 disk->fops = &pktcdvd_ops; 2912 disk->fops = &pktcdvd_ops;
2908 disk->flags = GENHD_FL_REMOVABLE; 2913 disk->flags = GENHD_FL_REMOVABLE;
2909 strcpy(disk->disk_name, pd->name); 2914 strcpy(disk->disk_name, pd->name);
2915 disk->nodename = pktcdvd_nodename;
2910 disk->private_data = pd; 2916 disk->private_data = pd;
2911 disk->queue = blk_alloc_queue(GFP_KERNEL); 2917 disk->queue = blk_alloc_queue(GFP_KERNEL);
2912 if (!disk->queue) 2918 if (!disk->queue)
@@ -3062,6 +3068,7 @@ static const struct file_operations pkt_ctl_fops = {
3062static struct miscdevice pkt_misc = { 3068static struct miscdevice pkt_misc = {
3063 .minor = MISC_DYNAMIC_MINOR, 3069 .minor = MISC_DYNAMIC_MINOR,
3064 .name = DRIVER_NAME, 3070 .name = DRIVER_NAME,
3071 .name = "pktcdvd/control",
3065 .fops = &pkt_ctl_fops 3072 .fops = &pkt_ctl_fops
3066}; 3073};
3067 3074
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c1996829d5ec..e53284767f7c 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -753,12 +753,12 @@ static int blkfront_probe(struct xenbus_device *dev,
753 753
754 /* Front end dir is a number, which is used as the id. */ 754 /* Front end dir is a number, which is used as the id. */
755 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); 755 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
756 dev->dev.driver_data = info; 756 dev_set_drvdata(&dev->dev, info);
757 757
758 err = talk_to_backend(dev, info); 758 err = talk_to_backend(dev, info);
759 if (err) { 759 if (err) {
760 kfree(info); 760 kfree(info);
761 dev->dev.driver_data = NULL; 761 dev_set_drvdata(&dev->dev, NULL);
762 return err; 762 return err;
763 } 763 }
764 764
@@ -843,7 +843,7 @@ static int blkif_recover(struct blkfront_info *info)
843 */ 843 */
844static int blkfront_resume(struct xenbus_device *dev) 844static int blkfront_resume(struct xenbus_device *dev)
845{ 845{
846 struct blkfront_info *info = dev->dev.driver_data; 846 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
847 int err; 847 int err;
848 848
849 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); 849 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
@@ -922,7 +922,7 @@ static void blkfront_connect(struct blkfront_info *info)
922 */ 922 */
923static void blkfront_closing(struct xenbus_device *dev) 923static void blkfront_closing(struct xenbus_device *dev)
924{ 924{
925 struct blkfront_info *info = dev->dev.driver_data; 925 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
926 unsigned long flags; 926 unsigned long flags;
927 927
928 dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename); 928 dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);
@@ -957,7 +957,7 @@ static void blkfront_closing(struct xenbus_device *dev)
957static void backend_changed(struct xenbus_device *dev, 957static void backend_changed(struct xenbus_device *dev,
958 enum xenbus_state backend_state) 958 enum xenbus_state backend_state)
959{ 959{
960 struct blkfront_info *info = dev->dev.driver_data; 960 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
961 struct block_device *bd; 961 struct block_device *bd;
962 962
963 dev_dbg(&dev->dev, "blkfront:backend_changed.\n"); 963 dev_dbg(&dev->dev, "blkfront:backend_changed.\n");
@@ -997,7 +997,7 @@ static void backend_changed(struct xenbus_device *dev,
997 997
998static int blkfront_remove(struct xenbus_device *dev) 998static int blkfront_remove(struct xenbus_device *dev)
999{ 999{
1000 struct blkfront_info *info = dev->dev.driver_data; 1000 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
1001 1001
1002 dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename); 1002 dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);
1003 1003
@@ -1010,7 +1010,7 @@ static int blkfront_remove(struct xenbus_device *dev)
1010 1010
1011static int blkfront_is_ready(struct xenbus_device *dev) 1011static int blkfront_is_ready(struct xenbus_device *dev)
1012{ 1012{
1013 struct blkfront_info *info = dev->dev.driver_data; 1013 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
1014 1014
1015 return info->is_ready; 1015 return info->is_ready;
1016} 1016}
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 2724d628527f..266b858b8f85 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -347,7 +347,7 @@ static void __exit hvcs_module_exit(void);
347 347
348static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod) 348static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
349{ 349{
350 return viod->dev.driver_data; 350 return dev_get_drvdata(&viod->dev);
351} 351}
352/* The sysfs interface for the driver and devices */ 352/* The sysfs interface for the driver and devices */
353 353
@@ -785,7 +785,7 @@ static int __devinit hvcs_probe(
785 kref_init(&hvcsd->kref); 785 kref_init(&hvcsd->kref);
786 786
787 hvcsd->vdev = dev; 787 hvcsd->vdev = dev;
788 dev->dev.driver_data = hvcsd; 788 dev_set_drvdata(&dev->dev, hvcsd);
789 789
790 hvcsd->index = index; 790 hvcsd->index = index;
791 791
@@ -831,7 +831,7 @@ static int __devinit hvcs_probe(
831 831
832static int __devexit hvcs_remove(struct vio_dev *dev) 832static int __devexit hvcs_remove(struct vio_dev *dev)
833{ 833{
834 struct hvcs_struct *hvcsd = dev->dev.driver_data; 834 struct hvcs_struct *hvcsd = dev_get_drvdata(&dev->dev);
835 unsigned long flags; 835 unsigned long flags;
836 struct tty_struct *tty; 836 struct tty_struct *tty;
837 837
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index f4b3f7293feb..ce66a70184f7 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -149,6 +149,19 @@ config HW_RANDOM_VIRTIO
149 To compile this driver as a module, choose M here: the 149 To compile this driver as a module, choose M here: the
150 module will be called virtio-rng. If unsure, say N. 150 module will be called virtio-rng. If unsure, say N.
151 151
152config HW_RANDOM_TX4939
153 tristate "TX4939 Random Number Generator support"
154 depends on HW_RANDOM && SOC_TX4939
155 default HW_RANDOM
156 ---help---
157 This driver provides kernel-side support for the Random Number
158 Generator hardware found on TX4939 SoC.
159
160 To compile this driver as a module, choose M here: the
161 module will be called tx4939-rng.
162
163 If unsure, say Y.
164
152config HW_RANDOM_MXC_RNGA 165config HW_RANDOM_MXC_RNGA
153 tristate "Freescale i.MX RNGA Random Number Generator" 166 tristate "Freescale i.MX RNGA Random Number Generator"
154 depends on HW_RANDOM && ARCH_HAS_RNGA 167 depends on HW_RANDOM && ARCH_HAS_RNGA
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index fd1ecd2f6731..676828ba8123 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -15,4 +15,5 @@ obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
15obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o 15obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
16obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o 16obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
17obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o 17obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
18obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
18obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o 19obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index e5d583c84e4f..fc93e2fc7c71 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -153,6 +153,7 @@ static const struct file_operations rng_chrdev_ops = {
153static struct miscdevice rng_miscdev = { 153static struct miscdevice rng_miscdev = {
154 .minor = RNG_MISCDEV_MINOR, 154 .minor = RNG_MISCDEV_MINOR,
155 .name = RNG_MODULE_NAME, 155 .name = RNG_MODULE_NAME,
156 .devnode = "hwrng",
156 .fops = &rng_chrdev_ops, 157 .fops = &rng_chrdev_ops,
157}; 158};
158 159
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
new file mode 100644
index 000000000000..544d9085a8e8
--- /dev/null
+++ b/drivers/char/hw_random/tx4939-rng.c
@@ -0,0 +1,184 @@
1/*
2 * RNG driver for TX4939 Random Number Generators (RNG)
3 *
4 * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/delay.h>
14#include <linux/io.h>
15#include <linux/platform_device.h>
16#include <linux/hw_random.h>
17
18#define TX4939_RNG_RCSR 0x00000000
19#define TX4939_RNG_ROR(n) (0x00000018 + (n) * 8)
20
21#define TX4939_RNG_RCSR_INTE 0x00000008
22#define TX4939_RNG_RCSR_RST 0x00000004
23#define TX4939_RNG_RCSR_FIN 0x00000002
24#define TX4939_RNG_RCSR_ST 0x00000001
25
26struct tx4939_rng {
27 struct hwrng rng;
28 void __iomem *base;
29 u64 databuf[3];
30 unsigned int data_avail;
31};
32
33static void rng_io_start(void)
34{
35#ifndef CONFIG_64BIT
36 /*
37 * readq is reading a 64-bit register using a 64-bit load. On
38 * a 32-bit kernel however interrupts or any other processor
39 * exception would clobber the upper 32-bit of the processor
40 * register so interrupts need to be disabled.
41 */
42 local_irq_disable();
43#endif
44}
45
46static void rng_io_end(void)
47{
48#ifndef CONFIG_64BIT
49 local_irq_enable();
50#endif
51}
52
53static u64 read_rng(void __iomem *base, unsigned int offset)
54{
55 return ____raw_readq(base + offset);
56}
57
58static void write_rng(u64 val, void __iomem *base, unsigned int offset)
59{
60 return ____raw_writeq(val, base + offset);
61}
62
63static int tx4939_rng_data_present(struct hwrng *rng, int wait)
64{
65 struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
66 int i;
67
68 if (rngdev->data_avail)
69 return rngdev->data_avail;
70 for (i = 0; i < 20; i++) {
71 rng_io_start();
72 if (!(read_rng(rngdev->base, TX4939_RNG_RCSR)
73 & TX4939_RNG_RCSR_ST)) {
74 rngdev->databuf[0] =
75 read_rng(rngdev->base, TX4939_RNG_ROR(0));
76 rngdev->databuf[1] =
77 read_rng(rngdev->base, TX4939_RNG_ROR(1));
78 rngdev->databuf[2] =
79 read_rng(rngdev->base, TX4939_RNG_ROR(2));
80 rngdev->data_avail =
81 sizeof(rngdev->databuf) / sizeof(u32);
82 /* Start RNG */
83 write_rng(TX4939_RNG_RCSR_ST,
84 rngdev->base, TX4939_RNG_RCSR);
85 wait = 0;
86 }
87 rng_io_end();
88 if (!wait)
89 break;
90 /* 90 bus clock cycles by default for generation */
91 ndelay(90 * 5);
92 }
93 return rngdev->data_avail;
94}
95
96static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer)
97{
98 struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
99
100 rngdev->data_avail--;
101 *buffer = *((u32 *)&rngdev->databuf + rngdev->data_avail);
102 return sizeof(u32);
103}
104
105static int __init tx4939_rng_probe(struct platform_device *dev)
106{
107 struct tx4939_rng *rngdev;
108 struct resource *r;
109 int i;
110
111 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
112 if (!r)
113 return -EBUSY;
114 rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
115 if (!rngdev)
116 return -ENOMEM;
117 if (!devm_request_mem_region(&dev->dev, r->start, resource_size(r),
118 dev_name(&dev->dev)))
119 return -EBUSY;
120 rngdev->base = devm_ioremap(&dev->dev, r->start, resource_size(r));
121 if (!rngdev->base)
122 return -EBUSY;
123
124 rngdev->rng.name = dev_name(&dev->dev);
125 rngdev->rng.data_present = tx4939_rng_data_present;
126 rngdev->rng.data_read = tx4939_rng_data_read;
127
128 rng_io_start();
129 /* Reset RNG */
130 write_rng(TX4939_RNG_RCSR_RST, rngdev->base, TX4939_RNG_RCSR);
131 write_rng(0, rngdev->base, TX4939_RNG_RCSR);
132 /* Start RNG */
133 write_rng(TX4939_RNG_RCSR_ST, rngdev->base, TX4939_RNG_RCSR);
134 rng_io_end();
135 /*
136 * Drop first two results. From the datasheet:
137 * The quality of the random numbers generated immediately
138 * after reset can be insufficient. Therefore, do not use
139 * random numbers obtained from the first and second
140 * generations; use the ones from the third or subsequent
141 * generation.
142 */
143 for (i = 0; i < 2; i++) {
144 rngdev->data_avail = 0;
145 if (!tx4939_rng_data_present(&rngdev->rng, 1))
146 return -EIO;
147 }
148
149 platform_set_drvdata(dev, rngdev);
150 return hwrng_register(&rngdev->rng);
151}
152
153static int __exit tx4939_rng_remove(struct platform_device *dev)
154{
155 struct tx4939_rng *rngdev = platform_get_drvdata(dev);
156
157 hwrng_unregister(&rngdev->rng);
158 platform_set_drvdata(dev, NULL);
159 return 0;
160}
161
162static struct platform_driver tx4939_rng_driver = {
163 .driver = {
164 .name = "tx4939-rng",
165 .owner = THIS_MODULE,
166 },
167 .remove = tx4939_rng_remove,
168};
169
170static int __init tx4939rng_init(void)
171{
172 return platform_driver_probe(&tx4939_rng_driver, tx4939_rng_probe);
173}
174
175static void __exit tx4939rng_exit(void)
176{
177 platform_driver_unregister(&tx4939_rng_driver);
178}
179
180module_init(tx4939rng_init);
181module_exit(tx4939rng_exit);
182
183MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for TX4939");
184MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 259644646b82..d2e698096ace 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2375,14 +2375,14 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
2375 info->io.addr_data, info->io.regsize, info->io.regspacing, 2375 info->io.addr_data, info->io.regsize, info->io.regspacing,
2376 info->irq); 2376 info->irq);
2377 2377
2378 dev->dev.driver_data = (void *) info; 2378 dev_set_drvdata(&dev->dev, info);
2379 2379
2380 return try_smi_init(info); 2380 return try_smi_init(info);
2381} 2381}
2382 2382
2383static int __devexit ipmi_of_remove(struct of_device *dev) 2383static int __devexit ipmi_of_remove(struct of_device *dev)
2384{ 2384{
2385 cleanup_one_si(dev->dev.driver_data); 2385 cleanup_one_si(dev_get_drvdata(&dev->dev));
2386 return 0; 2386 return 0;
2387} 2387}
2388 2388
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index a5e0db9d7662..62c99fa59e2b 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -168,7 +168,6 @@ static const struct file_operations misc_fops = {
168 .open = misc_open, 168 .open = misc_open,
169}; 169};
170 170
171
172/** 171/**
173 * misc_register - register a miscellaneous device 172 * misc_register - register a miscellaneous device
174 * @misc: device structure 173 * @misc: device structure
@@ -217,8 +216,8 @@ int misc_register(struct miscdevice * misc)
217 misc_minors[misc->minor >> 3] |= 1 << (misc->minor & 7); 216 misc_minors[misc->minor >> 3] |= 1 << (misc->minor & 7);
218 dev = MKDEV(MISC_MAJOR, misc->minor); 217 dev = MKDEV(MISC_MAJOR, misc->minor);
219 218
220 misc->this_device = device_create(misc_class, misc->parent, dev, NULL, 219 misc->this_device = device_create(misc_class, misc->parent, dev,
221 "%s", misc->name); 220 misc, "%s", misc->name);
222 if (IS_ERR(misc->this_device)) { 221 if (IS_ERR(misc->this_device)) {
223 err = PTR_ERR(misc->this_device); 222 err = PTR_ERR(misc->this_device);
224 goto out; 223 goto out;
@@ -264,6 +263,15 @@ int misc_deregister(struct miscdevice *misc)
264EXPORT_SYMBOL(misc_register); 263EXPORT_SYMBOL(misc_register);
265EXPORT_SYMBOL(misc_deregister); 264EXPORT_SYMBOL(misc_deregister);
266 265
266static char *misc_nodename(struct device *dev)
267{
268 struct miscdevice *c = dev_get_drvdata(dev);
269
270 if (c->devnode)
271 return kstrdup(c->devnode, GFP_KERNEL);
272 return NULL;
273}
274
267static int __init misc_init(void) 275static int __init misc_init(void)
268{ 276{
269 int err; 277 int err;
@@ -279,6 +287,7 @@ static int __init misc_init(void)
279 err = -EIO; 287 err = -EIO;
280 if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) 288 if (register_chrdev(MISC_MAJOR,"misc",&misc_fops))
281 goto fail_printk; 289 goto fail_printk;
290 misc_class->nodename = misc_nodename;
282 return 0; 291 return 0;
283 292
284fail_printk: 293fail_printk:
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index db32f0e4c7dd..05f9d18b9361 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -261,6 +261,11 @@ static const struct file_operations raw_ctl_fops = {
261 261
262static struct cdev raw_cdev; 262static struct cdev raw_cdev;
263 263
264static char *raw_nodename(struct device *dev)
265{
266 return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
267}
268
264static int __init raw_init(void) 269static int __init raw_init(void)
265{ 270{
266 dev_t dev = MKDEV(RAW_MAJOR, 0); 271 dev_t dev = MKDEV(RAW_MAJOR, 0);
@@ -284,6 +289,7 @@ static int __init raw_init(void)
284 ret = PTR_ERR(raw_class); 289 ret = PTR_ERR(raw_class);
285 goto error_region; 290 goto error_region;
286 } 291 }
292 raw_class->nodename = raw_nodename;
287 device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl"); 293 device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");
288 294
289 return 0; 295 return 0;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index c796a86ab7f3..d9113b4c76e3 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -171,8 +171,9 @@ int do_poke_blanked_console;
171int console_blanked; 171int console_blanked;
172 172
173static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */ 173static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
174static int blankinterval = 10*60*HZ;
175static int vesa_off_interval; 174static int vesa_off_interval;
175static int blankinterval = 10*60;
176core_param(consoleblank, blankinterval, int, 0444);
176 177
177static DECLARE_WORK(console_work, console_callback); 178static DECLARE_WORK(console_work, console_callback);
178 179
@@ -1485,7 +1486,7 @@ static void setterm_command(struct vc_data *vc)
1485 update_attr(vc); 1486 update_attr(vc);
1486 break; 1487 break;
1487 case 9: /* set blanking interval */ 1488 case 9: /* set blanking interval */
1488 blankinterval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60 * HZ; 1489 blankinterval = ((vc->vc_par[1] < 60) ? vc->vc_par[1] : 60) * 60;
1489 poke_blanked_console(); 1490 poke_blanked_console();
1490 break; 1491 break;
1491 case 10: /* set bell frequency in Hz */ 1492 case 10: /* set bell frequency in Hz */
@@ -2871,7 +2872,7 @@ static int __init con_init(void)
2871 2872
2872 if (blankinterval) { 2873 if (blankinterval) {
2873 blank_state = blank_normal_wait; 2874 blank_state = blank_normal_wait;
2874 mod_timer(&console_timer, jiffies + blankinterval); 2875 mod_timer(&console_timer, jiffies + (blankinterval * HZ));
2875 } 2876 }
2876 2877
2877 for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) { 2878 for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) {
@@ -3677,7 +3678,7 @@ void do_unblank_screen(int leaving_gfx)
3677 return; /* but leave console_blanked != 0 */ 3678 return; /* but leave console_blanked != 0 */
3678 3679
3679 if (blankinterval) { 3680 if (blankinterval) {
3680 mod_timer(&console_timer, jiffies + blankinterval); 3681 mod_timer(&console_timer, jiffies + (blankinterval * HZ));
3681 blank_state = blank_normal_wait; 3682 blank_state = blank_normal_wait;
3682 } 3683 }
3683 3684
@@ -3711,7 +3712,7 @@ void unblank_screen(void)
3711static void blank_screen_t(unsigned long dummy) 3712static void blank_screen_t(unsigned long dummy)
3712{ 3713{
3713 if (unlikely(!keventd_up())) { 3714 if (unlikely(!keventd_up())) {
3714 mod_timer(&console_timer, jiffies + blankinterval); 3715 mod_timer(&console_timer, jiffies + (blankinterval * HZ));
3715 return; 3716 return;
3716 } 3717 }
3717 blank_timer_expired = 1; 3718 blank_timer_expired = 1;
@@ -3741,7 +3742,7 @@ void poke_blanked_console(void)
3741 if (console_blanked) 3742 if (console_blanked)
3742 unblank_screen(); 3743 unblank_screen();
3743 else if (blankinterval) { 3744 else if (blankinterval) {
3744 mod_timer(&console_timer, jiffies + blankinterval); 3745 mod_timer(&console_timer, jiffies + (blankinterval * HZ));
3745 blank_state = blank_normal_wait; 3746 blank_state = blank_normal_wait;
3746 } 3747 }
3747} 3748}
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 40bd8c61c7d7..72a633a6ec98 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/acpi_pmtmr.h> 19#include <linux/acpi_pmtmr.h>
20#include <linux/clocksource.h> 20#include <linux/clocksource.h>
21#include <linux/timex.h>
21#include <linux/errno.h> 22#include <linux/errno.h>
22#include <linux/init.h> 23#include <linux/init.h>
23#include <linux/pci.h> 24#include <linux/pci.h>
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7a74d175287b..7fc58af748b4 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -42,27 +42,12 @@
42 * this governor will not work. 42 * this governor will not work.
43 * All times here are in uS. 43 * All times here are in uS.
44 */ 44 */
45static unsigned int def_sampling_rate;
46#define MIN_SAMPLING_RATE_RATIO (2) 45#define MIN_SAMPLING_RATE_RATIO (2)
47/* for correct statistics, we need at least 10 ticks between each measure */
48#define MIN_STAT_SAMPLING_RATE \
49 (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
50#define MIN_SAMPLING_RATE \
51 (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
52/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
53 * Define the minimal settable sampling rate to the greater of:
54 * - "HW transition latency" * 100 (same as default sampling / 10)
55 * - MIN_STAT_SAMPLING_RATE
56 * To avoid that userspace shoots itself.
57*/
58static unsigned int minimum_sampling_rate(void)
59{
60 return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
61}
62 46
63/* This will also vanish soon with removing sampling_rate_max */ 47static unsigned int min_sampling_rate;
64#define MAX_SAMPLING_RATE (500 * def_sampling_rate) 48
65#define LATENCY_MULTIPLIER (1000) 49#define LATENCY_MULTIPLIER (1000)
50#define MIN_LATENCY_MULTIPLIER (100)
66#define DEF_SAMPLING_DOWN_FACTOR (1) 51#define DEF_SAMPLING_DOWN_FACTOR (1)
67#define MAX_SAMPLING_DOWN_FACTOR (10) 52#define MAX_SAMPLING_DOWN_FACTOR (10)
68#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) 53#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
@@ -182,27 +167,14 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
182/************************** sysfs interface ************************/ 167/************************** sysfs interface ************************/
183static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) 168static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
184{ 169{
185 static int print_once; 170 printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
186 171 "sysfs file is deprecated - used by: %s\n", current->comm);
187 if (!print_once) { 172 return sprintf(buf, "%u\n", -1U);
188 printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
189 "sysfs file is deprecated - used by: %s\n",
190 current->comm);
191 print_once = 1;
192 }
193 return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
194} 173}
195 174
196static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) 175static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
197{ 176{
198 static int print_once; 177 return sprintf(buf, "%u\n", min_sampling_rate);
199
200 if (!print_once) {
201 printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
202 "sysfs file is deprecated - used by: %s\n", current->comm);
203 print_once = 1;
204 }
205 return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
206} 178}
207 179
208#define define_one_ro(_name) \ 180#define define_one_ro(_name) \
@@ -254,7 +226,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
254 return -EINVAL; 226 return -EINVAL;
255 227
256 mutex_lock(&dbs_mutex); 228 mutex_lock(&dbs_mutex);
257 dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate()); 229 dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
258 mutex_unlock(&dbs_mutex); 230 mutex_unlock(&dbs_mutex);
259 231
260 return count; 232 return count;
@@ -601,11 +573,18 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
601 if (latency == 0) 573 if (latency == 0)
602 latency = 1; 574 latency = 1;
603 575
604 def_sampling_rate = 576 /*
605 max(latency * LATENCY_MULTIPLIER, 577 * conservative does not implement micro like ondemand
606 MIN_STAT_SAMPLING_RATE); 578 * governor, thus we are bound to jiffes/HZ
607 579 */
608 dbs_tuners_ins.sampling_rate = def_sampling_rate; 580 min_sampling_rate =
581 MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
582 /* Bring kernel and HW constraints together */
583 min_sampling_rate = max(min_sampling_rate,
584 MIN_LATENCY_MULTIPLIER * latency);
585 dbs_tuners_ins.sampling_rate =
586 max(min_sampling_rate,
587 latency * LATENCY_MULTIPLIER);
609 588
610 cpufreq_register_notifier( 589 cpufreq_register_notifier(
611 &dbs_cpufreq_notifier_block, 590 &dbs_cpufreq_notifier_block,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e741c339df76..1911d1729353 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -32,6 +32,7 @@
32#define DEF_FREQUENCY_UP_THRESHOLD (80) 32#define DEF_FREQUENCY_UP_THRESHOLD (80)
33#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) 33#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
34#define MICRO_FREQUENCY_UP_THRESHOLD (95) 34#define MICRO_FREQUENCY_UP_THRESHOLD (95)
35#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
35#define MIN_FREQUENCY_UP_THRESHOLD (11) 36#define MIN_FREQUENCY_UP_THRESHOLD (11)
36#define MAX_FREQUENCY_UP_THRESHOLD (100) 37#define MAX_FREQUENCY_UP_THRESHOLD (100)
37 38
@@ -45,27 +46,12 @@
45 * this governor will not work. 46 * this governor will not work.
46 * All times here are in uS. 47 * All times here are in uS.
47 */ 48 */
48static unsigned int def_sampling_rate;
49#define MIN_SAMPLING_RATE_RATIO (2) 49#define MIN_SAMPLING_RATE_RATIO (2)
50/* for correct statistics, we need at least 10 ticks between each measure */
51#define MIN_STAT_SAMPLING_RATE \
52 (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
53#define MIN_SAMPLING_RATE \
54 (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
55/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
56 * Define the minimal settable sampling rate to the greater of:
57 * - "HW transition latency" * 100 (same as default sampling / 10)
58 * - MIN_STAT_SAMPLING_RATE
59 * To avoid that userspace shoots itself.
60*/
61static unsigned int minimum_sampling_rate(void)
62{
63 return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
64}
65 50
66/* This will also vanish soon with removing sampling_rate_max */ 51static unsigned int min_sampling_rate;
67#define MAX_SAMPLING_RATE (500 * def_sampling_rate) 52
68#define LATENCY_MULTIPLIER (1000) 53#define LATENCY_MULTIPLIER (1000)
54#define MIN_LATENCY_MULTIPLIER (100)
69#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) 55#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
70 56
71static void do_dbs_timer(struct work_struct *work); 57static void do_dbs_timer(struct work_struct *work);
@@ -219,28 +205,14 @@ static void ondemand_powersave_bias_init(void)
219/************************** sysfs interface ************************/ 205/************************** sysfs interface ************************/
220static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) 206static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
221{ 207{
222 static int print_once; 208 printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
223 209 "sysfs file is deprecated - used by: %s\n", current->comm);
224 if (!print_once) { 210 return sprintf(buf, "%u\n", -1U);
225 printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
226 "sysfs file is deprecated - used by: %s\n",
227 current->comm);
228 print_once = 1;
229 }
230 return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
231} 211}
232 212
233static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf) 213static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
234{ 214{
235 static int print_once; 215 return sprintf(buf, "%u\n", min_sampling_rate);
236
237 if (!print_once) {
238 printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_min "
239 "sysfs file is deprecated - used by: %s\n",
240 current->comm);
241 print_once = 1;
242 }
243 return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
244} 216}
245 217
246#define define_one_ro(_name) \ 218#define define_one_ro(_name) \
@@ -274,7 +246,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
274 mutex_unlock(&dbs_mutex); 246 mutex_unlock(&dbs_mutex);
275 return -EINVAL; 247 return -EINVAL;
276 } 248 }
277 dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate()); 249 dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
278 mutex_unlock(&dbs_mutex); 250 mutex_unlock(&dbs_mutex);
279 251
280 return count; 252 return count;
@@ -619,12 +591,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
619 latency = policy->cpuinfo.transition_latency / 1000; 591 latency = policy->cpuinfo.transition_latency / 1000;
620 if (latency == 0) 592 if (latency == 0)
621 latency = 1; 593 latency = 1;
622 594 /* Bring kernel and HW constraints together */
623 def_sampling_rate = 595 min_sampling_rate = max(min_sampling_rate,
624 max(latency * LATENCY_MULTIPLIER, 596 MIN_LATENCY_MULTIPLIER * latency);
625 MIN_STAT_SAMPLING_RATE); 597 dbs_tuners_ins.sampling_rate =
626 598 max(min_sampling_rate,
627 dbs_tuners_ins.sampling_rate = def_sampling_rate; 599 latency * LATENCY_MULTIPLIER);
628 } 600 }
629 dbs_timer_init(this_dbs_info); 601 dbs_timer_init(this_dbs_info);
630 602
@@ -678,6 +650,16 @@ static int __init cpufreq_gov_dbs_init(void)
678 dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; 650 dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
679 dbs_tuners_ins.down_differential = 651 dbs_tuners_ins.down_differential =
680 MICRO_FREQUENCY_DOWN_DIFFERENTIAL; 652 MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
653 /*
654 * In no_hz/micro accounting case we set the minimum frequency
655 * not depending on HZ, but fixed (very low). The deferred
656 * timer might skip some samples if idle/sleeping as needed.
657 */
658 min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
659 } else {
660 /* For correct statistics, we need 10 ticks for each measure */
661 min_sampling_rate =
662 MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
681 } 663 }
682 664
683 kondemand_wq = create_workqueue("kondemand"); 665 kondemand_wq = create_workqueue("kondemand");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 3b3c01b6f1ee..070357aaedbc 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -81,6 +81,14 @@ config MX3_IPU_IRQS
81 To avoid bloating the irq_desc[] array we allocate a sufficient 81 To avoid bloating the irq_desc[] array we allocate a sufficient
82 number of IRQ slots and map them dynamically to specific sources. 82 number of IRQ slots and map them dynamically to specific sources.
83 83
84config TXX9_DMAC
85 tristate "Toshiba TXx9 SoC DMA support"
86 depends on MACH_TX49XX || MACH_TX39XX
87 select DMA_ENGINE
88 help
89 Support the TXx9 SoC internal DMA controller. This can be
90 integrated in chips such as the Toshiba TX4927/38/39.
91
84config DMA_ENGINE 92config DMA_ENGINE
85 bool 93 bool
86 94
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2e5dc96700d2..a0b6564800c4 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_FSL_DMA) += fsldma.o
8obj-$(CONFIG_MV_XOR) += mv_xor.o 8obj-$(CONFIG_MV_XOR) += mv_xor.o
9obj-$(CONFIG_DW_DMAC) += dw_dmac.o 9obj-$(CONFIG_DW_DMAC) += dw_dmac.o
10obj-$(CONFIG_MX3_IPU) += ipu/ 10obj-$(CONFIG_MX3_IPU) += ipu/
11obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
new file mode 100644
index 000000000000..9aa9ea9822c8
--- /dev/null
+++ b/drivers/dma/txx9dmac.c
@@ -0,0 +1,1354 @@
1/*
2 * Driver for the TXx9 SoC DMA Controller
3 *
4 * Copyright (C) 2009 Atsushi Nemoto
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/dma-mapping.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/slab.h>
17#include <linux/scatterlist.h>
18#include "txx9dmac.h"
19
20static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
21{
22 return container_of(chan, struct txx9dmac_chan, chan);
23}
24
25static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
26{
27 return dc->ch_regs;
28}
29
30static struct txx9dmac_cregs32 __iomem *__dma_regs32(
31 const struct txx9dmac_chan *dc)
32{
33 return dc->ch_regs;
34}
35
36#define channel64_readq(dc, name) \
37 __raw_readq(&(__dma_regs(dc)->name))
38#define channel64_writeq(dc, name, val) \
39 __raw_writeq((val), &(__dma_regs(dc)->name))
40#define channel64_readl(dc, name) \
41 __raw_readl(&(__dma_regs(dc)->name))
42#define channel64_writel(dc, name, val) \
43 __raw_writel((val), &(__dma_regs(dc)->name))
44
45#define channel32_readl(dc, name) \
46 __raw_readl(&(__dma_regs32(dc)->name))
47#define channel32_writel(dc, name, val) \
48 __raw_writel((val), &(__dma_regs32(dc)->name))
49
50#define channel_readq(dc, name) channel64_readq(dc, name)
51#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
52#define channel_readl(dc, name) \
53 (is_dmac64(dc) ? \
54 channel64_readl(dc, name) : channel32_readl(dc, name))
55#define channel_writel(dc, name, val) \
56 (is_dmac64(dc) ? \
57 channel64_writel(dc, name, val) : channel32_writel(dc, name, val))
58
59static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
60{
61 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
62 return channel64_readq(dc, CHAR);
63 else
64 return channel64_readl(dc, CHAR);
65}
66
67static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
68{
69 if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
70 channel64_writeq(dc, CHAR, val);
71 else
72 channel64_writel(dc, CHAR, val);
73}
74
75static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
76{
77#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
78 channel64_writel(dc, CHAR, 0);
79 channel64_writel(dc, __pad_CHAR, 0);
80#else
81 channel64_writeq(dc, CHAR, 0);
82#endif
83}
84
85static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
86{
87 if (is_dmac64(dc))
88 return channel64_read_CHAR(dc);
89 else
90 return channel32_readl(dc, CHAR);
91}
92
93static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
94{
95 if (is_dmac64(dc))
96 channel64_write_CHAR(dc, val);
97 else
98 channel32_writel(dc, CHAR, val);
99}
100
101static struct txx9dmac_regs __iomem *__txx9dmac_regs(
102 const struct txx9dmac_dev *ddev)
103{
104 return ddev->regs;
105}
106
107static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
108 const struct txx9dmac_dev *ddev)
109{
110 return ddev->regs;
111}
112
113#define dma64_readl(ddev, name) \
114 __raw_readl(&(__txx9dmac_regs(ddev)->name))
115#define dma64_writel(ddev, name, val) \
116 __raw_writel((val), &(__txx9dmac_regs(ddev)->name))
117
118#define dma32_readl(ddev, name) \
119 __raw_readl(&(__txx9dmac_regs32(ddev)->name))
120#define dma32_writel(ddev, name, val) \
121 __raw_writel((val), &(__txx9dmac_regs32(ddev)->name))
122
123#define dma_readl(ddev, name) \
124 (__is_dmac64(ddev) ? \
125 dma64_readl(ddev, name) : dma32_readl(ddev, name))
126#define dma_writel(ddev, name, val) \
127 (__is_dmac64(ddev) ? \
128 dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))
129
130static struct device *chan2dev(struct dma_chan *chan)
131{
132 return &chan->dev->device;
133}
134static struct device *chan2parent(struct dma_chan *chan)
135{
136 return chan->dev->device.parent;
137}
138
139static struct txx9dmac_desc *
140txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
141{
142 return container_of(txd, struct txx9dmac_desc, txd);
143}
144
145static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
146 const struct txx9dmac_desc *desc)
147{
148 return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
149}
150
151static void desc_write_CHAR(const struct txx9dmac_chan *dc,
152 struct txx9dmac_desc *desc, dma_addr_t val)
153{
154 if (is_dmac64(dc))
155 desc->hwdesc.CHAR = val;
156 else
157 desc->hwdesc32.CHAR = val;
158}
159
160#define TXX9_DMA_MAX_COUNT 0x04000000
161
162#define TXX9_DMA_INITIAL_DESC_COUNT 64
163
164static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
165{
166 return list_entry(dc->active_list.next,
167 struct txx9dmac_desc, desc_node);
168}
169
170static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
171{
172 return list_entry(dc->active_list.prev,
173 struct txx9dmac_desc, desc_node);
174}
175
176static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
177{
178 return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
179}
180
181static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
182{
183 if (!list_empty(&desc->txd.tx_list))
184 desc = list_entry(desc->txd.tx_list.prev,
185 struct txx9dmac_desc, desc_node);
186 return desc;
187}
188
189static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);
190
191static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
192 gfp_t flags)
193{
194 struct txx9dmac_dev *ddev = dc->ddev;
195 struct txx9dmac_desc *desc;
196
197 desc = kzalloc(sizeof(*desc), flags);
198 if (!desc)
199 return NULL;
200 dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
201 desc->txd.tx_submit = txx9dmac_tx_submit;
202 /* txd.flags will be overwritten in prep funcs */
203 desc->txd.flags = DMA_CTRL_ACK;
204 desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
205 ddev->descsize, DMA_TO_DEVICE);
206 return desc;
207}
208
209static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
210{
211 struct txx9dmac_desc *desc, *_desc;
212 struct txx9dmac_desc *ret = NULL;
213 unsigned int i = 0;
214
215 spin_lock_bh(&dc->lock);
216 list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
217 if (async_tx_test_ack(&desc->txd)) {
218 list_del(&desc->desc_node);
219 ret = desc;
220 break;
221 }
222 dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
223 i++;
224 }
225 spin_unlock_bh(&dc->lock);
226
227 dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
228 i);
229 if (!ret) {
230 ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
231 if (ret) {
232 spin_lock_bh(&dc->lock);
233 dc->descs_allocated++;
234 spin_unlock_bh(&dc->lock);
235 } else
236 dev_err(chan2dev(&dc->chan),
237 "not enough descriptors available\n");
238 }
239 return ret;
240}
241
242static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
243 struct txx9dmac_desc *desc)
244{
245 struct txx9dmac_dev *ddev = dc->ddev;
246 struct txx9dmac_desc *child;
247
248 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
249 dma_sync_single_for_cpu(chan2parent(&dc->chan),
250 child->txd.phys, ddev->descsize,
251 DMA_TO_DEVICE);
252 dma_sync_single_for_cpu(chan2parent(&dc->chan),
253 desc->txd.phys, ddev->descsize,
254 DMA_TO_DEVICE);
255}
256
257/*
258 * Move a descriptor, including any children, to the free list.
259 * `desc' must not be on any lists.
260 */
261static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
262 struct txx9dmac_desc *desc)
263{
264 if (desc) {
265 struct txx9dmac_desc *child;
266
267 txx9dmac_sync_desc_for_cpu(dc, desc);
268
269 spin_lock_bh(&dc->lock);
270 list_for_each_entry(child, &desc->txd.tx_list, desc_node)
271 dev_vdbg(chan2dev(&dc->chan),
272 "moving child desc %p to freelist\n",
273 child);
274 list_splice_init(&desc->txd.tx_list, &dc->free_list);
275 dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
276 desc);
277 list_add(&desc->desc_node, &dc->free_list);
278 spin_unlock_bh(&dc->lock);
279 }
280}
281
282/* Called with dc->lock held and bh disabled */
283static dma_cookie_t
284txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc)
285{
286 dma_cookie_t cookie = dc->chan.cookie;
287
288 if (++cookie < 0)
289 cookie = 1;
290
291 dc->chan.cookie = cookie;
292 desc->txd.cookie = cookie;
293
294 return cookie;
295}
296
297/*----------------------------------------------------------------------*/
298
299static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
300{
301 if (is_dmac64(dc))
302 dev_err(chan2dev(&dc->chan),
303 " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
304 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
305 (u64)channel64_read_CHAR(dc),
306 channel64_readq(dc, SAR),
307 channel64_readq(dc, DAR),
308 channel64_readl(dc, CNTR),
309 channel64_readl(dc, SAIR),
310 channel64_readl(dc, DAIR),
311 channel64_readl(dc, CCR),
312 channel64_readl(dc, CSR));
313 else
314 dev_err(chan2dev(&dc->chan),
315 " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
316 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
317 channel32_readl(dc, CHAR),
318 channel32_readl(dc, SAR),
319 channel32_readl(dc, DAR),
320 channel32_readl(dc, CNTR),
321 channel32_readl(dc, SAIR),
322 channel32_readl(dc, DAIR),
323 channel32_readl(dc, CCR),
324 channel32_readl(dc, CSR));
325}
326
327static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
328{
329 channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
330 if (is_dmac64(dc)) {
331 channel64_clear_CHAR(dc);
332 channel_writeq(dc, SAR, 0);
333 channel_writeq(dc, DAR, 0);
334 } else {
335 channel_writel(dc, CHAR, 0);
336 channel_writel(dc, SAR, 0);
337 channel_writel(dc, DAR, 0);
338 }
339 channel_writel(dc, CNTR, 0);
340 channel_writel(dc, SAIR, 0);
341 channel_writel(dc, DAIR, 0);
342 channel_writel(dc, CCR, 0);
343 mmiowb();
344}
345
346/* Called with dc->lock held and bh disabled */
347static void txx9dmac_dostart(struct txx9dmac_chan *dc,
348 struct txx9dmac_desc *first)
349{
350 struct txx9dmac_slave *ds = dc->chan.private;
351 u32 sai, dai;
352
353 dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
354 first->txd.cookie, first);
355 /* ASSERT: channel is idle */
356 if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
357 dev_err(chan2dev(&dc->chan),
358 "BUG: Attempted to start non-idle channel\n");
359 txx9dmac_dump_regs(dc);
360 /* The tasklet will hopefully advance the queue... */
361 return;
362 }
363
364 if (is_dmac64(dc)) {
365 channel64_writel(dc, CNTR, 0);
366 channel64_writel(dc, CSR, 0xffffffff);
367 if (ds) {
368 if (ds->tx_reg) {
369 sai = ds->reg_width;
370 dai = 0;
371 } else {
372 sai = 0;
373 dai = ds->reg_width;
374 }
375 } else {
376 sai = 8;
377 dai = 8;
378 }
379 channel64_writel(dc, SAIR, sai);
380 channel64_writel(dc, DAIR, dai);
381 /* All 64-bit DMAC supports SMPCHN */
382 channel64_writel(dc, CCR, dc->ccr);
383 /* Writing a non zero value to CHAR will assert XFACT */
384 channel64_write_CHAR(dc, first->txd.phys);
385 } else {
386 channel32_writel(dc, CNTR, 0);
387 channel32_writel(dc, CSR, 0xffffffff);
388 if (ds) {
389 if (ds->tx_reg) {
390 sai = ds->reg_width;
391 dai = 0;
392 } else {
393 sai = 0;
394 dai = ds->reg_width;
395 }
396 } else {
397 sai = 4;
398 dai = 4;
399 }
400 channel32_writel(dc, SAIR, sai);
401 channel32_writel(dc, DAIR, dai);
402 if (txx9_dma_have_SMPCHN()) {
403 channel32_writel(dc, CCR, dc->ccr);
404 /* Writing a non zero value to CHAR will assert XFACT */
405 channel32_writel(dc, CHAR, first->txd.phys);
406 } else {
407 channel32_writel(dc, CHAR, first->txd.phys);
408 channel32_writel(dc, CCR, dc->ccr);
409 }
410 }
411}
412
413/*----------------------------------------------------------------------*/
414
415static void
416txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
417 struct txx9dmac_desc *desc)
418{
419 dma_async_tx_callback callback;
420 void *param;
421 struct dma_async_tx_descriptor *txd = &desc->txd;
422 struct txx9dmac_slave *ds = dc->chan.private;
423
424 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
425 txd->cookie, desc);
426
427 dc->completed = txd->cookie;
428 callback = txd->callback;
429 param = txd->callback_param;
430
431 txx9dmac_sync_desc_for_cpu(dc, desc);
432 list_splice_init(&txd->tx_list, &dc->free_list);
433 list_move(&desc->desc_node, &dc->free_list);
434
435 /*
436 * We use dma_unmap_page() regardless of how the buffers were
437 * mapped before they were submitted...
438 */
439 if (!ds) {
440 dma_addr_t dmaaddr;
441 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
442 dmaaddr = is_dmac64(dc) ?
443 desc->hwdesc.DAR : desc->hwdesc32.DAR;
444 dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
445 desc->len, DMA_FROM_DEVICE);
446 }
447 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
448 dmaaddr = is_dmac64(dc) ?
449 desc->hwdesc.SAR : desc->hwdesc32.SAR;
450 dma_unmap_page(chan2parent(&dc->chan), dmaaddr,
451 desc->len, DMA_TO_DEVICE);
452 }
453 }
454
455 /*
456 * The API requires that no submissions are done from a
457 * callback, so we don't need to drop the lock here
458 */
459 if (callback)
460 callback(param);
461 dma_run_dependencies(txd);
462}
463
464static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
465{
466 struct txx9dmac_dev *ddev = dc->ddev;
467 struct txx9dmac_desc *desc;
468 struct txx9dmac_desc *prev = NULL;
469
470 BUG_ON(!list_empty(list));
471 do {
472 desc = txx9dmac_first_queued(dc);
473 if (prev) {
474 desc_write_CHAR(dc, prev, desc->txd.phys);
475 dma_sync_single_for_device(chan2parent(&dc->chan),
476 prev->txd.phys, ddev->descsize,
477 DMA_TO_DEVICE);
478 }
479 prev = txx9dmac_last_child(desc);
480 list_move_tail(&desc->desc_node, list);
481 /* Make chain-completion interrupt happen */
482 if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
483 !txx9dmac_chan_INTENT(dc))
484 break;
485 } while (!list_empty(&dc->queue));
486}
487
488static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
489{
490 struct txx9dmac_desc *desc, *_desc;
491 LIST_HEAD(list);
492
493 /*
494 * Submit queued descriptors ASAP, i.e. before we go through
495 * the completed ones.
496 */
497 list_splice_init(&dc->active_list, &list);
498 if (!list_empty(&dc->queue)) {
499 txx9dmac_dequeue(dc, &dc->active_list);
500 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
501 }
502
503 list_for_each_entry_safe(desc, _desc, &list, desc_node)
504 txx9dmac_descriptor_complete(dc, desc);
505}
506
507static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
508 struct txx9dmac_hwdesc *desc)
509{
510 if (is_dmac64(dc)) {
511#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
512 dev_crit(chan2dev(&dc->chan),
513 " desc: ch%#llx s%#llx d%#llx c%#x\n",
514 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
515#else
516 dev_crit(chan2dev(&dc->chan),
517 " desc: ch%#llx s%#llx d%#llx c%#x"
518 " si%#x di%#x cc%#x cs%#x\n",
519 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
520 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
521#endif
522 } else {
523 struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
524#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
525 dev_crit(chan2dev(&dc->chan),
526 " desc: ch%#x s%#x d%#x c%#x\n",
527 d->CHAR, d->SAR, d->DAR, d->CNTR);
528#else
529 dev_crit(chan2dev(&dc->chan),
530 " desc: ch%#x s%#x d%#x c%#x"
531 " si%#x di%#x cc%#x cs%#x\n",
532 d->CHAR, d->SAR, d->DAR, d->CNTR,
533 d->SAIR, d->DAIR, d->CCR, d->CSR);
534#endif
535 }
536}
537
/*
 * Handle an Abnormal Chain Completion (ABCHC): log the failing
 * descriptor, clear the error bits, restart the channel with whatever
 * work remains, and complete the bad descriptor as if it succeeded
 * (the dmaengine API of this era has no way to report per-descriptor
 * errors to clients).
 */
static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
{
	struct txx9dmac_desc *bad_desc;
	struct txx9dmac_desc *child;
	u32 errors;

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
	txx9dmac_dump_regs(dc);

	bad_desc = txx9dmac_first_active(dc);
	list_del_init(&bad_desc->desc_node);

	/* Clear all error flags and try to restart the controller */
	errors = csr & (TXX9_DMA_CSR_ABCHC |
			TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
			TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
	channel_writel(dc, CSR, errors);

	/* Refill the active list from the queue and restart the chain. */
	if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
		txx9dmac_dequeue(dc, &dc->active_list);
	if (!list_empty(&dc->active_list))
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));

	dev_crit(chan2dev(&dc->chan),
		 "Bad descriptor submitted for DMA! (cookie: %d)\n",
		 bad_desc->txd.cookie);
	txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
	list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
		txx9dmac_dump_desc(dc, &child->hwdesc);
	/* Pretend the descriptor completed successfully */
	txx9dmac_descriptor_complete(dc, bad_desc);
}
575
/*
 * Walk the active list and complete every descriptor the hardware has
 * finished.  The descriptor whose address matches the channel's current
 * CHAR (chain address register) is still in flight; everything before
 * it is done.  Called with dc->lock held.
 */
static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
{
	dma_addr_t chain;
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *child;
	u32 csr;

	/* Read current chain pointer and status; writing CSR back acks it. */
	if (is_dmac64(dc)) {
		chain = channel64_read_CHAR(dc);
		csr = channel64_readl(dc, CSR);
		channel64_writel(dc, CSR, csr);
	} else {
		chain = channel32_readl(dc, CHAR);
		csr = channel32_readl(dc, CSR);
		channel32_writel(dc, CSR, csr);
	}
	/* For dynamic chain, we should look at XFACT instead of NCHNC */
	if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
		/* Everything we've submitted is done */
		txx9dmac_complete_all(dc);
		return;
	}
	if (!(csr & TXX9_DMA_CSR_CHNEN))
		chain = 0;	/* last descriptor of this chain */

	dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
		 (u64)chain);

	list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
		if (desc_read_CHAR(dc, desc) == chain) {
			/* This one is currently in progress */
			if (csr & TXX9_DMA_CSR_ABCHC)
				goto scan_done;
			return;
		}

		/* Children of a multi-part transfer may also match CHAR. */
		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			if (desc_read_CHAR(dc, child) == chain) {
				/* Currently in progress */
				if (csr & TXX9_DMA_CSR_ABCHC)
					goto scan_done;
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		txx9dmac_descriptor_complete(dc, desc);
	}
scan_done:
	if (csr & TXX9_DMA_CSR_ABCHC) {
		txx9dmac_handle_error(dc, csr);
		return;
	}

	/* Active but nothing matched CHAR: state is inconsistent. */
	dev_err(chan2dev(&dc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	txx9dmac_reset_chan(dc);

	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}
}
643
644static void txx9dmac_chan_tasklet(unsigned long data)
645{
646 int irq;
647 u32 csr;
648 struct txx9dmac_chan *dc;
649
650 dc = (struct txx9dmac_chan *)data;
651 csr = channel_readl(dc, CSR);
652 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);
653
654 spin_lock(&dc->lock);
655 if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
656 TXX9_DMA_CSR_NTRNFC))
657 txx9dmac_scan_descriptors(dc);
658 spin_unlock(&dc->lock);
659 irq = dc->irq;
660
661 enable_irq(irq);
662}
663
/*
 * Hard IRQ handler for the per-channel interrupt: defer all work to the
 * channel tasklet and mask the line until the tasklet has run.
 */
static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_chan *dc = dev_id;

	dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
		 channel_readl(dc, CSR));

	tasklet_schedule(&dc->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}
680
681static void txx9dmac_tasklet(unsigned long data)
682{
683 int irq;
684 u32 csr;
685 struct txx9dmac_chan *dc;
686
687 struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
688 u32 mcr;
689 int i;
690
691 mcr = dma_readl(ddev, MCR);
692 dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
693 for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
694 if ((mcr >> (24 + i)) & 0x11) {
695 dc = ddev->chan[i];
696 csr = channel_readl(dc, CSR);
697 dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
698 csr);
699 spin_lock(&dc->lock);
700 if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
701 TXX9_DMA_CSR_NTRNFC))
702 txx9dmac_scan_descriptors(dc);
703 spin_unlock(&dc->lock);
704 }
705 }
706 irq = ddev->irq;
707
708 enable_irq(irq);
709}
710
/*
 * Hard IRQ handler for the shared (device-wide) interrupt: defer to the
 * device tasklet and mask the line until it has run.
 */
static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_dev *ddev = dev_id;

	dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
		 dma_readl(ddev, MCR));

	tasklet_schedule(&ddev->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}
727
728/*----------------------------------------------------------------------*/
729
/*
 * dmaengine ->tx_submit hook: assign a cookie and append the descriptor
 * to the channel's queue.  Nothing is started here; the transfer begins
 * when txx9dmac_issue_pending() is called.
 */
static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
	struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dc->lock);
	cookie = txx9dmac_assign_cookie(dc, desc);

	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
		 desc->txd.cookie, desc);

	list_add_tail(&desc->desc_node, &dc->queue);
	spin_unlock_bh(&dc->lock);

	return cookie;
}
747
/*
 * txx9dmac_prep_dma_memcpy - build a descriptor chain for a mem-to-mem copy
 * @chan: DMA channel (must be the dedicated memcpy channel)
 * @dest: destination bus address
 * @src: source bus address
 * @len: number of bytes to copy
 * @flags: dmaengine DMA_PREP_* flags
 *
 * Splits @len into TXX9_DMA_MAX_COUNT-sized pieces and links one
 * descriptor per piece.  Returns the first descriptor's txd, or NULL on
 * zero length / descriptor exhaustion.  The chain is only queued at
 * tx_submit time and started by issue_pending.
 */
static struct dma_async_tx_descriptor *
txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *first;
	struct txx9dmac_desc *prev;
	size_t xfer_count;
	size_t offset;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
		 (u64)dest, (u64)src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
		/*
		 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
		 * ERT-TX49H4-016 (slightly conservative)
		 */
		if (__is_dmac64(ddev)) {
			if (xfer_count > 0x100 &&
			    (xfer_count & 0xff) >= 0xfa &&
			    (xfer_count & 0xff) <= 0xff)
				xfer_count -= 0x20;
		} else {
			if (xfer_count > 0x80 &&
			    (xfer_count & 0x7f) >= 0x7e &&
			    (xfer_count & 0x7f) <= 0x7f)
				xfer_count -= 0x20;
		}

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			/* Out of descriptors: release the partial chain. */
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		if (__is_dmac64(ddev)) {
			desc->hwdesc.SAR = src + offset;
			desc->hwdesc.DAR = dest + offset;
			desc->hwdesc.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		} else {
			desc->hwdesc32.SAR = src + offset;
			desc->hwdesc32.DAR = dest + offset;
			desc->hwdesc32.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		}

		/*
		 * The descriptors on tx_list are not reachable from
		 * the dc->queue list or dc->active_list after a
		 * submit. If we put all descriptors on active_list,
		 * calling of callback on the completion will be more
		 * complex.
		 */
		if (!first) {
			first = desc;
		} else {
			/* Link into the chain and flush to device memory. */
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->txd.tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	/* Terminate the chain (CHAR == 0) and flush the last descriptor. */
	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;
}
842
/*
 * txx9dmac_prep_slave_sg - build a descriptor chain for a slave transfer
 * @chan: DMA channel (its ->private must be a struct txx9dmac_slave)
 * @sgl: scatterlist of memory segments
 * @sg_len: number of entries in @sgl
 * @direction: DMA_TO_DEVICE (tx_reg set) or DMA_FROM_DEVICE (rx_reg set)
 * @flags: dmaengine DMA_PREP_* flags
 *
 * One descriptor is built per scatterlist entry, moving data between
 * memory and the fixed device FIFO register.  Returns the first
 * descriptor's txd, or NULL on empty sg list / descriptor exhaustion.
 */
static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *prev;
	struct txx9dmac_desc *first;
	unsigned int i;
	struct scatterlist *sg;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	/* Slave config must exist and direction must match its wiring. */
	BUG_ON(!ds || !ds->reg_width);
	if (ds->tx_reg)
		BUG_ON(direction != DMA_TO_DEVICE);
	else
		BUG_ON(direction != DMA_FROM_DEVICE);
	if (unlikely(!sg_len))
		return NULL;

	prev = first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct txx9dmac_desc *desc;
		dma_addr_t mem;
		u32 sai, dai;

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			/* Out of descriptors: release the partial chain. */
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		mem = sg_dma_address(sg);

		if (__is_dmac64(ddev)) {
			if (direction == DMA_TO_DEVICE) {
				desc->hwdesc.SAR = mem;
				desc->hwdesc.DAR = ds->tx_reg;
			} else {
				desc->hwdesc.SAR = ds->rx_reg;
				desc->hwdesc.DAR = mem;
			}
			desc->hwdesc.CNTR = sg_dma_len(sg);
		} else {
			if (direction == DMA_TO_DEVICE) {
				desc->hwdesc32.SAR = mem;
				desc->hwdesc32.DAR = ds->tx_reg;
			} else {
				desc->hwdesc32.SAR = ds->rx_reg;
				desc->hwdesc32.DAR = mem;
			}
			desc->hwdesc32.CNTR = sg_dma_len(sg);
		}
		/* Only the memory side address increments; the FIFO is fixed. */
		if (direction == DMA_TO_DEVICE) {
			sai = ds->reg_width;
			dai = 0;
		} else {
			sai = 0;
			dai = ds->reg_width;
		}
		txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
					dc->ccr | TXX9_DMA_CCR_XFACT);

		if (!first) {
			first = desc;
		} else {
			/* Link into the chain and flush to device memory. */
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys,
					ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->txd.tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	/* Terminate the chain (CHAR == 0) and flush the last descriptor. */
	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = 0;

	return &first->txd;
}
938
/*
 * dmaengine ->device_terminate_all: stop the channel and complete every
 * in-flight and queued descriptor.
 */
static void txx9dmac_terminate_all(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "terminate_all\n");
	spin_lock_bh(&dc->lock);

	txx9dmac_reset_chan(dc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dc->queue, &list);
	list_splice_init(&dc->active_list, &list);

	spin_unlock_bh(&dc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);
}
960
961static enum dma_status
962txx9dmac_is_tx_complete(struct dma_chan *chan,
963 dma_cookie_t cookie,
964 dma_cookie_t *done, dma_cookie_t *used)
965{
966 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
967 dma_cookie_t last_used;
968 dma_cookie_t last_complete;
969 int ret;
970
971 last_complete = dc->completed;
972 last_used = chan->cookie;
973
974 ret = dma_async_is_complete(cookie, last_complete, last_used);
975 if (ret != DMA_SUCCESS) {
976 spin_lock_bh(&dc->lock);
977 txx9dmac_scan_descriptors(dc);
978 spin_unlock_bh(&dc->lock);
979
980 last_complete = dc->completed;
981 last_used = chan->cookie;
982
983 ret = dma_async_is_complete(cookie, last_complete, last_used);
984 }
985
986 if (done)
987 *done = last_complete;
988 if (used)
989 *used = last_used;
990
991 return ret;
992}
993
/*
 * Append the next queued chain to a (possibly still running) chain by
 * patching the last descriptor's CHAR.  If the hardware already stopped
 * at exactly that descriptor (CHNEN clear and CHAR pointing at it), the
 * write raced with completion, so kick the channel manually.
 */
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
				   struct txx9dmac_desc *prev)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	LIST_HEAD(list);

	prev = txx9dmac_last_child(prev);
	txx9dmac_dequeue(dc, &list);
	desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
	desc_write_CHAR(dc, prev, desc->txd.phys);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);
	mmiowb();
	if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
	    channel_read_CHAR(dc) == prev->txd.phys)
		/* Restart chain DMA */
		channel_write_CHAR(dc, desc->txd.phys);
	list_splice_tail(&list, &dc->active_list);
}
1015
/*
 * dmaengine ->device_issue_pending: start queued work.  If the channel
 * is idle, move the queue onto the active list and start it; if it is
 * running and simple-chain (SMPCHN) hardware is available, try to
 * append dynamically to the running chain instead.
 */
static void txx9dmac_issue_pending(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);

	spin_lock_bh(&dc->lock);

	if (!list_empty(&dc->active_list))
		txx9dmac_scan_descriptors(dc);
	if (!list_empty(&dc->queue)) {
		if (list_empty(&dc->active_list)) {
			txx9dmac_dequeue(dc, &dc->active_list);
			txx9dmac_dostart(dc, txx9dmac_first_active(dc));
		} else if (txx9_dma_have_SMPCHN()) {
			struct txx9dmac_desc *prev = txx9dmac_last_active(dc);

			/* Only chain on if no chain-completion IRQ is pending. */
			if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
			    txx9dmac_chan_INTENT(dc))
				txx9dmac_chain_dynamic(dc, prev);
		}
	}

	spin_unlock_bh(&dc->lock);
}
1039
/*
 * dmaengine ->device_alloc_chan_resources: configure the channel control
 * word for memcpy or slave use and pre-allocate an initial descriptor
 * pool.  Returns the number of descriptors allocated or a negative errno.
 */
static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *desc;
	int i;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dc->completed = chan->cookie = 1;

	dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
	txx9dmac_chan_set_SMPCHN(dc);
	if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
		dc->ccr |= TXX9_DMA_CCR_INTENC;
	if (chan->device->device_prep_dma_memcpy) {
		/* memcpy channel: no slave config allowed */
		if (ds)
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
	} else {
		/* slave channel: exactly one of tx_reg/rx_reg must be set */
		if (!ds ||
		    (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_EXTRQ |
			TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
		txx9dmac_chan_set_INTENT(dc);
	}

	/* Fill the free list; drop the lock around each GFP_KERNEL alloc. */
	spin_lock_bh(&dc->lock);
	i = dc->descs_allocated;
	while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
		spin_unlock_bh(&dc->lock);

		desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dc->lock);
			break;
		}
		txx9dmac_desc_put(dc, desc);

		spin_lock_bh(&dc->lock);
		i = ++dc->descs_allocated;
	}
	spin_unlock_bh(&dc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}
1098
/*
 * dmaengine ->device_free_chan_resources: release the descriptor pool.
 * The channel must be completely idle (no active or queued work).
 */
static void txx9dmac_free_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
		dc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dc->active_list));
	BUG_ON(!list_empty(&dc->queue));
	BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);

	spin_lock_bh(&dc->lock);
	list_splice_init(&dc->free_list, &list);
	dc->descs_allocated = 0;
	spin_unlock_bh(&dc->lock);

	/* Unmap and free each descriptor outside the lock. */
	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				ddev->descsize, DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}
1128
1129/*----------------------------------------------------------------------*/
1130
/* Disable the whole controller by clearing the master control register. */
static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
	dma_writel(ddev, MCR, 0);
	mmiowb();
}
1136
/*
 * Probe one DMA channel platform device.  The parent controller device
 * (cpdata->dmac_dev) must already be probed; its drvdata supplies the
 * txx9dmac_dev.  The channel selected by pdata->memcpy_chan becomes the
 * public memcpy channel; all others are private slave channels.
 */
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
	struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data;
	struct platform_device *dmac_dev = cpdata->dmac_dev;
	struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data;
	struct txx9dmac_chan *dc;
	int err;
	int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
	int irq;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->dma.dev = &pdev->dev;
	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
	dc->dma.device_terminate_all = txx9dmac_terminate_all;
	dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
	dc->dma.device_issue_pending = txx9dmac_issue_pending;
	if (pdata && pdata->memcpy_chan == ch) {
		dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
		dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
	} else {
		dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
		dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
		dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
	}

	INIT_LIST_HEAD(&dc->dma.channels);
	dc->ddev = platform_get_drvdata(dmac_dev);
	/*
	 * If the controller has no shared IRQ, each channel has its own
	 * IRQ line and its own tasklet.
	 */
	if (dc->ddev->irq < 0) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;
		tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
				(unsigned long)dc);
		dc->irq = irq;
		err = devm_request_irq(&pdev->dev, dc->irq,
			txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
		if (err)
			return err;
	} else
		dc->irq = -1;
	dc->ddev->chan[ch] = dc;
	dc->chan.device = &dc->dma;
	list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
	dc->chan.cookie = dc->completed = 1;

	/* Select the 64- or 32-bit register block for this channel. */
	if (is_dmac64(dc))
		dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
	else
		dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
	spin_lock_init(&dc->lock);

	INIT_LIST_HEAD(&dc->active_list);
	INIT_LIST_HEAD(&dc->queue);
	INIT_LIST_HEAD(&dc->free_list);

	txx9dmac_reset_chan(dc);

	platform_set_drvdata(pdev, dc);

	err = dma_async_device_register(&dc->dma);
	if (err)
		return err;
	dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
		dc->dma.dev_id,
		dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
		dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");

	return 0;
}
1210
1211static int __exit txx9dmac_chan_remove(struct platform_device *pdev)
1212{
1213 struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
1214
1215 dma_async_device_unregister(&dc->dma);
1216 if (dc->irq >= 0)
1217 tasklet_kill(&dc->tasklet);
1218 dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
1219 return 0;
1220}
1221
1222static int __init txx9dmac_probe(struct platform_device *pdev)
1223{
1224 struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
1225 struct resource *io;
1226 struct txx9dmac_dev *ddev;
1227 u32 mcr;
1228 int err;
1229
1230 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1231 if (!io)
1232 return -EINVAL;
1233
1234 ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
1235 if (!ddev)
1236 return -ENOMEM;
1237
1238 if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
1239 dev_name(&pdev->dev)))
1240 return -EBUSY;
1241
1242 ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
1243 if (!ddev->regs)
1244 return -ENOMEM;
1245 ddev->have_64bit_regs = pdata->have_64bit_regs;
1246 if (__is_dmac64(ddev))
1247 ddev->descsize = sizeof(struct txx9dmac_hwdesc);
1248 else
1249 ddev->descsize = sizeof(struct txx9dmac_hwdesc32);
1250
1251 /* force dma off, just in case */
1252 txx9dmac_off(ddev);
1253
1254 ddev->irq = platform_get_irq(pdev, 0);
1255 if (ddev->irq >= 0) {
1256 tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
1257 (unsigned long)ddev);
1258 err = devm_request_irq(&pdev->dev, ddev->irq,
1259 txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
1260 if (err)
1261 return err;
1262 }
1263
1264 mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
1265 if (pdata && pdata->memcpy_chan >= 0)
1266 mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
1267 dma_writel(ddev, MCR, mcr);
1268
1269 platform_set_drvdata(pdev, ddev);
1270 return 0;
1271}
1272
/* Remove the controller device: disable DMA and stop the shared tasklet. */
static int __exit txx9dmac_remove(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	/* The tasklet only exists when a shared IRQ was requested. */
	if (ddev->irq >= 0)
		tasklet_kill(&ddev->tasklet);
	return 0;
}
1282
/* Quiesce DMA on system shutdown/kexec. */
static void txx9dmac_shutdown(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
}
1289
/* Disable the controller late in the suspend sequence. */
static int txx9dmac_suspend_late(struct platform_device *pdev,
				 pm_message_t mesg)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	return 0;
}
1298
1299static int txx9dmac_resume_early(struct platform_device *pdev)
1300{
1301 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1302 struct txx9dmac_platform_data *pdata = pdev->dev.platform_data;
1303 u32 mcr;
1304
1305 mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
1306 if (pdata && pdata->memcpy_chan >= 0)
1307 mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
1308 dma_writel(ddev, MCR, mcr);
1309 return 0;
1310
1311}
1312
/*
 * Per-channel platform driver.  No .probe here: txx9dmac_init()
 * registers it via platform_driver_probe() with txx9dmac_chan_probe,
 * which lets the probe function live in __init memory.
 */
static struct platform_driver txx9dmac_chan_driver = {
	.remove		= __exit_p(txx9dmac_chan_remove),
	.driver = {
		.name	= "txx9dmac-chan",
	},
};
1319
/*
 * Controller platform driver.  As above, .probe is supplied to
 * platform_driver_probe() at init time rather than stored here.
 */
static struct platform_driver txx9dmac_driver = {
	.remove		= __exit_p(txx9dmac_remove),
	.shutdown	= txx9dmac_shutdown,
	.suspend_late	= txx9dmac_suspend_late,
	.resume_early	= txx9dmac_resume_early,
	.driver = {
		.name	= "txx9dmac",
	},
};
1329
1330static int __init txx9dmac_init(void)
1331{
1332 int rc;
1333
1334 rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
1335 if (!rc) {
1336 rc = platform_driver_probe(&txx9dmac_chan_driver,
1337 txx9dmac_chan_probe);
1338 if (rc)
1339 platform_driver_unregister(&txx9dmac_driver);
1340 }
1341 return rc;
1342}
1343module_init(txx9dmac_init);
1344
/* Unregister in reverse order: channels first, then the controller. */
static void __exit txx9dmac_exit(void)
{
	platform_driver_unregister(&txx9dmac_chan_driver);
	platform_driver_unregister(&txx9dmac_driver);
}
1351
1352MODULE_LICENSE("GPL");
1353MODULE_DESCRIPTION("TXx9 DMA Controller driver");
1354MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h
new file mode 100644
index 000000000000..c907ff01d276
--- /dev/null
+++ b/drivers/dma/txx9dmac.h
@@ -0,0 +1,307 @@
1/*
2 * Driver for the TXx9 SoC DMA Controller
3 *
4 * Copyright (C) 2009 Atsushi Nemoto
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef TXX9DMAC_H
11#define TXX9DMAC_H
12
13#include <linux/dmaengine.h>
14#include <asm/txx9/dmac.h>
15
16/*
17 * Design Notes:
18 *
19 * This DMAC have four channels and one FIFO buffer. Each channel can
20 * be configured for memory-memory or device-memory transfer, but only
21 * one channel can do alignment-free memory-memory transfer at a time
22 * while the channel should occupy the FIFO buffer for effective
23 * transfers.
24 *
25 * Instead of dynamically assign the FIFO buffer to channels, I chose
26 * make one dedicated channel for memory-memory transfer. The
27 * dedicated channel is public. Other channels are private and used
28 * for slave transfer. Some devices in the SoC are wired to certain
29 * DMA channel.
30 */
31
/*
 * SMPCHN ("simple chain") descriptor mode is only available on TX49XX
 * parts; there the driver also uses the compact simple-chain hardware
 * descriptor layout (TXX9_DMA_USE_SIMPLE_CHAIN).
 */
#ifdef CONFIG_MACH_TX49XX
static inline bool txx9_dma_have_SMPCHN(void)
{
	return true;
}
#define TXX9_DMA_USE_SIMPLE_CHAIN
#else
static inline bool txx9_dma_have_SMPCHN(void)
{
	return false;
}
#endif
44
45#ifdef __LITTLE_ENDIAN
46#ifdef CONFIG_MACH_TX49XX
47#define CCR_LE TXX9_DMA_CCR_LE
48#define MCR_LE 0
49#else
50#define CCR_LE 0
51#define MCR_LE TXX9_DMA_MCR_LE
52#endif
53#else
54#define CCR_LE 0
55#define MCR_LE 0
56#endif
57
58/*
59 * Redefine this macro to handle differences between 32- and 64-bit
60 * addressing, big vs. little endian, etc.
61 */
62#ifdef __BIG_ENDIAN
63#define TXX9_DMA_REG32(name) u32 __pad_##name; u32 name
64#else
65#define TXX9_DMA_REG32(name) u32 name; u32 __pad_##name
66#endif
67
68/* Hardware register definitions. */
/* Hardware register definitions. */
struct txx9dmac_cregs {
#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
	TXX9_DMA_REG32(CHAR);	/* Chain Address Register */
#else
	u64 CHAR;		/* Chain Address Register */
#endif
	u64 SAR;		/* Source Address Register */
	u64 DAR;		/* Destination Address Register */
	TXX9_DMA_REG32(CNTR);	/* Count Register */
	TXX9_DMA_REG32(SAIR);	/* Source Address Increment Register */
	TXX9_DMA_REG32(DAIR);	/* Destination Address Increment Register */
	TXX9_DMA_REG32(CCR);	/* Channel Control Register */
	TXX9_DMA_REG32(CSR);	/* Channel Status Register */
};
/* Same per-channel registers, 32-bit controller layout. */
struct txx9dmac_cregs32 {
	u32 CHAR;
	u32 SAR;
	u32 DAR;
	u32 CNTR;
	u32 SAIR;
	u32 DAIR;
	u32 CCR;
	u32 CSR;
};
93
/* Whole-controller register map (64-bit layout). */
struct txx9dmac_regs {
	/* per-channel registers */
	struct txx9dmac_cregs	CHAN[TXX9_DMA_MAX_NR_CHANNELS];
	u64	__pad[9];
	u64	MFDR;		/* Memory Fill Data Register */
	TXX9_DMA_REG32(MCR);	/* Master Control Register */
};
/* Whole-controller register map (32-bit layout). */
struct txx9dmac_regs32 {
	struct txx9dmac_cregs32	CHAN[TXX9_DMA_MAX_NR_CHANNELS];
	u32	__pad[9];
	u32	MFDR;
	u32	MCR;
};
107
108/* bits for MCR */
109#define TXX9_DMA_MCR_EIS(ch) (0x10000000<<(ch))
110#define TXX9_DMA_MCR_DIS(ch) (0x01000000<<(ch))
111#define TXX9_DMA_MCR_RSFIF 0x00000080
112#define TXX9_DMA_MCR_FIFUM(ch) (0x00000008<<(ch))
113#define TXX9_DMA_MCR_LE 0x00000004
114#define TXX9_DMA_MCR_RPRT 0x00000002
115#define TXX9_DMA_MCR_MSTEN 0x00000001
116
117/* bits for CCRn */
118#define TXX9_DMA_CCR_IMMCHN 0x20000000
119#define TXX9_DMA_CCR_USEXFSZ 0x10000000
120#define TXX9_DMA_CCR_LE 0x08000000
121#define TXX9_DMA_CCR_DBINH 0x04000000
122#define TXX9_DMA_CCR_SBINH 0x02000000
123#define TXX9_DMA_CCR_CHRST 0x01000000
124#define TXX9_DMA_CCR_RVBYTE 0x00800000
125#define TXX9_DMA_CCR_ACKPOL 0x00400000
126#define TXX9_DMA_CCR_REQPL 0x00200000
127#define TXX9_DMA_CCR_EGREQ 0x00100000
128#define TXX9_DMA_CCR_CHDN 0x00080000
129#define TXX9_DMA_CCR_DNCTL 0x00060000
130#define TXX9_DMA_CCR_EXTRQ 0x00010000
131#define TXX9_DMA_CCR_INTRQD 0x0000e000
132#define TXX9_DMA_CCR_INTENE 0x00001000
133#define TXX9_DMA_CCR_INTENC 0x00000800
134#define TXX9_DMA_CCR_INTENT 0x00000400
135#define TXX9_DMA_CCR_CHNEN 0x00000200
136#define TXX9_DMA_CCR_XFACT 0x00000100
137#define TXX9_DMA_CCR_SMPCHN 0x00000020
138#define TXX9_DMA_CCR_XFSZ(order) (((order) << 2) & 0x0000001c)
139#define TXX9_DMA_CCR_XFSZ_1 TXX9_DMA_CCR_XFSZ(0)
140#define TXX9_DMA_CCR_XFSZ_2 TXX9_DMA_CCR_XFSZ(1)
141#define TXX9_DMA_CCR_XFSZ_4 TXX9_DMA_CCR_XFSZ(2)
142#define TXX9_DMA_CCR_XFSZ_8 TXX9_DMA_CCR_XFSZ(3)
143#define TXX9_DMA_CCR_XFSZ_X4 TXX9_DMA_CCR_XFSZ(4)
144#define TXX9_DMA_CCR_XFSZ_X8 TXX9_DMA_CCR_XFSZ(5)
145#define TXX9_DMA_CCR_XFSZ_X16 TXX9_DMA_CCR_XFSZ(6)
146#define TXX9_DMA_CCR_XFSZ_X32 TXX9_DMA_CCR_XFSZ(7)
147#define TXX9_DMA_CCR_MEMIO 0x00000002
148#define TXX9_DMA_CCR_SNGAD 0x00000001
149
150/* bits for CSRn */
151#define TXX9_DMA_CSR_CHNEN 0x00000400
152#define TXX9_DMA_CSR_STLXFER 0x00000200
153#define TXX9_DMA_CSR_XFACT 0x00000100
154#define TXX9_DMA_CSR_ABCHC 0x00000080
155#define TXX9_DMA_CSR_NCHNC 0x00000040
156#define TXX9_DMA_CSR_NTRNFC 0x00000020
157#define TXX9_DMA_CSR_EXTDN 0x00000010
158#define TXX9_DMA_CSR_CFERR 0x00000008
159#define TXX9_DMA_CSR_CHERR 0x00000004
160#define TXX9_DMA_CSR_DESERR 0x00000002
161#define TXX9_DMA_CSR_SORERR 0x00000001
162
/* State for one DMA channel. */
struct txx9dmac_chan {
	struct dma_chan		chan;
	struct dma_device	dma;
	struct txx9dmac_dev	*ddev;		/* parent controller */
	void __iomem		*ch_regs;	/* this channel's register block */
	struct tasklet_struct	tasklet;	/* used only with a private IRQ */
	int			irq;		/* private IRQ, or -1 if shared */
	u32			ccr;		/* cached channel control word */

	spinlock_t		lock;

	/* these other elements are all protected by lock */
	dma_cookie_t		completed;	/* last completed cookie */
	struct list_head	active_list;	/* descriptors given to HW */
	struct list_head	queue;		/* submitted, not yet started */
	struct list_head	free_list;	/* descriptor pool */

	unsigned int		descs_allocated;
};

/* State for the whole controller. */
struct txx9dmac_dev {
	void __iomem		*regs;
	struct tasklet_struct	tasklet;	/* used only with a shared IRQ */
	int			irq;		/* shared IRQ, or negative */
	struct txx9dmac_chan	*chan[TXX9_DMA_MAX_NR_CHANNELS];
	bool			have_64bit_regs;
	unsigned int		descsize;	/* bytes per hardware descriptor */
};
191
/* True when the controller uses the 64-bit register/descriptor layout. */
static inline bool __is_dmac64(const struct txx9dmac_dev *ddev)
{
	return ddev->have_64bit_regs;
}

/* Channel-level convenience wrapper for __is_dmac64(). */
static inline bool is_dmac64(const struct txx9dmac_chan *dc)
{
	return __is_dmac64(dc->ddev);
}
201
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
/* Hardware descriptor definition. (for simple-chain) */
struct txx9dmac_hwdesc {
#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
	TXX9_DMA_REG32(CHAR);
#else
	u64 CHAR;
#endif
	u64 SAR;
	u64 DAR;
	TXX9_DMA_REG32(CNTR);
};
struct txx9dmac_hwdesc32 {
	u32 CHAR;
	u32 SAR;
	u32 DAR;
	u32 CNTR;
};
#else
/* Without simple chain, a descriptor mirrors the full register set. */
#define txx9dmac_hwdesc txx9dmac_cregs
#define txx9dmac_hwdesc32 txx9dmac_cregs32
#endif

/* Software descriptor: hardware image plus driver bookkeeping. */
struct txx9dmac_desc {
	/* FIRST values the hardware uses */
	union {
		struct txx9dmac_hwdesc hwdesc;
		struct txx9dmac_hwdesc32 hwdesc32;
	};

	/* THEN values for driver housekeeping */
	struct list_head		desc_node ____cacheline_aligned;
	struct dma_async_tx_descriptor	txd;
	size_t				len;	/* total bytes (memcpy only) */
};
237
238#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
239
240static inline bool txx9dmac_chan_INTENT(struct txx9dmac_chan *dc)
241{
242 return (dc->ccr & TXX9_DMA_CCR_INTENT) != 0;
243}
244
245static inline void txx9dmac_chan_set_INTENT(struct txx9dmac_chan *dc)
246{
247 dc->ccr |= TXX9_DMA_CCR_INTENT;
248}
249
250static inline void txx9dmac_desc_set_INTENT(struct txx9dmac_dev *ddev,
251 struct txx9dmac_desc *desc)
252{
253}
254
255static inline void txx9dmac_chan_set_SMPCHN(struct txx9dmac_chan *dc)
256{
257 dc->ccr |= TXX9_DMA_CCR_SMPCHN;
258}
259
260static inline void txx9dmac_desc_set_nosimple(struct txx9dmac_dev *ddev,
261 struct txx9dmac_desc *desc,
262 u32 sair, u32 dair, u32 ccr)
263{
264}
265
266#else /* TXX9_DMA_USE_SIMPLE_CHAIN */
267
268static inline bool txx9dmac_chan_INTENT(struct txx9dmac_chan *dc)
269{
270 return true;
271}
272
273static void txx9dmac_chan_set_INTENT(struct txx9dmac_chan *dc)
274{
275}
276
277static inline void txx9dmac_desc_set_INTENT(struct txx9dmac_dev *ddev,
278 struct txx9dmac_desc *desc)
279{
280 if (__is_dmac64(ddev))
281 desc->hwdesc.CCR |= TXX9_DMA_CCR_INTENT;
282 else
283 desc->hwdesc32.CCR |= TXX9_DMA_CCR_INTENT;
284}
285
286static inline void txx9dmac_chan_set_SMPCHN(struct txx9dmac_chan *dc)
287{
288}
289
290static inline void txx9dmac_desc_set_nosimple(struct txx9dmac_dev *ddev,
291 struct txx9dmac_desc *desc,
292 u32 sai, u32 dai, u32 ccr)
293{
294 if (__is_dmac64(ddev)) {
295 desc->hwdesc.SAIR = sai;
296 desc->hwdesc.DAIR = dai;
297 desc->hwdesc.CCR = ccr;
298 } else {
299 desc->hwdesc32.SAIR = sai;
300 desc->hwdesc32.DAIR = dai;
301 desc->hwdesc32.CCR = ccr;
302 }
303}
304
305#endif /* TXX9_DMA_USE_SIMPLE_CHAIN */
306
307#endif /* TXX9DMAC_H */
diff --git a/drivers/eisa/eisa.ids b/drivers/eisa/eisa.ids
index ed69837d8b74..6cbb7a514436 100644
--- a/drivers/eisa/eisa.ids
+++ b/drivers/eisa/eisa.ids
@@ -1140,6 +1140,11 @@ NON0301 "c't Universale Graphic Adapter"
1140NON0401 "c't Universal Ethernet Adapter" 1140NON0401 "c't Universal Ethernet Adapter"
1141NON0501 "c't Universal 16-Bit Sound Adapter" 1141NON0501 "c't Universal 16-Bit Sound Adapter"
1142NON0601 "c't Universal 8-Bit Adapter" 1142NON0601 "c't Universal 8-Bit Adapter"
1143NPI0120 "Network Peripherals NP-EISA-1 FDDI Interface"
1144NPI0221 "Network Peripherals NP-EISA-2 FDDI Interface"
1145NPI0223 "Network Peripherals NP-EISA-2E Enhanced FDDI Interface"
1146NPI0301 "Network Peripherals NP-EISA-3 FDDI Interface"
1147NPI0303 "Network Peripherals NP-EISA-3E Enhanced FDDI Interface"
1143NSS0011 "Newport Systems Solutions WNIC Adapter" 1148NSS0011 "Newport Systems Solutions WNIC Adapter"
1144NVL0701 "Novell NE3200 Bus Master Ethernet" 1149NVL0701 "Novell NE3200 Bus Master Ethernet"
1145NVL0702 "Novell NE3200T Bus Master Ethernet" 1150NVL0702 "Novell NE3200T Bus Master Ethernet"
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
index 74edb1d0110f..0dd0f633b18d 100644
--- a/drivers/eisa/pci_eisa.c
+++ b/drivers/eisa/pci_eisa.c
@@ -31,11 +31,11 @@ static int __init pci_eisa_init(struct pci_dev *pdev,
31 } 31 }
32 32
33 pci_eisa_root.dev = &pdev->dev; 33 pci_eisa_root.dev = &pdev->dev;
34 pci_eisa_root.dev->driver_data = &pci_eisa_root;
35 pci_eisa_root.res = pdev->bus->resource[0]; 34 pci_eisa_root.res = pdev->bus->resource[0];
36 pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start; 35 pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start;
37 pci_eisa_root.slots = EISA_MAX_SLOTS; 36 pci_eisa_root.slots = EISA_MAX_SLOTS;
38 pci_eisa_root.dma_mask = pdev->dma_mask; 37 pci_eisa_root.dma_mask = pdev->dma_mask;
38 dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root);
39 39
40 if (eisa_root_register (&pci_eisa_root)) { 40 if (eisa_root_register (&pci_eisa_root)) {
41 printk (KERN_ERR "pci_eisa : Could not register EISA root\n"); 41 printk (KERN_ERR "pci_eisa : Could not register EISA root\n");
diff --git a/drivers/eisa/virtual_root.c b/drivers/eisa/virtual_root.c
index 3074879f231f..535e4f9c83f4 100644
--- a/drivers/eisa/virtual_root.c
+++ b/drivers/eisa/virtual_root.c
@@ -57,7 +57,7 @@ static int __init virtual_eisa_root_init (void)
57 57
58 eisa_bus_root.force_probe = force_probe; 58 eisa_bus_root.force_probe = force_probe;
59 59
60 eisa_root_dev.dev.driver_data = &eisa_bus_root; 60 dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root);
61 61
62 if (eisa_root_register (&eisa_bus_root)) { 62 if (eisa_root_register (&eisa_bus_root)) {
63 /* A real bridge may have been registered before 63 /* A real bridge may have been registered before
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index a7c31e9039c1..bc3b9bf822bf 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -2,10 +2,10 @@
2# Makefile for the Linux IEEE 1394 implementation 2# Makefile for the Linux IEEE 1394 implementation
3# 3#
4 4
5firewire-core-y += fw-card.o fw-topology.o fw-transaction.o fw-iso.o \ 5firewire-core-y += core-card.o core-cdev.o core-device.o \
6 fw-device.o fw-cdev.o 6 core-iso.o core-topology.o core-transaction.o
7firewire-ohci-y += fw-ohci.o 7firewire-ohci-y += ohci.o
8firewire-sbp2-y += fw-sbp2.o 8firewire-sbp2-y += sbp2.o
9 9
10obj-$(CONFIG_FIREWIRE) += firewire-core.o 10obj-$(CONFIG_FIREWIRE) += firewire-core.o
11obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o 11obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/core-card.c
index 8b8c8c22f0fc..4c1be64fdddd 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/core-card.c
@@ -16,18 +16,27 @@
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */ 17 */
18 18
19#include <linux/bug.h>
19#include <linux/completion.h> 20#include <linux/completion.h>
20#include <linux/crc-itu-t.h> 21#include <linux/crc-itu-t.h>
21#include <linux/delay.h>
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/firewire.h>
25#include <linux/firewire-constants.h>
26#include <linux/jiffies.h>
27#include <linux/kernel.h>
24#include <linux/kref.h> 28#include <linux/kref.h>
29#include <linux/list.h>
25#include <linux/module.h> 30#include <linux/module.h>
26#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/spinlock.h>
33#include <linux/timer.h>
34#include <linux/workqueue.h>
27 35
28#include "fw-transaction.h" 36#include <asm/atomic.h>
29#include "fw-topology.h" 37#include <asm/byteorder.h>
30#include "fw-device.h" 38
39#include "core.h"
31 40
32int fw_compute_block_crc(u32 *block) 41int fw_compute_block_crc(u32 *block)
33{ 42{
@@ -181,12 +190,6 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
181 mutex_unlock(&card_mutex); 190 mutex_unlock(&card_mutex);
182} 191}
183 192
184static int set_broadcast_channel(struct device *dev, void *data)
185{
186 fw_device_set_broadcast_channel(fw_device(dev), (long)data);
187 return 0;
188}
189
190static void allocate_broadcast_channel(struct fw_card *card, int generation) 193static void allocate_broadcast_channel(struct fw_card *card, int generation)
191{ 194{
192 int channel, bandwidth = 0; 195 int channel, bandwidth = 0;
@@ -196,7 +199,7 @@ static void allocate_broadcast_channel(struct fw_card *card, int generation)
196 if (channel == 31) { 199 if (channel == 31) {
197 card->broadcast_channel_allocated = true; 200 card->broadcast_channel_allocated = true;
198 device_for_each_child(card->device, (void *)(long)generation, 201 device_for_each_child(card->device, (void *)(long)generation,
199 set_broadcast_channel); 202 fw_device_set_broadcast_channel);
200 } 203 }
201} 204}
202 205
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/core-cdev.c
index 7eb6594cc3e5..d1d30c615b0f 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/firewire.h>
25#include <linux/firewire-cdev.h> 26#include <linux/firewire-cdev.h>
26#include <linux/idr.h> 27#include <linux/idr.h>
27#include <linux/jiffies.h> 28#include <linux/jiffies.h>
@@ -34,16 +35,14 @@
34#include <linux/preempt.h> 35#include <linux/preempt.h>
35#include <linux/spinlock.h> 36#include <linux/spinlock.h>
36#include <linux/time.h> 37#include <linux/time.h>
38#include <linux/uaccess.h>
37#include <linux/vmalloc.h> 39#include <linux/vmalloc.h>
38#include <linux/wait.h> 40#include <linux/wait.h>
39#include <linux/workqueue.h> 41#include <linux/workqueue.h>
40 42
41#include <asm/system.h> 43#include <asm/system.h>
42#include <asm/uaccess.h>
43 44
44#include "fw-device.h" 45#include "core.h"
45#include "fw-topology.h"
46#include "fw-transaction.h"
47 46
48struct client { 47struct client {
49 u32 version; 48 u32 version;
@@ -739,15 +738,11 @@ static void release_descriptor(struct client *client,
739static int ioctl_add_descriptor(struct client *client, void *buffer) 738static int ioctl_add_descriptor(struct client *client, void *buffer)
740{ 739{
741 struct fw_cdev_add_descriptor *request = buffer; 740 struct fw_cdev_add_descriptor *request = buffer;
742 struct fw_card *card = client->device->card;
743 struct descriptor_resource *r; 741 struct descriptor_resource *r;
744 int ret; 742 int ret;
745 743
746 /* Access policy: Allow this ioctl only on local nodes' device files. */ 744 /* Access policy: Allow this ioctl only on local nodes' device files. */
747 spin_lock_irq(&card->lock); 745 if (!client->device->is_local)
748 ret = client->device->node_id != card->local_node->node_id;
749 spin_unlock_irq(&card->lock);
750 if (ret)
751 return -ENOSYS; 746 return -ENOSYS;
752 747
753 if (request->length > 256) 748 if (request->length > 256)
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/core-device.c
index a47e2129d83d..97e656af2d22 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/core-device.c
@@ -22,10 +22,14 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/firewire.h>
26#include <linux/firewire-constants.h>
25#include <linux/idr.h> 27#include <linux/idr.h>
26#include <linux/jiffies.h> 28#include <linux/jiffies.h>
27#include <linux/kobject.h> 29#include <linux/kobject.h>
28#include <linux/list.h> 30#include <linux/list.h>
31#include <linux/mod_devicetable.h>
32#include <linux/module.h>
29#include <linux/mutex.h> 33#include <linux/mutex.h>
30#include <linux/rwsem.h> 34#include <linux/rwsem.h>
31#include <linux/semaphore.h> 35#include <linux/semaphore.h>
@@ -33,11 +37,11 @@
33#include <linux/string.h> 37#include <linux/string.h>
34#include <linux/workqueue.h> 38#include <linux/workqueue.h>
35 39
40#include <asm/atomic.h>
41#include <asm/byteorder.h>
36#include <asm/system.h> 42#include <asm/system.h>
37 43
38#include "fw-device.h" 44#include "core.h"
39#include "fw-topology.h"
40#include "fw-transaction.h"
41 45
42void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) 46void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
43{ 47{
@@ -55,9 +59,10 @@ int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
55} 59}
56EXPORT_SYMBOL(fw_csr_iterator_next); 60EXPORT_SYMBOL(fw_csr_iterator_next);
57 61
58static int is_fw_unit(struct device *dev); 62static bool is_fw_unit(struct device *dev);
59 63
60static int match_unit_directory(u32 * directory, const struct fw_device_id *id) 64static int match_unit_directory(u32 *directory, u32 match_flags,
65 const struct ieee1394_device_id *id)
61{ 66{
62 struct fw_csr_iterator ci; 67 struct fw_csr_iterator ci;
63 int key, value, match; 68 int key, value, match;
@@ -65,31 +70,42 @@ static int match_unit_directory(u32 * directory, const struct fw_device_id *id)
65 match = 0; 70 match = 0;
66 fw_csr_iterator_init(&ci, directory); 71 fw_csr_iterator_init(&ci, directory);
67 while (fw_csr_iterator_next(&ci, &key, &value)) { 72 while (fw_csr_iterator_next(&ci, &key, &value)) {
68 if (key == CSR_VENDOR && value == id->vendor) 73 if (key == CSR_VENDOR && value == id->vendor_id)
69 match |= FW_MATCH_VENDOR; 74 match |= IEEE1394_MATCH_VENDOR_ID;
70 if (key == CSR_MODEL && value == id->model) 75 if (key == CSR_MODEL && value == id->model_id)
71 match |= FW_MATCH_MODEL; 76 match |= IEEE1394_MATCH_MODEL_ID;
72 if (key == CSR_SPECIFIER_ID && value == id->specifier_id) 77 if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
73 match |= FW_MATCH_SPECIFIER_ID; 78 match |= IEEE1394_MATCH_SPECIFIER_ID;
74 if (key == CSR_VERSION && value == id->version) 79 if (key == CSR_VERSION && value == id->version)
75 match |= FW_MATCH_VERSION; 80 match |= IEEE1394_MATCH_VERSION;
76 } 81 }
77 82
78 return (match & id->match_flags) == id->match_flags; 83 return (match & match_flags) == match_flags;
79} 84}
80 85
81static int fw_unit_match(struct device *dev, struct device_driver *drv) 86static int fw_unit_match(struct device *dev, struct device_driver *drv)
82{ 87{
83 struct fw_unit *unit = fw_unit(dev); 88 struct fw_unit *unit = fw_unit(dev);
84 struct fw_driver *driver = fw_driver(drv); 89 struct fw_device *device;
85 int i; 90 const struct ieee1394_device_id *id;
86 91
87 /* We only allow binding to fw_units. */ 92 /* We only allow binding to fw_units. */
88 if (!is_fw_unit(dev)) 93 if (!is_fw_unit(dev))
89 return 0; 94 return 0;
90 95
91 for (i = 0; driver->id_table[i].match_flags != 0; i++) { 96 device = fw_parent_device(unit);
92 if (match_unit_directory(unit->directory, &driver->id_table[i])) 97 id = container_of(drv, struct fw_driver, driver)->id_table;
98
99 for (; id->match_flags != 0; id++) {
100 if (match_unit_directory(unit->directory, id->match_flags, id))
101 return 1;
102
103 /* Also check vendor ID in the root directory. */
104 if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
105 match_unit_directory(&device->config_rom[5],
106 IEEE1394_MATCH_VENDOR_ID, id) &&
107 match_unit_directory(unit->directory, id->match_flags
108 & ~IEEE1394_MATCH_VENDOR_ID, id))
93 return 1; 109 return 1;
94 } 110 }
95 111
@@ -98,7 +114,7 @@ static int fw_unit_match(struct device *dev, struct device_driver *drv)
98 114
99static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size) 115static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
100{ 116{
101 struct fw_device *device = fw_device(unit->device.parent); 117 struct fw_device *device = fw_parent_device(unit);
102 struct fw_csr_iterator ci; 118 struct fw_csr_iterator ci;
103 119
104 int key, value; 120 int key, value;
@@ -292,8 +308,7 @@ static void init_fw_attribute_group(struct device *dev,
292 group->attrs[j++] = &attr->attr; 308 group->attrs[j++] = &attr->attr;
293 } 309 }
294 310
295 BUG_ON(j >= ARRAY_SIZE(group->attrs)); 311 group->attrs[j] = NULL;
296 group->attrs[j++] = NULL;
297 group->groups[0] = &group->group; 312 group->groups[0] = &group->group;
298 group->groups[1] = NULL; 313 group->groups[1] = NULL;
299 group->group.attrs = group->attrs; 314 group->group.attrs = group->attrs;
@@ -356,9 +371,56 @@ static ssize_t guid_show(struct device *dev,
356 return ret; 371 return ret;
357} 372}
358 373
374static int units_sprintf(char *buf, u32 *directory)
375{
376 struct fw_csr_iterator ci;
377 int key, value;
378 int specifier_id = 0;
379 int version = 0;
380
381 fw_csr_iterator_init(&ci, directory);
382 while (fw_csr_iterator_next(&ci, &key, &value)) {
383 switch (key) {
384 case CSR_SPECIFIER_ID:
385 specifier_id = value;
386 break;
387 case CSR_VERSION:
388 version = value;
389 break;
390 }
391 }
392
393 return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version);
394}
395
396static ssize_t units_show(struct device *dev,
397 struct device_attribute *attr, char *buf)
398{
399 struct fw_device *device = fw_device(dev);
400 struct fw_csr_iterator ci;
401 int key, value, i = 0;
402
403 down_read(&fw_device_rwsem);
404 fw_csr_iterator_init(&ci, &device->config_rom[5]);
405 while (fw_csr_iterator_next(&ci, &key, &value)) {
406 if (key != (CSR_UNIT | CSR_DIRECTORY))
407 continue;
408 i += units_sprintf(&buf[i], ci.p + value - 1);
409 if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
410 break;
411 }
412 up_read(&fw_device_rwsem);
413
414 if (i)
415 buf[i - 1] = '\n';
416
417 return i;
418}
419
359static struct device_attribute fw_device_attributes[] = { 420static struct device_attribute fw_device_attributes[] = {
360 __ATTR_RO(config_rom), 421 __ATTR_RO(config_rom),
361 __ATTR_RO(guid), 422 __ATTR_RO(guid),
423 __ATTR_RO(units),
362 __ATTR_NULL, 424 __ATTR_NULL,
363}; 425};
364 426
@@ -518,7 +580,9 @@ static int read_bus_info_block(struct fw_device *device, int generation)
518 580
519 kfree(old_rom); 581 kfree(old_rom);
520 ret = 0; 582 ret = 0;
521 device->cmc = rom[2] >> 30 & 1; 583 device->max_rec = rom[2] >> 12 & 0xf;
584 device->cmc = rom[2] >> 30 & 1;
585 device->irmc = rom[2] >> 31 & 1;
522 out: 586 out:
523 kfree(rom); 587 kfree(rom);
524 588
@@ -537,7 +601,7 @@ static struct device_type fw_unit_type = {
537 .release = fw_unit_release, 601 .release = fw_unit_release,
538}; 602};
539 603
540static int is_fw_unit(struct device *dev) 604static bool is_fw_unit(struct device *dev)
541{ 605{
542 return dev->type == &fw_unit_type; 606 return dev->type == &fw_unit_type;
543} 607}
@@ -570,9 +634,13 @@ static void create_units(struct fw_device *device)
570 unit->device.parent = &device->device; 634 unit->device.parent = &device->device;
571 dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++); 635 dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);
572 636
637 BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) <
638 ARRAY_SIZE(fw_unit_attributes) +
639 ARRAY_SIZE(config_rom_attributes));
573 init_fw_attribute_group(&unit->device, 640 init_fw_attribute_group(&unit->device,
574 fw_unit_attributes, 641 fw_unit_attributes,
575 &unit->attribute_group); 642 &unit->attribute_group);
643
576 if (device_register(&unit->device) < 0) 644 if (device_register(&unit->device) < 0)
577 goto skip_unit; 645 goto skip_unit;
578 646
@@ -683,6 +751,11 @@ static struct device_type fw_device_type = {
683 .release = fw_device_release, 751 .release = fw_device_release,
684}; 752};
685 753
754static bool is_fw_device(struct device *dev)
755{
756 return dev->type == &fw_device_type;
757}
758
686static int update_unit(struct device *dev, void *data) 759static int update_unit(struct device *dev, void *data)
687{ 760{
688 struct fw_unit *unit = fw_unit(dev); 761 struct fw_unit *unit = fw_unit(dev);
@@ -719,6 +792,9 @@ static int lookup_existing_device(struct device *dev, void *data)
719 struct fw_card *card = new->card; 792 struct fw_card *card = new->card;
720 int match = 0; 793 int match = 0;
721 794
795 if (!is_fw_device(dev))
796 return 0;
797
722 down_read(&fw_device_rwsem); /* serialize config_rom access */ 798 down_read(&fw_device_rwsem); /* serialize config_rom access */
723 spin_lock_irq(&card->lock); /* serialize node access */ 799 spin_lock_irq(&card->lock); /* serialize node access */
724 800
@@ -758,7 +834,7 @@ static int lookup_existing_device(struct device *dev, void *data)
758 834
759enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, }; 835enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
760 836
761void fw_device_set_broadcast_channel(struct fw_device *device, int generation) 837static void set_broadcast_channel(struct fw_device *device, int generation)
762{ 838{
763 struct fw_card *card = device->card; 839 struct fw_card *card = device->card;
764 __be32 data; 840 __be32 data;
@@ -767,6 +843,20 @@ void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
767 if (!card->broadcast_channel_allocated) 843 if (!card->broadcast_channel_allocated)
768 return; 844 return;
769 845
846 /*
847 * The Broadcast_Channel Valid bit is required by nodes which want to
848 * transmit on this channel. Such transmissions are practically
849 * exclusive to IP over 1394 (RFC 2734). IP capable nodes are required
850 * to be IRM capable and have a max_rec of 8 or more. We use this fact
851 * to narrow down to which nodes we send Broadcast_Channel updates.
852 */
853 if (!device->irmc || device->max_rec < 8)
854 return;
855
856 /*
857 * Some 1394-1995 nodes crash if this 1394a-2000 register is written.
858 * Perform a read test first.
859 */
770 if (device->bc_implemented == BC_UNKNOWN) { 860 if (device->bc_implemented == BC_UNKNOWN) {
771 rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST, 861 rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
772 device->node_id, generation, device->max_speed, 862 device->node_id, generation, device->max_speed,
@@ -794,6 +884,14 @@ void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
794 } 884 }
795} 885}
796 886
887int fw_device_set_broadcast_channel(struct device *dev, void *gen)
888{
889 if (is_fw_device(dev))
890 set_broadcast_channel(fw_device(dev), (long)gen);
891
892 return 0;
893}
894
797static void fw_device_init(struct work_struct *work) 895static void fw_device_init(struct work_struct *work)
798{ 896{
799 struct fw_device *device = 897 struct fw_device *device =
@@ -849,9 +947,13 @@ static void fw_device_init(struct work_struct *work)
849 device->device.devt = MKDEV(fw_cdev_major, minor); 947 device->device.devt = MKDEV(fw_cdev_major, minor);
850 dev_set_name(&device->device, "fw%d", minor); 948 dev_set_name(&device->device, "fw%d", minor);
851 949
950 BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) <
951 ARRAY_SIZE(fw_device_attributes) +
952 ARRAY_SIZE(config_rom_attributes));
852 init_fw_attribute_group(&device->device, 953 init_fw_attribute_group(&device->device,
853 fw_device_attributes, 954 fw_device_attributes,
854 &device->attribute_group); 955 &device->attribute_group);
956
855 if (device_add(&device->device)) { 957 if (device_add(&device->device)) {
856 fw_error("Failed to add device.\n"); 958 fw_error("Failed to add device.\n");
857 goto error_with_cdev; 959 goto error_with_cdev;
@@ -888,7 +990,7 @@ static void fw_device_init(struct work_struct *work)
888 1 << device->max_speed); 990 1 << device->max_speed);
889 device->config_rom_retries = 0; 991 device->config_rom_retries = 0;
890 992
891 fw_device_set_broadcast_channel(device, device->generation); 993 set_broadcast_channel(device, device->generation);
892 } 994 }
893 995
894 /* 996 /*
@@ -993,6 +1095,9 @@ static void fw_device_refresh(struct work_struct *work)
993 1095
994 create_units(device); 1096 create_units(device);
995 1097
1098 /* Userspace may want to re-read attributes. */
1099 kobject_uevent(&device->device.kobj, KOBJ_CHANGE);
1100
996 if (atomic_cmpxchg(&device->state, 1101 if (atomic_cmpxchg(&device->state,
997 FW_DEVICE_INITIALIZING, 1102 FW_DEVICE_INITIALIZING,
998 FW_DEVICE_RUNNING) == FW_DEVICE_GONE) 1103 FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
@@ -1042,6 +1147,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1042 device->node = fw_node_get(node); 1147 device->node = fw_node_get(node);
1043 device->node_id = node->node_id; 1148 device->node_id = node->node_id;
1044 device->generation = card->generation; 1149 device->generation = card->generation;
1150 device->is_local = node == card->local_node;
1045 mutex_init(&device->client_list_mutex); 1151 mutex_init(&device->client_list_mutex);
1046 INIT_LIST_HEAD(&device->client_list); 1152 INIT_LIST_HEAD(&device->client_list);
1047 1153
@@ -1075,7 +1181,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1075 FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) { 1181 FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
1076 PREPARE_DELAYED_WORK(&device->work, fw_device_refresh); 1182 PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
1077 schedule_delayed_work(&device->work, 1183 schedule_delayed_work(&device->work,
1078 node == card->local_node ? 0 : INITIAL_DELAY); 1184 device->is_local ? 0 : INITIAL_DELAY);
1079 } 1185 }
1080 break; 1186 break;
1081 1187
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/core-iso.c
index 2baf1007253e..28076c892d7e 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -22,14 +22,16 @@
22 22
23#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/firewire.h>
25#include <linux/firewire-constants.h> 26#include <linux/firewire-constants.h>
26#include <linux/kernel.h> 27#include <linux/kernel.h>
27#include <linux/mm.h> 28#include <linux/mm.h>
28#include <linux/spinlock.h> 29#include <linux/spinlock.h>
29#include <linux/vmalloc.h> 30#include <linux/vmalloc.h>
30 31
31#include "fw-topology.h" 32#include <asm/byteorder.h>
32#include "fw-transaction.h" 33
34#include "core.h"
33 35
34/* 36/*
35 * Isochronous DMA context management 37 * Isochronous DMA context management
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/core-topology.c
index d0deecc4de93..fddf2b358936 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -18,13 +18,22 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/module.h> 21#include <linux/bug.h>
22#include <linux/wait.h>
23#include <linux/errno.h> 22#include <linux/errno.h>
24#include <asm/bug.h> 23#include <linux/firewire.h>
24#include <linux/firewire-constants.h>
25#include <linux/jiffies.h>
26#include <linux/kernel.h>
27#include <linux/list.h>
28#include <linux/module.h>
29#include <linux/slab.h>
30#include <linux/spinlock.h>
31#include <linux/string.h>
32
33#include <asm/atomic.h>
25#include <asm/system.h> 34#include <asm/system.h>
26#include "fw-transaction.h" 35
27#include "fw-topology.h" 36#include "core.h"
28 37
29#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f) 38#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
30#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01) 39#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
@@ -37,6 +46,11 @@
37 46
38#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07) 47#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
39 48
49#define SELFID_PORT_CHILD 0x3
50#define SELFID_PORT_PARENT 0x2
51#define SELFID_PORT_NCONN 0x1
52#define SELFID_PORT_NONE 0x0
53
40static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count) 54static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
41{ 55{
42 u32 q; 56 u32 q;
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/core-transaction.c
index 283dac6d327d..479b22f5a1eb 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -18,24 +18,28 @@
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/bug.h>
21#include <linux/completion.h> 22#include <linux/completion.h>
23#include <linux/device.h>
24#include <linux/errno.h>
25#include <linux/firewire.h>
26#include <linux/firewire-constants.h>
27#include <linux/fs.h>
28#include <linux/init.h>
22#include <linux/idr.h> 29#include <linux/idr.h>
30#include <linux/jiffies.h>
23#include <linux/kernel.h> 31#include <linux/kernel.h>
24#include <linux/kref.h>
25#include <linux/module.h>
26#include <linux/mutex.h>
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/pci.h>
30#include <linux/delay.h>
31#include <linux/poll.h>
32#include <linux/list.h> 32#include <linux/list.h>
33#include <linux/kthread.h> 33#include <linux/module.h>
34#include <asm/uaccess.h> 34#include <linux/slab.h>
35#include <linux/spinlock.h>
36#include <linux/string.h>
37#include <linux/timer.h>
38#include <linux/types.h>
35 39
36#include "fw-transaction.h" 40#include <asm/byteorder.h>
37#include "fw-topology.h" 41
38#include "fw-device.h" 42#include "core.h"
39 43
40#define HEADER_PRI(pri) ((pri) << 0) 44#define HEADER_PRI(pri) ((pri) << 0)
41#define HEADER_TCODE(tcode) ((tcode) << 4) 45#define HEADER_TCODE(tcode) ((tcode) << 4)
@@ -60,6 +64,10 @@
60#define HEADER_DESTINATION_IS_BROADCAST(q) \ 64#define HEADER_DESTINATION_IS_BROADCAST(q) \
61 (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f)) 65 (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))
62 66
67#define PHY_PACKET_CONFIG 0x0
68#define PHY_PACKET_LINK_ON 0x1
69#define PHY_PACKET_SELF_ID 0x2
70
63#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22)) 71#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
64#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23)) 72#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
65#define PHY_IDENTIFIER(id) ((id) << 30) 73#define PHY_IDENTIFIER(id) ((id) << 30)
@@ -74,7 +82,7 @@ static int close_transaction(struct fw_transaction *transaction,
74 list_for_each_entry(t, &card->transaction_list, link) { 82 list_for_each_entry(t, &card->transaction_list, link) {
75 if (t == transaction) { 83 if (t == transaction) {
76 list_del(&t->link); 84 list_del(&t->link);
77 card->tlabel_mask &= ~(1 << t->tlabel); 85 card->tlabel_mask &= ~(1ULL << t->tlabel);
78 break; 86 break;
79 } 87 }
80 } 88 }
@@ -280,14 +288,14 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
280 spin_lock_irqsave(&card->lock, flags); 288 spin_lock_irqsave(&card->lock, flags);
281 289
282 tlabel = card->current_tlabel; 290 tlabel = card->current_tlabel;
283 if (card->tlabel_mask & (1 << tlabel)) { 291 if (card->tlabel_mask & (1ULL << tlabel)) {
284 spin_unlock_irqrestore(&card->lock, flags); 292 spin_unlock_irqrestore(&card->lock, flags);
285 callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data); 293 callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
286 return; 294 return;
287 } 295 }
288 296
289 card->current_tlabel = (card->current_tlabel + 1) & 0x1f; 297 card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
290 card->tlabel_mask |= (1 << tlabel); 298 card->tlabel_mask |= (1ULL << tlabel);
291 299
292 t->node_id = destination_id; 300 t->node_id = destination_id;
293 t->tlabel = tlabel; 301 t->tlabel = tlabel;
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
new file mode 100644
index 000000000000..0a25a7b38a80
--- /dev/null
+++ b/drivers/firewire/core.h
@@ -0,0 +1,293 @@
1#ifndef _FIREWIRE_CORE_H
2#define _FIREWIRE_CORE_H
3
4#include <linux/dma-mapping.h>
5#include <linux/fs.h>
6#include <linux/list.h>
7#include <linux/idr.h>
8#include <linux/mm_types.h>
9#include <linux/rwsem.h>
10#include <linux/slab.h>
11#include <linux/types.h>
12
13#include <asm/atomic.h>
14
15struct device;
16struct fw_card;
17struct fw_device;
18struct fw_iso_buffer;
19struct fw_iso_context;
20struct fw_iso_packet;
21struct fw_node;
22struct fw_packet;
23
24
25/* -card */
26
27/* bitfields within the PHY registers */
28#define PHY_LINK_ACTIVE 0x80
29#define PHY_CONTENDER 0x40
30#define PHY_BUS_RESET 0x40
31#define PHY_BUS_SHORT_RESET 0x40
32
33#define BANDWIDTH_AVAILABLE_INITIAL 4915
34#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
35#define BROADCAST_CHANNEL_VALID (1 << 30)
36
37struct fw_card_driver {
38 /*
39 * Enable the given card with the given initial config rom.
40 * This function is expected to activate the card, and either
41 * enable the PHY or set the link_on bit and initiate a bus
42 * reset.
43 */
44 int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
45
46 int (*update_phy_reg)(struct fw_card *card, int address,
47 int clear_bits, int set_bits);
48
49 /*
50 * Update the config rom for an enabled card. This function
51 * should change the config rom that is presented on the bus
52 * an initiate a bus reset.
53 */
54 int (*set_config_rom)(struct fw_card *card,
55 u32 *config_rom, size_t length);
56
57 void (*send_request)(struct fw_card *card, struct fw_packet *packet);
58 void (*send_response)(struct fw_card *card, struct fw_packet *packet);
59 /* Calling cancel is valid once a packet has been submitted. */
60 int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
61
62 /*
63 * Allow the specified node ID to do direct DMA out and in of
64 * host memory. The card will disable this for all node when
65 * a bus reset happens, so driver need to reenable this after
66 * bus reset. Returns 0 on success, -ENODEV if the card
67 * doesn't support this, -ESTALE if the generation doesn't
68 * match.
69 */
70 int (*enable_phys_dma)(struct fw_card *card,
71 int node_id, int generation);
72
73 u64 (*get_bus_time)(struct fw_card *card);
74
75 struct fw_iso_context *
76 (*allocate_iso_context)(struct fw_card *card,
77 int type, int channel, size_t header_size);
78 void (*free_iso_context)(struct fw_iso_context *ctx);
79
80 int (*start_iso)(struct fw_iso_context *ctx,
81 s32 cycle, u32 sync, u32 tags);
82
83 int (*queue_iso)(struct fw_iso_context *ctx,
84 struct fw_iso_packet *packet,
85 struct fw_iso_buffer *buffer,
86 unsigned long payload);
87
88 int (*stop_iso)(struct fw_iso_context *ctx);
89};
90
91void fw_card_initialize(struct fw_card *card,
92 const struct fw_card_driver *driver, struct device *device);
93int fw_card_add(struct fw_card *card,
94 u32 max_receive, u32 link_speed, u64 guid);
95void fw_core_remove_card(struct fw_card *card);
96int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
97int fw_compute_block_crc(u32 *block);
98void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
99
100struct fw_descriptor {
101 struct list_head link;
102 size_t length;
103 u32 immediate;
104 u32 key;
105 const u32 *data;
106};
107
108int fw_core_add_descriptor(struct fw_descriptor *desc);
109void fw_core_remove_descriptor(struct fw_descriptor *desc);
110
111
112/* -cdev */
113
114extern const struct file_operations fw_device_ops;
115
116void fw_device_cdev_update(struct fw_device *device);
117void fw_device_cdev_remove(struct fw_device *device);
118
119
120/* -device */
121
122extern struct rw_semaphore fw_device_rwsem;
123extern struct idr fw_device_idr;
124extern int fw_cdev_major;
125
126struct fw_device *fw_device_get_by_devt(dev_t devt);
127int fw_device_set_broadcast_channel(struct device *dev, void *gen);
128void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
129
130
131/* -iso */
132
133/*
134 * The iso packet format allows for an immediate header/payload part
135 * stored in 'header' immediately after the packet info plus an
136 * indirect payload part that is pointer to by the 'payload' field.
137 * Applications can use one or the other or both to implement simple
138 * low-bandwidth streaming (e.g. audio) or more advanced
139 * scatter-gather streaming (e.g. assembling video frame automatically).
140 */
141struct fw_iso_packet {
142 u16 payload_length; /* Length of indirect payload. */
143 u32 interrupt:1; /* Generate interrupt on this packet */
144 u32 skip:1; /* Set to not send packet at all. */
145 u32 tag:2;
146 u32 sy:4;
147 u32 header_length:8; /* Length of immediate header. */
148 u32 header[0];
149};
150
151#define FW_ISO_CONTEXT_TRANSMIT 0
152#define FW_ISO_CONTEXT_RECEIVE 1
153
154#define FW_ISO_CONTEXT_MATCH_TAG0 1
155#define FW_ISO_CONTEXT_MATCH_TAG1 2
156#define FW_ISO_CONTEXT_MATCH_TAG2 4
157#define FW_ISO_CONTEXT_MATCH_TAG3 8
158#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
159
160/*
161 * An iso buffer is just a set of pages mapped for DMA in the
162 * specified direction. Since the pages are to be used for DMA, they
163 * are not mapped into the kernel virtual address space. We store the
164 * DMA address in the page private. The helper function
165 * fw_iso_buffer_map() will map the pages into a given vma.
166 */
167struct fw_iso_buffer {
168 enum dma_data_direction direction;
169 struct page **pages;
170 int page_count;
171};
172
173typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
174 u32 cycle, size_t header_length,
175 void *header, void *data);
176
177struct fw_iso_context {
178 struct fw_card *card;
179 int type;
180 int channel;
181 int speed;
182 size_t header_size;
183 fw_iso_callback_t callback;
184 void *callback_data;
185};
186
187int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
188 int page_count, enum dma_data_direction direction);
189int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
190void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
191
192struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
193 int type, int channel, int speed, size_t header_size,
194 fw_iso_callback_t callback, void *callback_data);
195int fw_iso_context_queue(struct fw_iso_context *ctx,
196 struct fw_iso_packet *packet,
197 struct fw_iso_buffer *buffer,
198 unsigned long payload);
199int fw_iso_context_start(struct fw_iso_context *ctx,
200 int cycle, int sync, int tags);
201int fw_iso_context_stop(struct fw_iso_context *ctx);
202void fw_iso_context_destroy(struct fw_iso_context *ctx);
203
204void fw_iso_resource_manage(struct fw_card *card, int generation,
205 u64 channels_mask, int *channel, int *bandwidth, bool allocate);
206
207
208/* -topology */
209
210enum {
211 FW_NODE_CREATED,
212 FW_NODE_UPDATED,
213 FW_NODE_DESTROYED,
214 FW_NODE_LINK_ON,
215 FW_NODE_LINK_OFF,
216 FW_NODE_INITIATED_RESET,
217};
218
219struct fw_node {
220 u16 node_id;
221 u8 color;
222 u8 port_count;
223 u8 link_on:1;
224 u8 initiated_reset:1;
225 u8 b_path:1;
226 u8 phy_speed:2; /* As in the self ID packet. */
227 u8 max_speed:2; /* Minimum of all phy-speeds on the path from the
228 * local node to this node. */
229 u8 max_depth:4; /* Maximum depth to any leaf node */
230 u8 max_hops:4; /* Max hops in this sub tree */
231 atomic_t ref_count;
232
233 /* For serializing node topology into a list. */
234 struct list_head link;
235
236 /* Upper layer specific data. */
237 void *data;
238
239 struct fw_node *ports[0];
240};
241
242static inline struct fw_node *fw_node_get(struct fw_node *node)
243{
244 atomic_inc(&node->ref_count);
245
246 return node;
247}
248
249static inline void fw_node_put(struct fw_node *node)
250{
251 if (atomic_dec_and_test(&node->ref_count))
252 kfree(node);
253}
254
255void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
256 int generation, int self_id_count, u32 *self_ids);
257void fw_destroy_nodes(struct fw_card *card);
258
259/*
260 * Check whether new_generation is the immediate successor of old_generation.
261 * Take counter roll-over at 255 (as per OHCI) into account.
262 */
263static inline bool is_next_generation(int new_generation, int old_generation)
264{
265 return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
266}
267
268
269/* -transaction */
270
271#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
272#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
273#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
274#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
275#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
276#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
277
278#define LOCAL_BUS 0xffc0
279
280void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
281void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
282void fw_fill_response(struct fw_packet *response, u32 *request_header,
283 int rcode, void *payload, size_t length);
284void fw_flush_transactions(struct fw_card *card);
285void fw_send_phy_config(struct fw_card *card,
286 int node_id, int generation, int gap_count);
287
288static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
289{
290 return tag << 14 | channel << 8 | sy;
291}
292
293#endif /* _FIREWIRE_CORE_H */
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
deleted file mode 100644
index 97588937c018..000000000000
--- a/drivers/firewire/fw-device.h
+++ /dev/null
@@ -1,202 +0,0 @@
1/*
2 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __fw_device_h
20#define __fw_device_h
21
22#include <linux/device.h>
23#include <linux/fs.h>
24#include <linux/idr.h>
25#include <linux/kernel.h>
26#include <linux/list.h>
27#include <linux/mutex.h>
28#include <linux/rwsem.h>
29#include <linux/sysfs.h>
30#include <linux/types.h>
31#include <linux/workqueue.h>
32
33#include <asm/atomic.h>
34
35enum fw_device_state {
36 FW_DEVICE_INITIALIZING,
37 FW_DEVICE_RUNNING,
38 FW_DEVICE_GONE,
39 FW_DEVICE_SHUTDOWN,
40};
41
42struct fw_attribute_group {
43 struct attribute_group *groups[2];
44 struct attribute_group group;
45 struct attribute *attrs[11];
46};
47
48struct fw_node;
49struct fw_card;
50
51/*
52 * Note, fw_device.generation always has to be read before fw_device.node_id.
53 * Use SMP memory barriers to ensure this. Otherwise requests will be sent
54 * to an outdated node_id if the generation was updated in the meantime due
55 * to a bus reset.
56 *
57 * Likewise, fw-core will take care to update .node_id before .generation so
58 * that whenever fw_device.generation is current WRT the actual bus generation,
59 * fw_device.node_id is guaranteed to be current too.
60 *
61 * The same applies to fw_device.card->node_id vs. fw_device.generation.
62 *
63 * fw_device.config_rom and fw_device.config_rom_length may be accessed during
64 * the lifetime of any fw_unit belonging to the fw_device, before device_del()
65 * was called on the last fw_unit. Alternatively, they may be accessed while
66 * holding fw_device_rwsem.
67 */
68struct fw_device {
69 atomic_t state;
70 struct fw_node *node;
71 int node_id;
72 int generation;
73 unsigned max_speed;
74 struct fw_card *card;
75 struct device device;
76
77 struct mutex client_list_mutex;
78 struct list_head client_list;
79
80 u32 *config_rom;
81 size_t config_rom_length;
82 int config_rom_retries;
83 unsigned cmc:1;
84 unsigned bc_implemented:2;
85
86 struct delayed_work work;
87 struct fw_attribute_group attribute_group;
88};
89
90static inline struct fw_device *fw_device(struct device *dev)
91{
92 return container_of(dev, struct fw_device, device);
93}
94
95static inline int fw_device_is_shutdown(struct fw_device *device)
96{
97 return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
98}
99
100static inline struct fw_device *fw_device_get(struct fw_device *device)
101{
102 get_device(&device->device);
103
104 return device;
105}
106
107static inline void fw_device_put(struct fw_device *device)
108{
109 put_device(&device->device);
110}
111
112struct fw_device *fw_device_get_by_devt(dev_t devt);
113int fw_device_enable_phys_dma(struct fw_device *device);
114void fw_device_set_broadcast_channel(struct fw_device *device, int generation);
115
116void fw_device_cdev_update(struct fw_device *device);
117void fw_device_cdev_remove(struct fw_device *device);
118
119extern struct rw_semaphore fw_device_rwsem;
120extern struct idr fw_device_idr;
121extern int fw_cdev_major;
122
123/*
124 * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
125 */
126struct fw_unit {
127 struct device device;
128 u32 *directory;
129 struct fw_attribute_group attribute_group;
130};
131
132static inline struct fw_unit *fw_unit(struct device *dev)
133{
134 return container_of(dev, struct fw_unit, device);
135}
136
137static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
138{
139 get_device(&unit->device);
140
141 return unit;
142}
143
144static inline void fw_unit_put(struct fw_unit *unit)
145{
146 put_device(&unit->device);
147}
148
149#define CSR_OFFSET 0x40
150#define CSR_LEAF 0x80
151#define CSR_DIRECTORY 0xc0
152
153#define CSR_DESCRIPTOR 0x01
154#define CSR_VENDOR 0x03
155#define CSR_HARDWARE_VERSION 0x04
156#define CSR_NODE_CAPABILITIES 0x0c
157#define CSR_UNIT 0x11
158#define CSR_SPECIFIER_ID 0x12
159#define CSR_VERSION 0x13
160#define CSR_DEPENDENT_INFO 0x14
161#define CSR_MODEL 0x17
162#define CSR_INSTANCE 0x18
163#define CSR_DIRECTORY_ID 0x20
164
165struct fw_csr_iterator {
166 u32 *p;
167 u32 *end;
168};
169
170void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
171int fw_csr_iterator_next(struct fw_csr_iterator *ci,
172 int *key, int *value);
173
174#define FW_MATCH_VENDOR 0x0001
175#define FW_MATCH_MODEL 0x0002
176#define FW_MATCH_SPECIFIER_ID 0x0004
177#define FW_MATCH_VERSION 0x0008
178
179struct fw_device_id {
180 u32 match_flags;
181 u32 vendor;
182 u32 model;
183 u32 specifier_id;
184 u32 version;
185 void *driver_data;
186};
187
188struct fw_driver {
189 struct device_driver driver;
190 /* Called when the parent device sits through a bus reset. */
191 void (*update) (struct fw_unit *unit);
192 const struct fw_device_id *id_table;
193};
194
195static inline struct fw_driver *fw_driver(struct device_driver *drv)
196{
197 return container_of(drv, struct fw_driver, driver);
198}
199
200extern const struct file_operations fw_device_ops;
201
202#endif /* __fw_device_h */
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
deleted file mode 100644
index 3c497bb4fae4..000000000000
--- a/drivers/firewire/fw-topology.h
+++ /dev/null
@@ -1,77 +0,0 @@
1/*
2 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __fw_topology_h
20#define __fw_topology_h
21
22#include <linux/list.h>
23#include <linux/slab.h>
24
25#include <asm/atomic.h>
26
27enum {
28 FW_NODE_CREATED,
29 FW_NODE_UPDATED,
30 FW_NODE_DESTROYED,
31 FW_NODE_LINK_ON,
32 FW_NODE_LINK_OFF,
33 FW_NODE_INITIATED_RESET,
34};
35
36struct fw_node {
37 u16 node_id;
38 u8 color;
39 u8 port_count;
40 u8 link_on : 1;
41 u8 initiated_reset : 1;
42 u8 b_path : 1;
43 u8 phy_speed : 2; /* As in the self ID packet. */
44 u8 max_speed : 2; /* Minimum of all phy-speeds on the path from the
45 * local node to this node. */
46 u8 max_depth : 4; /* Maximum depth to any leaf node */
47 u8 max_hops : 4; /* Max hops in this sub tree */
48 atomic_t ref_count;
49
50 /* For serializing node topology into a list. */
51 struct list_head link;
52
53 /* Upper layer specific data. */
54 void *data;
55
56 struct fw_node *ports[0];
57};
58
59static inline struct fw_node *fw_node_get(struct fw_node *node)
60{
61 atomic_inc(&node->ref_count);
62
63 return node;
64}
65
66static inline void fw_node_put(struct fw_node *node)
67{
68 if (atomic_dec_and_test(&node->ref_count))
69 kfree(node);
70}
71
72struct fw_card;
73void fw_destroy_nodes(struct fw_card *card);
74
75int fw_compute_block_crc(u32 *block);
76
77#endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
deleted file mode 100644
index dfa799068f89..000000000000
--- a/drivers/firewire/fw-transaction.h
+++ /dev/null
@@ -1,446 +0,0 @@
1/*
2 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __fw_transaction_h
20#define __fw_transaction_h
21
22#include <linux/completion.h>
23#include <linux/device.h>
24#include <linux/dma-mapping.h>
25#include <linux/firewire-constants.h>
26#include <linux/kref.h>
27#include <linux/list.h>
28#include <linux/spinlock_types.h>
29#include <linux/timer.h>
30#include <linux/types.h>
31#include <linux/workqueue.h>
32
33#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
34#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
35#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
36#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
37#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
38#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
39
40#define LOCAL_BUS 0xffc0
41
42#define SELFID_PORT_CHILD 0x3
43#define SELFID_PORT_PARENT 0x2
44#define SELFID_PORT_NCONN 0x1
45#define SELFID_PORT_NONE 0x0
46
47#define PHY_PACKET_CONFIG 0x0
48#define PHY_PACKET_LINK_ON 0x1
49#define PHY_PACKET_SELF_ID 0x2
50
51/* Bit fields _within_ the PHY registers. */
52#define PHY_LINK_ACTIVE 0x80
53#define PHY_CONTENDER 0x40
54#define PHY_BUS_RESET 0x40
55#define PHY_BUS_SHORT_RESET 0x40
56
57#define CSR_REGISTER_BASE 0xfffff0000000ULL
58
59/* register offsets relative to CSR_REGISTER_BASE */
60#define CSR_STATE_CLEAR 0x0
61#define CSR_STATE_SET 0x4
62#define CSR_NODE_IDS 0x8
63#define CSR_RESET_START 0xc
64#define CSR_SPLIT_TIMEOUT_HI 0x18
65#define CSR_SPLIT_TIMEOUT_LO 0x1c
66#define CSR_CYCLE_TIME 0x200
67#define CSR_BUS_TIME 0x204
68#define CSR_BUSY_TIMEOUT 0x210
69#define CSR_BUS_MANAGER_ID 0x21c
70#define CSR_BANDWIDTH_AVAILABLE 0x220
71#define CSR_CHANNELS_AVAILABLE 0x224
72#define CSR_CHANNELS_AVAILABLE_HI 0x224
73#define CSR_CHANNELS_AVAILABLE_LO 0x228
74#define CSR_BROADCAST_CHANNEL 0x234
75#define CSR_CONFIG_ROM 0x400
76#define CSR_CONFIG_ROM_END 0x800
77#define CSR_FCP_COMMAND 0xB00
78#define CSR_FCP_RESPONSE 0xD00
79#define CSR_FCP_END 0xF00
80#define CSR_TOPOLOGY_MAP 0x1000
81#define CSR_TOPOLOGY_MAP_END 0x1400
82#define CSR_SPEED_MAP 0x2000
83#define CSR_SPEED_MAP_END 0x3000
84
85#define BANDWIDTH_AVAILABLE_INITIAL 4915
86#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
87#define BROADCAST_CHANNEL_VALID (1 << 30)
88
89#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
90#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
91
92static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
93{
94 u32 *dst = _dst;
95 __be32 *src = _src;
96 int i;
97
98 for (i = 0; i < size / 4; i++)
99 dst[i] = be32_to_cpu(src[i]);
100}
101
102static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
103{
104 fw_memcpy_from_be32(_dst, _src, size);
105}
106
107struct fw_card;
108struct fw_packet;
109struct fw_node;
110struct fw_request;
111
112struct fw_descriptor {
113 struct list_head link;
114 size_t length;
115 u32 immediate;
116 u32 key;
117 const u32 *data;
118};
119
120int fw_core_add_descriptor(struct fw_descriptor *desc);
121void fw_core_remove_descriptor(struct fw_descriptor *desc);
122
123typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
124 struct fw_card *card, int status);
125
126typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
127 void *data, size_t length,
128 void *callback_data);
129
130/*
131 * Important note: The callback must guarantee that either fw_send_response()
132 * or kfree() is called on the @request.
133 */
134typedef void (*fw_address_callback_t)(struct fw_card *card,
135 struct fw_request *request,
136 int tcode, int destination, int source,
137 int generation, int speed,
138 unsigned long long offset,
139 void *data, size_t length,
140 void *callback_data);
141
142struct fw_packet {
143 int speed;
144 int generation;
145 u32 header[4];
146 size_t header_length;
147 void *payload;
148 size_t payload_length;
149 dma_addr_t payload_bus;
150 u32 timestamp;
151
152 /*
153 * This callback is called when the packet transmission has
154 * completed; for successful transmission, the status code is
155 * the ack received from the destination, otherwise it's a
156 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
157 * The callback can be called from tasklet context and thus
158 * must never block.
159 */
160 fw_packet_callback_t callback;
161 int ack;
162 struct list_head link;
163 void *driver_data;
164};
165
166struct fw_transaction {
167 int node_id; /* The generation is implied; it is always the current. */
168 int tlabel;
169 int timestamp;
170 struct list_head link;
171
172 struct fw_packet packet;
173
174 /*
175 * The data passed to the callback is valid only during the
176 * callback.
177 */
178 fw_transaction_callback_t callback;
179 void *callback_data;
180};
181
182struct fw_address_handler {
183 u64 offset;
184 size_t length;
185 fw_address_callback_t address_callback;
186 void *callback_data;
187 struct list_head link;
188};
189
190struct fw_address_region {
191 u64 start;
192 u64 end;
193};
194
195extern const struct fw_address_region fw_high_memory_region;
196
197int fw_core_add_address_handler(struct fw_address_handler *handler,
198 const struct fw_address_region *region);
199void fw_core_remove_address_handler(struct fw_address_handler *handler);
200void fw_fill_response(struct fw_packet *response, u32 *request_header,
201 int rcode, void *payload, size_t length);
202void fw_send_response(struct fw_card *card,
203 struct fw_request *request, int rcode);
204
205extern struct bus_type fw_bus_type;
206
207struct fw_card {
208 const struct fw_card_driver *driver;
209 struct device *device;
210 struct kref kref;
211 struct completion done;
212
213 int node_id;
214 int generation;
215 int current_tlabel, tlabel_mask;
216 struct list_head transaction_list;
217 struct timer_list flush_timer;
218 unsigned long reset_jiffies;
219
220 unsigned long long guid;
221 unsigned max_receive;
222 int link_speed;
223 int config_rom_generation;
224
225 spinlock_t lock; /* Take this lock when handling the lists in
226 * this struct. */
227 struct fw_node *local_node;
228 struct fw_node *root_node;
229 struct fw_node *irm_node;
230 u8 color; /* must be u8 to match the definition in struct fw_node */
231 int gap_count;
232 bool beta_repeaters_present;
233
234 int index;
235
236 struct list_head link;
237
238 /* Work struct for BM duties. */
239 struct delayed_work work;
240 int bm_retries;
241 int bm_generation;
242
243 bool broadcast_channel_allocated;
244 u32 broadcast_channel;
245 u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
246};
247
248static inline struct fw_card *fw_card_get(struct fw_card *card)
249{
250 kref_get(&card->kref);
251
252 return card;
253}
254
255void fw_card_release(struct kref *kref);
256
257static inline void fw_card_put(struct fw_card *card)
258{
259 kref_put(&card->kref, fw_card_release);
260}
261
262extern void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
263
264/*
265 * Check whether new_generation is the immediate successor of old_generation.
266 * Take counter roll-over at 255 (as per to OHCI) into account.
267 */
268static inline bool is_next_generation(int new_generation, int old_generation)
269{
270 return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
271}
272
273/*
274 * The iso packet format allows for an immediate header/payload part
275 * stored in 'header' immediately after the packet info plus an
276 * indirect payload part that is pointer to by the 'payload' field.
277 * Applications can use one or the other or both to implement simple
278 * low-bandwidth streaming (e.g. audio) or more advanced
279 * scatter-gather streaming (e.g. assembling video frame automatically).
280 */
281
282struct fw_iso_packet {
283 u16 payload_length; /* Length of indirect payload. */
284 u32 interrupt : 1; /* Generate interrupt on this packet */
285 u32 skip : 1; /* Set to not send packet at all. */
286 u32 tag : 2;
287 u32 sy : 4;
288 u32 header_length : 8; /* Length of immediate header. */
289 u32 header[0];
290};
291
292#define FW_ISO_CONTEXT_TRANSMIT 0
293#define FW_ISO_CONTEXT_RECEIVE 1
294
295#define FW_ISO_CONTEXT_MATCH_TAG0 1
296#define FW_ISO_CONTEXT_MATCH_TAG1 2
297#define FW_ISO_CONTEXT_MATCH_TAG2 4
298#define FW_ISO_CONTEXT_MATCH_TAG3 8
299#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
300
301struct fw_iso_context;
302
303typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
304 u32 cycle, size_t header_length,
305 void *header, void *data);
306
307/*
308 * An iso buffer is just a set of pages mapped for DMA in the
309 * specified direction. Since the pages are to be used for DMA, they
310 * are not mapped into the kernel virtual address space. We store the
311 * DMA address in the page private. The helper function
312 * fw_iso_buffer_map() will map the pages into a given vma.
313 */
314
315struct fw_iso_buffer {
316 enum dma_data_direction direction;
317 struct page **pages;
318 int page_count;
319};
320
321struct fw_iso_context {
322 struct fw_card *card;
323 int type;
324 int channel;
325 int speed;
326 size_t header_size;
327 fw_iso_callback_t callback;
328 void *callback_data;
329};
330
331int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
332 int page_count, enum dma_data_direction direction);
333int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
334void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
335
336struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
337 int type, int channel, int speed, size_t header_size,
338 fw_iso_callback_t callback, void *callback_data);
339int fw_iso_context_queue(struct fw_iso_context *ctx,
340 struct fw_iso_packet *packet,
341 struct fw_iso_buffer *buffer,
342 unsigned long payload);
343int fw_iso_context_start(struct fw_iso_context *ctx,
344 int cycle, int sync, int tags);
345int fw_iso_context_stop(struct fw_iso_context *ctx);
346void fw_iso_context_destroy(struct fw_iso_context *ctx);
347
348void fw_iso_resource_manage(struct fw_card *card, int generation,
349 u64 channels_mask, int *channel, int *bandwidth, bool allocate);
350
351struct fw_card_driver {
352 /*
353 * Enable the given card with the given initial config rom.
354 * This function is expected to activate the card, and either
355 * enable the PHY or set the link_on bit and initiate a bus
356 * reset.
357 */
358 int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
359
360 int (*update_phy_reg)(struct fw_card *card, int address,
361 int clear_bits, int set_bits);
362
363 /*
364 * Update the config rom for an enabled card. This function
365 * should change the config rom that is presented on the bus
366 * an initiate a bus reset.
367 */
368 int (*set_config_rom)(struct fw_card *card,
369 u32 *config_rom, size_t length);
370
371 void (*send_request)(struct fw_card *card, struct fw_packet *packet);
372 void (*send_response)(struct fw_card *card, struct fw_packet *packet);
373 /* Calling cancel is valid once a packet has been submitted. */
374 int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
375
376 /*
377 * Allow the specified node ID to do direct DMA out and in of
378 * host memory. The card will disable this for all node when
379 * a bus reset happens, so driver need to reenable this after
380 * bus reset. Returns 0 on success, -ENODEV if the card
381 * doesn't support this, -ESTALE if the generation doesn't
382 * match.
383 */
384 int (*enable_phys_dma)(struct fw_card *card,
385 int node_id, int generation);
386
387 u64 (*get_bus_time)(struct fw_card *card);
388
389 struct fw_iso_context *
390 (*allocate_iso_context)(struct fw_card *card,
391 int type, int channel, size_t header_size);
392 void (*free_iso_context)(struct fw_iso_context *ctx);
393
394 int (*start_iso)(struct fw_iso_context *ctx,
395 s32 cycle, u32 sync, u32 tags);
396
397 int (*queue_iso)(struct fw_iso_context *ctx,
398 struct fw_iso_packet *packet,
399 struct fw_iso_buffer *buffer,
400 unsigned long payload);
401
402 int (*stop_iso)(struct fw_iso_context *ctx);
403};
404
405int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
406
407void fw_send_request(struct fw_card *card, struct fw_transaction *t,
408 int tcode, int destination_id, int generation, int speed,
409 unsigned long long offset, void *payload, size_t length,
410 fw_transaction_callback_t callback, void *callback_data);
411int fw_cancel_transaction(struct fw_card *card,
412 struct fw_transaction *transaction);
413void fw_flush_transactions(struct fw_card *card);
414int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
415 int generation, int speed, unsigned long long offset,
416 void *payload, size_t length);
417void fw_send_phy_config(struct fw_card *card,
418 int node_id, int generation, int gap_count);
419
420static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
421{
422 return tag << 14 | channel << 8 | sy;
423}
424
425/*
426 * Called by the topology code to inform the device code of node
427 * activity; found, lost, or updated nodes.
428 */
429void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
430
431/* API used by card level drivers */
432
433void fw_card_initialize(struct fw_card *card,
434 const struct fw_card_driver *driver, struct device *device);
435int fw_card_add(struct fw_card *card,
436 u32 max_receive, u32 link_speed, u64 guid);
437void fw_core_remove_card(struct fw_card *card);
438void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
439 int generation, int self_id_count, u32 *self_ids);
440void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
441void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
442
443extern int fw_irm_set_broadcast_channel_register(struct device *dev,
444 void *data);
445
446#endif /* __fw_transaction_h */
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/ohci.c
index 1180d0be0bb4..ecddd11b797a 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/ohci.c
@@ -20,17 +20,25 @@
20 20
21#include <linux/compiler.h> 21#include <linux/compiler.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/device.h>
23#include <linux/dma-mapping.h> 24#include <linux/dma-mapping.h>
25#include <linux/firewire.h>
26#include <linux/firewire-constants.h>
24#include <linux/gfp.h> 27#include <linux/gfp.h>
25#include <linux/init.h> 28#include <linux/init.h>
26#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/io.h>
27#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/list.h>
28#include <linux/mm.h> 33#include <linux/mm.h>
29#include <linux/module.h> 34#include <linux/module.h>
30#include <linux/moduleparam.h> 35#include <linux/moduleparam.h>
31#include <linux/pci.h> 36#include <linux/pci.h>
32#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/string.h>
33 39
40#include <asm/atomic.h>
41#include <asm/byteorder.h>
34#include <asm/page.h> 42#include <asm/page.h>
35#include <asm/system.h> 43#include <asm/system.h>
36 44
@@ -38,8 +46,8 @@
38#include <asm/pmac_feature.h> 46#include <asm/pmac_feature.h>
39#endif 47#endif
40 48
41#include "fw-ohci.h" 49#include "core.h"
42#include "fw-transaction.h" 50#include "ohci.h"
43 51
44#define DESCRIPTOR_OUTPUT_MORE 0 52#define DESCRIPTOR_OUTPUT_MORE 0
45#define DESCRIPTOR_OUTPUT_LAST (1 << 12) 53#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
@@ -178,7 +186,7 @@ struct fw_ohci {
178 int node_id; 186 int node_id;
179 int generation; 187 int generation;
180 int request_generation; /* for timestamping incoming requests */ 188 int request_generation; /* for timestamping incoming requests */
181 u32 bus_seconds; 189 atomic_t bus_seconds;
182 190
183 bool use_dualbuffer; 191 bool use_dualbuffer;
184 bool old_uninorth; 192 bool old_uninorth;
@@ -231,7 +239,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
231#define OHCI1394_MAX_AT_RESP_RETRIES 0x2 239#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
232#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 240#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
233 241
234#define FW_OHCI_MAJOR 240
235#define OHCI1394_REGISTER_SIZE 0x800 242#define OHCI1394_REGISTER_SIZE 0x800
236#define OHCI_LOOP_COUNT 500 243#define OHCI_LOOP_COUNT 500
237#define OHCI1394_PCI_HCI_Control 0x40 244#define OHCI1394_PCI_HCI_Control 0x40
@@ -1434,7 +1441,7 @@ static irqreturn_t irq_handler(int irq, void *data)
1434 if (event & OHCI1394_cycle64Seconds) { 1441 if (event & OHCI1394_cycle64Seconds) {
1435 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1442 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1436 if ((cycle_time & 0x80000000) == 0) 1443 if ((cycle_time & 0x80000000) == 0)
1437 ohci->bus_seconds++; 1444 atomic_inc(&ohci->bus_seconds);
1438 } 1445 }
1439 1446
1440 return IRQ_HANDLED; 1447 return IRQ_HANDLED;
@@ -1770,7 +1777,7 @@ static u64 ohci_get_bus_time(struct fw_card *card)
1770 u64 bus_time; 1777 u64 bus_time;
1771 1778
1772 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer); 1779 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1773 bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time; 1780 bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time;
1774 1781
1775 return bus_time; 1782 return bus_time;
1776} 1783}
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/ohci.h
index a2fbb6240ca7..ba492d85c516 100644
--- a/drivers/firewire/fw-ohci.h
+++ b/drivers/firewire/ohci.h
@@ -1,5 +1,5 @@
1#ifndef __fw_ohci_h 1#ifndef _FIREWIRE_OHCI_H
2#define __fw_ohci_h 2#define _FIREWIRE_OHCI_H
3 3
4/* OHCI register map */ 4/* OHCI register map */
5 5
@@ -154,4 +154,4 @@
154 154
155#define OHCI1394_phy_tcode 0xe 155#define OHCI1394_phy_tcode 0xe
156 156
157#endif /* __fw_ohci_h */ 157#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/sbp2.c
index 2bcf51557c72..24c45635376a 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -30,18 +30,28 @@
30 30
31#include <linux/blkdev.h> 31#include <linux/blkdev.h>
32#include <linux/bug.h> 32#include <linux/bug.h>
33#include <linux/completion.h>
33#include <linux/delay.h> 34#include <linux/delay.h>
34#include <linux/device.h> 35#include <linux/device.h>
35#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
37#include <linux/firewire.h>
38#include <linux/firewire-constants.h>
39#include <linux/init.h>
40#include <linux/jiffies.h>
36#include <linux/kernel.h> 41#include <linux/kernel.h>
42#include <linux/kref.h>
43#include <linux/list.h>
37#include <linux/mod_devicetable.h> 44#include <linux/mod_devicetable.h>
38#include <linux/module.h> 45#include <linux/module.h>
39#include <linux/moduleparam.h> 46#include <linux/moduleparam.h>
40#include <linux/scatterlist.h> 47#include <linux/scatterlist.h>
48#include <linux/slab.h>
49#include <linux/spinlock.h>
41#include <linux/string.h> 50#include <linux/string.h>
42#include <linux/stringify.h> 51#include <linux/stringify.h>
43#include <linux/timer.h>
44#include <linux/workqueue.h> 52#include <linux/workqueue.h>
53
54#include <asm/byteorder.h>
45#include <asm/system.h> 55#include <asm/system.h>
46 56
47#include <scsi/scsi.h> 57#include <scsi/scsi.h>
@@ -49,10 +59,6 @@
49#include <scsi/scsi_device.h> 59#include <scsi/scsi_device.h>
50#include <scsi/scsi_host.h> 60#include <scsi/scsi_host.h>
51 61
52#include "fw-device.h"
53#include "fw-topology.h"
54#include "fw-transaction.h"
55
56/* 62/*
57 * So far only bridges from Oxford Semiconductor are known to support 63 * So far only bridges from Oxford Semiconductor are known to support
58 * concurrent logins. Depending on firmware, four or two concurrent logins 64 * concurrent logins. Depending on firmware, four or two concurrent logins
@@ -174,6 +180,11 @@ struct sbp2_target {
174 int blocked; /* ditto */ 180 int blocked; /* ditto */
175}; 181};
176 182
183static struct fw_device *target_device(struct sbp2_target *tgt)
184{
185 return fw_parent_device(tgt->unit);
186}
187
177/* Impossible login_id, to detect logout attempt before successful login */ 188/* Impossible login_id, to detect logout attempt before successful login */
178#define INVALID_LOGIN_ID 0x10000 189#define INVALID_LOGIN_ID 0x10000
179 190
@@ -482,7 +493,7 @@ static void complete_transaction(struct fw_card *card, int rcode,
482static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, 493static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
483 int node_id, int generation, u64 offset) 494 int node_id, int generation, u64 offset)
484{ 495{
485 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 496 struct fw_device *device = target_device(lu->tgt);
486 unsigned long flags; 497 unsigned long flags;
487 498
488 orb->pointer.high = 0; 499 orb->pointer.high = 0;
@@ -504,7 +515,7 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
504 515
505static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) 516static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
506{ 517{
507 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 518 struct fw_device *device = target_device(lu->tgt);
508 struct sbp2_orb *orb, *next; 519 struct sbp2_orb *orb, *next;
509 struct list_head list; 520 struct list_head list;
510 unsigned long flags; 521 unsigned long flags;
@@ -542,7 +553,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
542 int generation, int function, 553 int generation, int function,
543 int lun_or_login_id, void *response) 554 int lun_or_login_id, void *response)
544{ 555{
545 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 556 struct fw_device *device = target_device(lu->tgt);
546 struct sbp2_management_orb *orb; 557 struct sbp2_management_orb *orb;
547 unsigned int timeout; 558 unsigned int timeout;
548 int retval = -ENOMEM; 559 int retval = -ENOMEM;
@@ -638,7 +649,7 @@ static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
638 649
639static void sbp2_agent_reset(struct sbp2_logical_unit *lu) 650static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
640{ 651{
641 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 652 struct fw_device *device = target_device(lu->tgt);
642 __be32 d = 0; 653 __be32 d = 0;
643 654
644 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, 655 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
@@ -655,7 +666,7 @@ static void complete_agent_reset_write_no_wait(struct fw_card *card,
655 666
656static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) 667static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
657{ 668{
658 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 669 struct fw_device *device = target_device(lu->tgt);
659 struct fw_transaction *t; 670 struct fw_transaction *t;
660 static __be32 d; 671 static __be32 d;
661 672
@@ -694,7 +705,7 @@ static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
694static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) 705static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
695{ 706{
696 struct sbp2_target *tgt = lu->tgt; 707 struct sbp2_target *tgt = lu->tgt;
697 struct fw_card *card = fw_device(tgt->unit->device.parent)->card; 708 struct fw_card *card = target_device(tgt)->card;
698 struct Scsi_Host *shost = 709 struct Scsi_Host *shost =
699 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); 710 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
700 unsigned long flags; 711 unsigned long flags;
@@ -718,7 +729,7 @@ static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
718static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) 729static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
719{ 730{
720 struct sbp2_target *tgt = lu->tgt; 731 struct sbp2_target *tgt = lu->tgt;
721 struct fw_card *card = fw_device(tgt->unit->device.parent)->card; 732 struct fw_card *card = target_device(tgt)->card;
722 struct Scsi_Host *shost = 733 struct Scsi_Host *shost =
723 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); 734 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
724 unsigned long flags; 735 unsigned long flags;
@@ -743,7 +754,7 @@ static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
743 */ 754 */
744static void sbp2_unblock(struct sbp2_target *tgt) 755static void sbp2_unblock(struct sbp2_target *tgt)
745{ 756{
746 struct fw_card *card = fw_device(tgt->unit->device.parent)->card; 757 struct fw_card *card = target_device(tgt)->card;
747 struct Scsi_Host *shost = 758 struct Scsi_Host *shost =
748 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); 759 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
749 unsigned long flags; 760 unsigned long flags;
@@ -773,7 +784,7 @@ static void sbp2_release_target(struct kref *kref)
773 struct Scsi_Host *shost = 784 struct Scsi_Host *shost =
774 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); 785 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
775 struct scsi_device *sdev; 786 struct scsi_device *sdev;
776 struct fw_device *device = fw_device(tgt->unit->device.parent); 787 struct fw_device *device = target_device(tgt);
777 788
778 /* prevent deadlocks */ 789 /* prevent deadlocks */
779 sbp2_unblock(tgt); 790 sbp2_unblock(tgt);
@@ -846,7 +857,7 @@ static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
846 */ 857 */
847static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) 858static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
848{ 859{
849 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 860 struct fw_device *device = target_device(lu->tgt);
850 __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT); 861 __be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
851 862
852 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST, 863 fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
@@ -862,7 +873,7 @@ static void sbp2_login(struct work_struct *work)
862 struct sbp2_logical_unit *lu = 873 struct sbp2_logical_unit *lu =
863 container_of(work, struct sbp2_logical_unit, work.work); 874 container_of(work, struct sbp2_logical_unit, work.work);
864 struct sbp2_target *tgt = lu->tgt; 875 struct sbp2_target *tgt = lu->tgt;
865 struct fw_device *device = fw_device(tgt->unit->device.parent); 876 struct fw_device *device = target_device(tgt);
866 struct Scsi_Host *shost; 877 struct Scsi_Host *shost;
867 struct scsi_device *sdev; 878 struct scsi_device *sdev;
868 struct sbp2_login_response response; 879 struct sbp2_login_response response;
@@ -1110,7 +1121,7 @@ static struct scsi_host_template scsi_driver_template;
1110static int sbp2_probe(struct device *dev) 1121static int sbp2_probe(struct device *dev)
1111{ 1122{
1112 struct fw_unit *unit = fw_unit(dev); 1123 struct fw_unit *unit = fw_unit(dev);
1113 struct fw_device *device = fw_device(unit->device.parent); 1124 struct fw_device *device = fw_parent_device(unit);
1114 struct sbp2_target *tgt; 1125 struct sbp2_target *tgt;
1115 struct sbp2_logical_unit *lu; 1126 struct sbp2_logical_unit *lu;
1116 struct Scsi_Host *shost; 1127 struct Scsi_Host *shost;
@@ -1125,7 +1136,7 @@ static int sbp2_probe(struct device *dev)
1125 return -ENOMEM; 1136 return -ENOMEM;
1126 1137
1127 tgt = (struct sbp2_target *)shost->hostdata; 1138 tgt = (struct sbp2_target *)shost->hostdata;
1128 unit->device.driver_data = tgt; 1139 dev_set_drvdata(&unit->device, tgt);
1129 tgt->unit = unit; 1140 tgt->unit = unit;
1130 kref_init(&tgt->kref); 1141 kref_init(&tgt->kref);
1131 INIT_LIST_HEAD(&tgt->lu_list); 1142 INIT_LIST_HEAD(&tgt->lu_list);
@@ -1180,7 +1191,7 @@ static int sbp2_probe(struct device *dev)
1180static int sbp2_remove(struct device *dev) 1191static int sbp2_remove(struct device *dev)
1181{ 1192{
1182 struct fw_unit *unit = fw_unit(dev); 1193 struct fw_unit *unit = fw_unit(dev);
1183 struct sbp2_target *tgt = unit->device.driver_data; 1194 struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
1184 1195
1185 sbp2_target_put(tgt); 1196 sbp2_target_put(tgt);
1186 return 0; 1197 return 0;
@@ -1191,7 +1202,7 @@ static void sbp2_reconnect(struct work_struct *work)
1191 struct sbp2_logical_unit *lu = 1202 struct sbp2_logical_unit *lu =
1192 container_of(work, struct sbp2_logical_unit, work.work); 1203 container_of(work, struct sbp2_logical_unit, work.work);
1193 struct sbp2_target *tgt = lu->tgt; 1204 struct sbp2_target *tgt = lu->tgt;
1194 struct fw_device *device = fw_device(tgt->unit->device.parent); 1205 struct fw_device *device = target_device(tgt);
1195 int generation, node_id, local_node_id; 1206 int generation, node_id, local_node_id;
1196 1207
1197 if (fw_device_is_shutdown(device)) 1208 if (fw_device_is_shutdown(device))
@@ -1240,10 +1251,10 @@ static void sbp2_reconnect(struct work_struct *work)
1240 1251
1241static void sbp2_update(struct fw_unit *unit) 1252static void sbp2_update(struct fw_unit *unit)
1242{ 1253{
1243 struct sbp2_target *tgt = unit->device.driver_data; 1254 struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
1244 struct sbp2_logical_unit *lu; 1255 struct sbp2_logical_unit *lu;
1245 1256
1246 fw_device_enable_phys_dma(fw_device(unit->device.parent)); 1257 fw_device_enable_phys_dma(fw_parent_device(unit));
1247 1258
1248 /* 1259 /*
1249 * Fw-core serializes sbp2_update() against sbp2_remove(). 1260 * Fw-core serializes sbp2_update() against sbp2_remove().
@@ -1259,9 +1270,10 @@ static void sbp2_update(struct fw_unit *unit)
1259#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e 1270#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
1260#define SBP2_SW_VERSION_ENTRY 0x00010483 1271#define SBP2_SW_VERSION_ENTRY 0x00010483
1261 1272
1262static const struct fw_device_id sbp2_id_table[] = { 1273static const struct ieee1394_device_id sbp2_id_table[] = {
1263 { 1274 {
1264 .match_flags = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION, 1275 .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
1276 IEEE1394_MATCH_VERSION,
1265 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY, 1277 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
1266 .version = SBP2_SW_VERSION_ENTRY, 1278 .version = SBP2_SW_VERSION_ENTRY,
1267 }, 1279 },
@@ -1335,7 +1347,7 @@ static void complete_command_orb(struct sbp2_orb *base_orb,
1335{ 1347{
1336 struct sbp2_command_orb *orb = 1348 struct sbp2_command_orb *orb =
1337 container_of(base_orb, struct sbp2_command_orb, base); 1349 container_of(base_orb, struct sbp2_command_orb, base);
1338 struct fw_device *device = fw_device(orb->lu->tgt->unit->device.parent); 1350 struct fw_device *device = target_device(orb->lu->tgt);
1339 int result; 1351 int result;
1340 1352
1341 if (status != NULL) { 1353 if (status != NULL) {
@@ -1442,7 +1454,7 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
1442static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) 1454static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1443{ 1455{
1444 struct sbp2_logical_unit *lu = cmd->device->hostdata; 1456 struct sbp2_logical_unit *lu = cmd->device->hostdata;
1445 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 1457 struct fw_device *device = target_device(lu->tgt);
1446 struct sbp2_command_orb *orb; 1458 struct sbp2_command_orb *orb;
1447 int generation, retval = SCSI_MLQUEUE_HOST_BUSY; 1459 int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
1448 1460
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 05aa2d406ac6..d5ea8a68d338 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -31,8 +31,12 @@
31 * information is necessary as for the resource tree. 31 * information is necessary as for the resource tree.
32 */ 32 */
33struct firmware_map_entry { 33struct firmware_map_entry {
34 resource_size_t start; /* start of the memory range */ 34 /*
35 resource_size_t end; /* end of the memory range (incl.) */ 35 * start and end must be u64 rather than resource_size_t, because e820
36 * resources can lie at addresses above 4G.
37 */
38 u64 start; /* start of the memory range */
39 u64 end; /* end of the memory range (incl.) */
36 const char *type; /* type of the memory range */ 40 const char *type; /* type of the memory range */
37 struct list_head list; /* entry for the linked list */ 41 struct list_head list; /* entry for the linked list */
38 struct kobject kobj; /* kobject for each entry */ 42 struct kobject kobj; /* kobject for each entry */
@@ -101,7 +105,7 @@ static LIST_HEAD(map_entries);
101 * Common implementation of firmware_map_add() and firmware_map_add_early() 105 * Common implementation of firmware_map_add() and firmware_map_add_early()
102 * which expects a pre-allocated struct firmware_map_entry. 106 * which expects a pre-allocated struct firmware_map_entry.
103 **/ 107 **/
104static int firmware_map_add_entry(resource_size_t start, resource_size_t end, 108static int firmware_map_add_entry(u64 start, u64 end,
105 const char *type, 109 const char *type,
106 struct firmware_map_entry *entry) 110 struct firmware_map_entry *entry)
107{ 111{
@@ -132,8 +136,7 @@ static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
132 * 136 *
133 * Returns 0 on success, or -ENOMEM if no memory could be allocated. 137 * Returns 0 on success, or -ENOMEM if no memory could be allocated.
134 **/ 138 **/
135int firmware_map_add(resource_size_t start, resource_size_t end, 139int firmware_map_add(u64 start, u64 end, const char *type)
136 const char *type)
137{ 140{
138 struct firmware_map_entry *entry; 141 struct firmware_map_entry *entry;
139 142
@@ -157,8 +160,7 @@ int firmware_map_add(resource_size_t start, resource_size_t end,
157 * 160 *
158 * Returns 0 on success, or -ENOMEM if no memory could be allocated. 161 * Returns 0 on success, or -ENOMEM if no memory could be allocated.
159 **/ 162 **/
160int __init firmware_map_add_early(resource_size_t start, resource_size_t end, 163int __init firmware_map_add_early(u64 start, u64 end, const char *type)
161 const char *type)
162{ 164{
163 struct firmware_map_entry *entry; 165 struct firmware_map_entry *entry;
164 166
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f5d46e7199d4..c961fe415aef 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -18,6 +18,14 @@ menuconfig DRM
18 details. You should also select and configure AGP 18 details. You should also select and configure AGP
19 (/dev/agpgart) support. 19 (/dev/agpgart) support.
20 20
21config DRM_TTM
22 tristate
23 depends on DRM
24 help
25 GPU memory management subsystem for devices with multiple
26 GPU memory types. Will be enabled automatically if a device driver
27 uses it.
28
21config DRM_TDFX 29config DRM_TDFX
22 tristate "3dfx Banshee/Voodoo3+" 30 tristate "3dfx Banshee/Voodoo3+"
23 depends on DRM && PCI 31 depends on DRM && PCI
@@ -36,6 +44,11 @@ config DRM_R128
36config DRM_RADEON 44config DRM_RADEON
37 tristate "ATI Radeon" 45 tristate "ATI Radeon"
38 depends on DRM && PCI 46 depends on DRM && PCI
47 select FB_CFB_FILLRECT
48 select FB_CFB_COPYAREA
49 select FB_CFB_IMAGEBLIT
50 select FB
51 select FRAMEBUFFER_CONSOLE if !EMBEDDED
39 help 52 help
40 Choose this option if you have an ATI Radeon graphics card. There 53 Choose this option if you have an ATI Radeon graphics card. There
41 are both PCI and AGP versions. You don't need to choose this to 54 are both PCI and AGP versions. You don't need to choose this to
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4ec5061fa584..4e89ab08b7b8 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -26,4 +26,4 @@ obj-$(CONFIG_DRM_I915) += i915/
26obj-$(CONFIG_DRM_SIS) += sis/ 26obj-$(CONFIG_DRM_SIS) += sis/
27obj-$(CONFIG_DRM_SAVAGE)+= savage/ 27obj-$(CONFIG_DRM_SAVAGE)+= savage/
28obj-$(CONFIG_DRM_VIA) +=via/ 28obj-$(CONFIG_DRM_VIA) +=via/
29 29obj-$(CONFIG_DRM_TTM) += ttm/
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index c77c6c6d9d2c..6ce0e2667a85 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -105,7 +105,7 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
105 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, 105 ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
106 root, tmp, &drm_debugfs_fops); 106 root, tmp, &drm_debugfs_fops);
107 if (!ent) { 107 if (!ent) {
108 DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n", 108 DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
109 name, files[i].name); 109 name, files[i].name);
110 drm_free(tmp, sizeof(struct drm_info_node), 110 drm_free(tmp, sizeof(struct drm_info_node),
111 _DRM_DRIVER); 111 _DRM_DRIVER);
@@ -133,9 +133,9 @@ EXPORT_SYMBOL(drm_debugfs_create_files);
133 * \param minor device minor number 133 * \param minor device minor number
134 * \param root DRI debugfs dir entry. 134 * \param root DRI debugfs dir entry.
135 * 135 *
136 * Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry 136 * Create the DRI debugfs root entry "/sys/kernel/debug/dri", the device debugfs root entry
137 * "/debugfs/dri/%minor%/", and each entry in debugfs_list as 137 * "/sys/kernel/debug/dri/%minor%/", and each entry in debugfs_list as
138 * "/debugfs/dri/%minor%/%name%". 138 * "/sys/kernel/debug/dri/%minor%/%name%".
139 */ 139 */
140int drm_debugfs_init(struct drm_minor *minor, int minor_id, 140int drm_debugfs_init(struct drm_minor *minor, int minor_id,
141 struct dentry *root) 141 struct dentry *root)
@@ -148,7 +148,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
148 sprintf(name, "%d", minor_id); 148 sprintf(name, "%d", minor_id);
149 minor->debugfs_root = debugfs_create_dir(name, root); 149 minor->debugfs_root = debugfs_create_dir(name, root);
150 if (!minor->debugfs_root) { 150 if (!minor->debugfs_root) {
151 DRM_ERROR("Cannot create /debugfs/dri/%s\n", name); 151 DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s\n", name);
152 return -1; 152 return -1;
153 } 153 }
154 154
@@ -165,7 +165,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
165 ret = dev->driver->debugfs_init(minor); 165 ret = dev->driver->debugfs_init(minor);
166 if (ret) { 166 if (ret) {
167 DRM_ERROR("DRM: Driver failed to initialize " 167 DRM_ERROR("DRM: Driver failed to initialize "
168 "/debugfs/dri.\n"); 168 "/sys/kernel/debug/dri.\n");
169 return ret; 169 return ret;
170 } 170 }
171 } 171 }
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 019b7c578236..1bf7efd8d334 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -339,7 +339,7 @@ static int __init drm_core_init(void)
339 339
340 drm_debugfs_root = debugfs_create_dir("dri", NULL); 340 drm_debugfs_root = debugfs_create_dir("dri", NULL);
341 if (!drm_debugfs_root) { 341 if (!drm_debugfs_root) {
342 DRM_ERROR("Cannot create /debugfs/dri\n"); 342 DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
343 ret = -1; 343 ret = -1;
344 goto err_p3; 344 goto err_p3;
345 } 345 }
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 7819fd930a51..a912a0ff11cc 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -188,36 +188,34 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
188 188
189 189
190 190
191struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, 191struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
192 unsigned long size, unsigned alignment) 192 unsigned long size, unsigned alignment)
193{ 193{
194 194
195 struct drm_mm_node *align_splitoff = NULL; 195 struct drm_mm_node *align_splitoff = NULL;
196 struct drm_mm_node *child;
197 unsigned tmp = 0; 196 unsigned tmp = 0;
198 197
199 if (alignment) 198 if (alignment)
200 tmp = parent->start % alignment; 199 tmp = node->start % alignment;
201 200
202 if (tmp) { 201 if (tmp) {
203 align_splitoff = 202 align_splitoff =
204 drm_mm_split_at_start(parent, alignment - tmp, 0); 203 drm_mm_split_at_start(node, alignment - tmp, 0);
205 if (unlikely(align_splitoff == NULL)) 204 if (unlikely(align_splitoff == NULL))
206 return NULL; 205 return NULL;
207 } 206 }
208 207
209 if (parent->size == size) { 208 if (node->size == size) {
210 list_del_init(&parent->fl_entry); 209 list_del_init(&node->fl_entry);
211 parent->free = 0; 210 node->free = 0;
212 return parent;
213 } else { 211 } else {
214 child = drm_mm_split_at_start(parent, size, 0); 212 node = drm_mm_split_at_start(node, size, 0);
215 } 213 }
216 214
217 if (align_splitoff) 215 if (align_splitoff)
218 drm_mm_put_block(align_splitoff); 216 drm_mm_put_block(align_splitoff);
219 217
220 return child; 218 return node;
221} 219}
222 220
223EXPORT_SYMBOL(drm_mm_get_block); 221EXPORT_SYMBOL(drm_mm_get_block);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 89050684fe0d..387a8de1bc7e 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -343,7 +343,7 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
343#if defined(CONFIG_DEBUG_FS) 343#if defined(CONFIG_DEBUG_FS)
344 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root); 344 ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
345 if (ret) { 345 if (ret) {
346 DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n"); 346 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
347 goto err_g2; 347 goto err_g2;
348 } 348 }
349#endif 349#endif
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 9987ab880835..85ec31b3ff00 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -70,6 +70,11 @@ static ssize_t version_show(struct class *dev, char *buf)
70 CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); 70 CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
71} 71}
72 72
73static char *drm_nodename(struct device *dev)
74{
75 return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
76}
77
73static CLASS_ATTR(version, S_IRUGO, version_show, NULL); 78static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
74 79
75/** 80/**
@@ -101,6 +106,8 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
101 if (err) 106 if (err)
102 goto err_out_class; 107 goto err_out_class;
103 108
109 class->nodename = drm_nodename;
110
104 return class; 111 return class;
105 112
106err_out_class: 113err_out_class:
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 0ecf6b76a401..8e28e5993df5 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -504,6 +504,14 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
504 info->fbops = &intelfb_ops; 504 info->fbops = &intelfb_ops;
505 505
506 info->fix.line_length = fb->pitch; 506 info->fix.line_length = fb->pitch;
507
508 /* setup aperture base/size for vesafb takeover */
509 info->aperture_base = dev->mode_config.fb_base;
510 if (IS_I9XX(dev))
511 info->aperture_size = pci_resource_len(dev->pdev, 2);
512 else
513 info->aperture_size = pci_resource_len(dev->pdev, 0);
514
507 info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; 515 info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
508 info->fix.smem_len = size; 516 info->fix.smem_len = size;
509 517
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
new file mode 100644
index 000000000000..2168d67f09a6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -0,0 +1,34 @@
1config DRM_RADEON_KMS
2 bool "Enable modesetting on radeon by default"
3 depends on DRM_RADEON
4 select DRM_TTM
5 help
6 Choose this option if you want kernel modesetting enabled by default,
7 and you have a new enough userspace to support this. Running old
8 userspaces with this enabled will cause pain.
9
10 When kernel modesetting is enabled the IOCTL of radeon/drm
11 driver are considered as invalid and an error message is printed
12 in the log and they return failure.
13
14 KMS enabled userspace will use new API to talk with the radeon/drm
15 driver. The new API provide functions to create/destroy/share/mmap
16 buffer object which are then managed by the kernel memory manager
17 (here TTM). In order to submit command to the GPU the userspace
18 provide a buffer holding the command stream, along this buffer
19 userspace have to provide a list of buffer object used by the
20 command stream. The kernel radeon driver will then place buffer
21 in GPU accessible memory and will update command stream to reflect
22 the position of the different buffers.
23
24 The kernel will also perform security check on command stream
25 provided by the user, we want to catch and forbid any illegal use
26 of the GPU such as DMA into random system memory or into memory
27 not owned by the process supplying the command stream. This part
28 of the code is still incomplete and this why we propose that patch
29 as a staging driver addition, future security might forbid current
30 experimental userspace to run.
31
32 This code support the following hardware : R1XX,R2XX,R3XX,R4XX,R5XX
33 (radeon up to X1950). Works is underway to provide support for R6XX,
34 R7XX and newer hardware (radeon from HD2XXX to HD4XXX).
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 52ce439a0f2e..5fae1e074b4b 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -3,7 +3,17 @@
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o r600_cp.o 6radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
7 radeon_irq.o r300_cmdbuf.o r600_cp.o
8
9radeon-$(CONFIG_DRM_RADEON_KMS) += radeon_device.o radeon_kms.o \
10 radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
11 atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
12 radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
13 radeon_encoders.o radeon_display.o radeon_cursor.o radeon_i2c.o \
14 radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
15 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
16 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rs780.o rv770.o
7 17
8radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 18radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
9 19
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
new file mode 100644
index 000000000000..6d0183c61d3b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -0,0 +1,578 @@
1/*
2* Copyright 2006-2007 Advanced Micro Devices, Inc.
3*
4* Permission is hereby granted, free of charge, to any person obtaining a
5* copy of this software and associated documentation files (the "Software"),
6* to deal in the Software without restriction, including without limitation
7* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8* and/or sell copies of the Software, and to permit persons to whom the
9* Software is furnished to do so, subject to the following conditions:
10*
11* The above copyright notice and this permission notice shall be included in
12* all copies or substantial portions of the Software.
13*
14* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20* OTHER DEALINGS IN THE SOFTWARE.
21*/
22/* based on stg/asic_reg/drivers/inc/asic_reg/ObjectID.h ver 23 */
23
24#ifndef _OBJECTID_H
25#define _OBJECTID_H
26
27#if defined(_X86_)
28#pragma pack(1)
29#endif
30
31/****************************************************/
32/* Graphics Object Type Definition */
33/****************************************************/
34#define GRAPH_OBJECT_TYPE_NONE 0x0
35#define GRAPH_OBJECT_TYPE_GPU 0x1
36#define GRAPH_OBJECT_TYPE_ENCODER 0x2
37#define GRAPH_OBJECT_TYPE_CONNECTOR 0x3
38#define GRAPH_OBJECT_TYPE_ROUTER 0x4
39/* deleted */
40
41/****************************************************/
42/* Encoder Object ID Definition */
43/****************************************************/
44#define ENCODER_OBJECT_ID_NONE 0x00
45
46/* Radeon Class Display Hardware */
47#define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01
48#define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02
49#define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03
50#define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04
51#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */
52#define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06
53#define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07
54
55/* External Third Party Encoders */
56#define ENCODER_OBJECT_ID_SI170B 0x08
57#define ENCODER_OBJECT_ID_CH7303 0x09
58#define ENCODER_OBJECT_ID_CH7301 0x0A
59#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */
60#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C
61#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D
62#define ENCODER_OBJECT_ID_TITFP513 0x0E
63#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */
64#define ENCODER_OBJECT_ID_VT1623 0x10
65#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
66#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
67/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
68#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
69#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
70#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15
71#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */
72#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */
73#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */
74#define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19
75#define ENCODER_OBJECT_ID_VT1625 0x1A
76#define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B
77#define ENCODER_OBJECT_ID_DP_AN9801 0x1C
78#define ENCODER_OBJECT_ID_DP_DP501 0x1D
79#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY 0x1E
80#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA 0x1F
81#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20
82#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21
83
84#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
85
86/****************************************************/
87/* Connector Object ID Definition */
88/****************************************************/
89#define CONNECTOR_OBJECT_ID_NONE 0x00
90#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01
91#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02
92#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03
93#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D 0x04
94#define CONNECTOR_OBJECT_ID_VGA 0x05
95#define CONNECTOR_OBJECT_ID_COMPOSITE 0x06
96#define CONNECTOR_OBJECT_ID_SVIDEO 0x07
97#define CONNECTOR_OBJECT_ID_YPbPr 0x08
98#define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09
99#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */
100#define CONNECTOR_OBJECT_ID_SCART 0x0B
101#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C
102#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D
103#define CONNECTOR_OBJECT_ID_LVDS 0x0E
104#define CONNECTOR_OBJECT_ID_7PIN_DIN 0x0F
105#define CONNECTOR_OBJECT_ID_PCIE_CONNECTOR 0x10
106#define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11
107#define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12
108#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
109
110/* deleted */
111
112/****************************************************/
113/* Router Object ID Definition */
114/****************************************************/
115#define ROUTER_OBJECT_ID_NONE 0x00
116#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01
117
118/****************************************************/
119/* Graphics Object ENUM ID Definition */
120/****************************************************/
121#define GRAPH_OBJECT_ENUM_ID1 0x01
122#define GRAPH_OBJECT_ENUM_ID2 0x02
123#define GRAPH_OBJECT_ENUM_ID3 0x03
124#define GRAPH_OBJECT_ENUM_ID4 0x04
125#define GRAPH_OBJECT_ENUM_ID5 0x05
126#define GRAPH_OBJECT_ENUM_ID6 0x06
127
128/****************************************************/
129/* Graphics Object ID Bit definition */
130/****************************************************/
131#define OBJECT_ID_MASK 0x00FF
132#define ENUM_ID_MASK 0x0700
133#define RESERVED1_ID_MASK 0x0800
134#define OBJECT_TYPE_MASK 0x7000
135#define RESERVED2_ID_MASK 0x8000
136
137#define OBJECT_ID_SHIFT 0x00
138#define ENUM_ID_SHIFT 0x08
139#define OBJECT_TYPE_SHIFT 0x0C
140
141/****************************************************/
142/* Graphics Object family definition */
143/****************************************************/
144#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \
145 (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
146 GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
147/****************************************************/
148/* GPU Object ID definition - Shared with BIOS */
149/****************************************************/
150#define GPU_ENUM_ID1 (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
151 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
152
153/****************************************************/
154/* Encoder Object ID definition - Shared with BIOS */
155/****************************************************/
156/*
157#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101
158#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102
159#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103
160#define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104
161#define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105
162#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106
163#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107
164#define ENCODER_SIL170B_ENUM_ID1 0x2108
165#define ENCODER_CH7303_ENUM_ID1 0x2109
166#define ENCODER_CH7301_ENUM_ID1 0x210A
167#define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B
168#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 0x210C
169#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 0x210D
170#define ENCODER_TITFP513_ENUM_ID1 0x210E
171#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 0x210F
172#define ENCODER_VT1623_ENUM_ID1 0x2110
173#define ENCODER_HDMI_SI1930_ENUM_ID1 0x2111
174#define ENCODER_HDMI_INTERNAL_ENUM_ID1 0x2112
175#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113
176#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114
177#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115
178#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116
179#define ENCODER_SI178_ENUM_ID1 0x2117
180#define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118
181#define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119
182#define ENCODER_VT1625_ENUM_ID1 0x211A
183#define ENCODER_HDMI_SI1932_ENUM_ID1 0x211B
184#define ENCODER_ENCODER_DP_AN9801_ENUM_ID1 0x211C
185#define ENCODER_DP_DP501_ENUM_ID1 0x211D
186#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E
187*/
188#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \
189 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
190 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
191 ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
192
193#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \
194 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
195 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
196 ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
197
198#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \
199 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
200 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
201 ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
202
203#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \
204 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
205 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
206 ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
207
208#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \
209 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
210 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
211 ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
212
213#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \
214 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
215 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
216 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
217
218#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \
219 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
220 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
221 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
222
223#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \
224 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
225 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
226 ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
227
228#define ENCODER_SIL170B_ENUM_ID1 \
229 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
230 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
231 ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
232
233#define ENCODER_CH7303_ENUM_ID1 \
234 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
235 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
236 ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
237
238#define ENCODER_CH7301_ENUM_ID1 \
239 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
240 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
241 ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
242
243#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \
244 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
245 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
246 ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
247
248#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \
249 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
250 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
251 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
252
253#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \
254 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
255 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
256 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
257
258#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \
259 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
260 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
261 ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
262
263#define ENCODER_TITFP513_ENUM_ID1 \
264 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
265 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
266 ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
267
268#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \
269 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
270 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
271 ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
272
273#define ENCODER_VT1623_ENUM_ID1 \
274 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
275 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
276 ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
277
278#define ENCODER_HDMI_SI1930_ENUM_ID1 \
279 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
280 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
281 ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
282
283#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \
284 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
285 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
286 ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
287
288#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \
289 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
290 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
291 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
292
293#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \
294 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
295 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
296 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
297
298#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \
299 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
300 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
301 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
302
303#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \
304 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
305 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
306 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
307
308#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \
309 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
310 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
311 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */
312
313#define ENCODER_SI178_ENUM_ID1 \
314 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
315 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
316 ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
317
318#define ENCODER_MVPU_FPGA_ENUM_ID1 \
319 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
320 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
321 ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
322
323#define ENCODER_INTERNAL_DDI_ENUM_ID1 \
324 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
325 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
326 ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
327
328#define ENCODER_VT1625_ENUM_ID1 \
329 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
330 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
331 ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
332
333#define ENCODER_HDMI_SI1932_ENUM_ID1 \
334 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
335 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
336 ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
337
338#define ENCODER_DP_DP501_ENUM_ID1 \
339 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
340 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
341 ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
342
343#define ENCODER_DP_AN9801_ENUM_ID1 \
344 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
345 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
346 ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
347
348#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \
349 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
350 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
351 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
352
353#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \
354 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
355 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
356 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
357
358#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \
359 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
360 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
361 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
362
363#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \
364 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
365 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
366 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
367
368#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \
369 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
370 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
371 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
372
373#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \
374 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
375 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
376 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
377
378#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \
379 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
380 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
381 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
382
383#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \
384 (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
385 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
386 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
387
388/****************************************************/
389/* Connector Object ID definition - Shared with BIOS */
390/****************************************************/
391/*
392#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 0x3101
393#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 0x3102
394#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 0x3103
395#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 0x3104
396#define CONNECTOR_VGA_ENUM_ID1 0x3105
397#define CONNECTOR_COMPOSITE_ENUM_ID1 0x3106
398#define CONNECTOR_SVIDEO_ENUM_ID1 0x3107
399#define CONNECTOR_YPbPr_ENUM_ID1 0x3108
400#define CONNECTOR_D_CONNECTORE_ENUM_ID1 0x3109
401#define CONNECTOR_9PIN_DIN_ENUM_ID1 0x310A
402#define CONNECTOR_SCART_ENUM_ID1 0x310B
403#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 0x310C
404#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 0x310D
405#define CONNECTOR_LVDS_ENUM_ID1 0x310E
406#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F
407#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110
408*/
409#define CONNECTOR_LVDS_ENUM_ID1 \
410 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
411 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
412 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
413
414#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \
415 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
416 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
417 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
418
419#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \
420 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
421 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
422 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
423
424#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \
425 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
426 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
427 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
428
429#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \
430 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
431 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
432 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
433
434#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \
435 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
436 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
437 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
438
439#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \
440 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
441 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
442 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
443
444#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \
445 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
446 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
447 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
448
449#define CONNECTOR_VGA_ENUM_ID1 \
450 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
451 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
452 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
453
454#define CONNECTOR_VGA_ENUM_ID2 \
455 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
456 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
457 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
458
459#define CONNECTOR_COMPOSITE_ENUM_ID1 \
460 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
461 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
462 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
463
464#define CONNECTOR_SVIDEO_ENUM_ID1 \
465 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
466 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
467 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
468
469#define CONNECTOR_YPbPr_ENUM_ID1 \
470 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
471 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
472 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
473
474#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \
475 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
476 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
477 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
478
479#define CONNECTOR_9PIN_DIN_ENUM_ID1 \
480 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
481 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
482 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
483
484#define CONNECTOR_SCART_ENUM_ID1 \
485 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
486 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
487 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
488
489#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \
490 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
491 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
492 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
493
494#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \
495 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
496 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
497 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
498
499#define CONNECTOR_7PIN_DIN_ENUM_ID1 \
500 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
501 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
502 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
503
504#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \
505 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
506 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
507 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
508
509#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \
510 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
511 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
512 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
513
514#define CONNECTOR_CROSSFIRE_ENUM_ID1 \
515 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
516 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
517 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
518
519#define CONNECTOR_CROSSFIRE_ENUM_ID2 \
520 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
521 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
522 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
523
524#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \
525 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
526 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
527 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
528
529#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \
530 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
531 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
532 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
533
534#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \
535 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
536 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
537 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
538
539#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \
540 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
541 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
542 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
543
544#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \
545 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
546 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
547 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
548
549#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \
550 (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
551 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
552 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
553
554/****************************************************/
555/* Router Object ID definition - Shared with BIOS */
556/****************************************************/
557#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \
558 (GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
559 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
560 ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
561
562/* deleted */
563
564/****************************************************/
565/* Object Cap definition - Shared with BIOS */
566/****************************************************/
567#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L
568#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L
569
570#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01
571#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02
572#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03
573
574#if defined(_X86_)
575#pragma pack()
576#endif
577
578#endif /*GRAPHICTYPE */
diff --git a/drivers/gpu/drm/radeon/atom-bits.h b/drivers/gpu/drm/radeon/atom-bits.h
new file mode 100644
index 000000000000..e8fae5c77514
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-bits.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#ifndef ATOM_BITS_H
26#define ATOM_BITS_H
27
28static inline uint8_t get_u8(void *bios, int ptr)
29{
30 return ((unsigned char *)bios)[ptr];
31}
32#define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
33#define CU8(ptr) get_u8(ctx->bios, (ptr))
34static inline uint16_t get_u16(void *bios, int ptr)
35{
36 return get_u8(bios ,ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
37}
38#define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
39#define CU16(ptr) get_u16(ctx->bios, (ptr))
40static inline uint32_t get_u32(void *bios, int ptr)
41{
42 return get_u16(bios, ptr)|(((uint32_t)get_u16(bios, ptr+2))<<16);
43}
44#define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
45#define CU32(ptr) get_u32(ctx->bios, (ptr))
46#define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
47
48#endif
diff --git a/drivers/gpu/drm/radeon/atom-names.h b/drivers/gpu/drm/radeon/atom-names.h
new file mode 100644
index 000000000000..6f907a5ffa5f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-names.h
@@ -0,0 +1,100 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#ifndef ATOM_NAMES_H
26#define ATOM_NAMES_H
27
28#include "atom.h"
29
30#ifdef ATOM_DEBUG
31
32#define ATOM_OP_NAMES_CNT 123
33static char *atom_op_names[ATOM_OP_NAMES_CNT] = {
34"RESERVED", "MOVE_REG", "MOVE_PS", "MOVE_WS", "MOVE_FB", "MOVE_PLL",
35"MOVE_MC", "AND_REG", "AND_PS", "AND_WS", "AND_FB", "AND_PLL", "AND_MC",
36"OR_REG", "OR_PS", "OR_WS", "OR_FB", "OR_PLL", "OR_MC", "SHIFT_LEFT_REG",
37"SHIFT_LEFT_PS", "SHIFT_LEFT_WS", "SHIFT_LEFT_FB", "SHIFT_LEFT_PLL",
38"SHIFT_LEFT_MC", "SHIFT_RIGHT_REG", "SHIFT_RIGHT_PS", "SHIFT_RIGHT_WS",
39"SHIFT_RIGHT_FB", "SHIFT_RIGHT_PLL", "SHIFT_RIGHT_MC", "MUL_REG",
40"MUL_PS", "MUL_WS", "MUL_FB", "MUL_PLL", "MUL_MC", "DIV_REG", "DIV_PS",
41"DIV_WS", "DIV_FB", "DIV_PLL", "DIV_MC", "ADD_REG", "ADD_PS", "ADD_WS",
42"ADD_FB", "ADD_PLL", "ADD_MC", "SUB_REG", "SUB_PS", "SUB_WS", "SUB_FB",
43"SUB_PLL", "SUB_MC", "SET_ATI_PORT", "SET_PCI_PORT", "SET_SYS_IO_PORT",
44"SET_REG_BLOCK", "SET_FB_BASE", "COMPARE_REG", "COMPARE_PS",
45"COMPARE_WS", "COMPARE_FB", "COMPARE_PLL", "COMPARE_MC", "SWITCH",
46"JUMP", "JUMP_EQUAL", "JUMP_BELOW", "JUMP_ABOVE", "JUMP_BELOW_OR_EQUAL",
47"JUMP_ABOVE_OR_EQUAL", "JUMP_NOT_EQUAL", "TEST_REG", "TEST_PS", "TEST_WS",
48"TEST_FB", "TEST_PLL", "TEST_MC", "DELAY_MILLISEC", "DELAY_MICROSEC",
49"CALL_TABLE", "REPEAT", "CLEAR_REG", "CLEAR_PS", "CLEAR_WS", "CLEAR_FB",
50"CLEAR_PLL", "CLEAR_MC", "NOP", "EOT", "MASK_REG", "MASK_PS", "MASK_WS",
51"MASK_FB", "MASK_PLL", "MASK_MC", "POST_CARD", "BEEP", "SAVE_REG",
52"RESTORE_REG", "SET_DATA_BLOCK", "XOR_REG", "XOR_PS", "XOR_WS", "XOR_FB",
53"XOR_PLL", "XOR_MC", "SHL_REG", "SHL_PS", "SHL_WS", "SHL_FB", "SHL_PLL",
54"SHL_MC", "SHR_REG", "SHR_PS", "SHR_WS", "SHR_FB", "SHR_PLL", "SHR_MC",
55"DEBUG", "CTB_DS",
56};
57
58#define ATOM_TABLE_NAMES_CNT 74
59static char *atom_table_names[ATOM_TABLE_NAMES_CNT] = {
60"ASIC_Init", "GetDisplaySurfaceSize", "ASIC_RegistersInit",
61"VRAM_BlockVenderDetection", "SetClocksRatio", "MemoryControllerInit",
62"GPIO_PinInit", "MemoryParamAdjust", "DVOEncoderControl",
63"GPIOPinControl", "SetEngineClock", "SetMemoryClock", "SetPixelClock",
64"DynamicClockGating", "ResetMemoryDLL", "ResetMemoryDevice",
65"MemoryPLLInit", "EnableMemorySelfRefresh", "AdjustMemoryController",
66"EnableASIC_StaticPwrMgt", "ASIC_StaticPwrMgtStatusChange",
67"DAC_LoadDetection", "TMDS2EncoderControl", "LCD1OutputControl",
68"DAC1EncoderControl", "DAC2EncoderControl", "DVOOutputControl",
69"CV1OutputControl", "SetCRTC_DPM_State", "TVEncoderControl",
70"TMDS1EncoderControl", "LVDSEncoderControl", "TV1OutputControl",
71"EnableScaler", "BlankCRTC", "EnableCRTC", "GetPixelClock",
72"EnableVGA_Render", "EnableVGA_Access", "SetCRTC_Timing",
73"SetCRTC_OverScan", "SetCRTC_Replication", "SelectCRTC_Source",
74"EnableGraphSurfaces", "UpdateCRTC_DoubleBufferRegisters",
75"LUT_AutoFill", "EnableHW_IconCursor", "GetMemoryClock",
76"GetEngineClock", "SetCRTC_UsingDTDTiming", "TVBootUpStdPinDetection",
77"DFP2OutputControl", "VRAM_BlockDetectionByStrap", "MemoryCleanUp",
78"ReadEDIDFromHWAssistedI2C", "WriteOneByteToHWAssistedI2C",
79"ReadHWAssistedI2CStatus", "SpeedFanControl", "PowerConnectorDetection",
80"MC_Synchronization", "ComputeMemoryEnginePLL", "MemoryRefreshConversion",
81"VRAM_GetCurrentInfoBlock", "DynamicMemorySettings", "MemoryTraining",
82"EnableLVDS_SS", "DFP1OutputControl", "SetVoltage", "CRT1OutputControl",
83"CRT2OutputControl", "SetupHWAssistedI2CStatus", "ClockSource",
84"MemoryDeviceInit", "EnableYUV",
85};
86
87#define ATOM_IO_NAMES_CNT 5
88static char *atom_io_names[ATOM_IO_NAMES_CNT] = {
89"MM", "PLL", "MC", "PCIE", "PCIE PORT",
90};
91
92#else
93
94#define ATOM_OP_NAMES_CNT 0
95#define ATOM_TABLE_NAMES_CNT 0
96#define ATOM_IO_NAMES_CNT 0
97
98#endif
99
100#endif
diff --git a/drivers/gpu/drm/radeon/atom-types.h b/drivers/gpu/drm/radeon/atom-types.h
new file mode 100644
index 000000000000..1125b866cdb0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom-types.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright 2008 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Dave Airlie
23 */
24
#ifndef ATOM_TYPES_H
#define ATOM_TYPES_H

/* sync atom types to kernel types */

typedef uint16_t USHORT;
typedef uint32_t ULONG;
typedef uint8_t UCHAR;


/* Derive ATOM_BIG_ENDIAN from the compiler's endianness macro unless the
 * build overrides it explicitly. */
#ifndef ATOM_BIG_ENDIAN
#if defined(__BIG_ENDIAN)
#define ATOM_BIG_ENDIAN 1
#else
#define ATOM_BIG_ENDIAN 0
#endif
#endif
#endif
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
new file mode 100644
index 000000000000..901befe03da2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -0,0 +1,1215 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#include <linux/module.h>
26#include <linux/sched.h>
27
28#define ATOM_DEBUG
29
30#include "atom.h"
31#include "atom-names.h"
32#include "atom-bits.h"
33
34#define ATOM_COND_ABOVE 0
35#define ATOM_COND_ABOVEOREQUAL 1
36#define ATOM_COND_ALWAYS 2
37#define ATOM_COND_BELOW 3
38#define ATOM_COND_BELOWOREQUAL 4
39#define ATOM_COND_EQUAL 5
40#define ATOM_COND_NOTEQUAL 6
41
42#define ATOM_PORT_ATI 0
43#define ATOM_PORT_PCI 1
44#define ATOM_PORT_SYSIO 2
45
46#define ATOM_UNIT_MICROSEC 0
47#define ATOM_UNIT_MILLISEC 1
48
49#define PLL_INDEX 2
50#define PLL_DATA 3
51
/* Per-invocation state for one command-table run (nested runs each get
 * their own instance; the shared BIOS context lives in *ctx). */
typedef struct {
	struct atom_context *ctx;	/* parsed-BIOS context shared across nested calls */

	uint32_t *ps, *ws;	/* parameter space / per-table scratch workspace */
	int ps_shift;		/* PS offset (in dwords) handed to nested CALL_TABLE */
	uint16_t start;		/* byte offset of this table in the BIOS image */
} atom_exec_context;
59
60int atom_debug = 0;
61void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
62
/* Bit-field mask for each of the 8 source alignment encodings:
 * full dword, three word positions, four byte positions. */
static uint32_t atom_arg_mask[8] =
    { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
0xFF000000 };
/* Right-shift that brings the masked field of each encoding down to bit 0. */
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };

static int atom_dst_to_src[8][4] = {
	/* translate destination alignment field to the source alignment encoding */
	{0, 0, 0, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{1, 2, 3, 0},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
	{4, 5, 6, 7},
};
/* Default destination alignment used by ops (CLEAR/SHL/SHR) that only
 * encode an operand size. */
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
80
81static int debug_depth = 0;
82#ifdef ATOM_DEBUG
/* Emit n spaces so nested command-table trace output is indented. */
static void debug_print_spaces(int n)
{
	int i;

	for (i = 0; i < n; i++)
		printk(" ");
}
88
89#define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
90#define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
91#else
92#define DEBUG(...) do { } while (0)
93#define SDEBUG(...) do { } while (0)
94#endif
95
/*
 * Run one indirect-IO (IIO) program stored in the BIOS at byte offset
 * 'base'.  The program builds up a temporary value from register reads
 * and bit-moves of 'index'/'data' and either writes it to a register or
 * returns it at ATOM_IIO_END.  Returns 0 on an unknown opcode.
 */
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
				 uint32_t index, uint32_t data)
{
	uint32_t temp = 0xCDCDCDCD;
	while (1)
		switch (CU8(base)) {
		case ATOM_IIO_NOP:
			base++;
			break;
		case ATOM_IIO_READ:
			temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
			base += 3;
			break;
		case ATOM_IIO_WRITE:
			ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
			base += 3;
			break;
		case ATOM_IIO_CLEAR:
			/* clear CU8(base+1) bits starting at bit CU8(base+2) */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 2));
			base += 3;
			break;
		case ATOM_IIO_SET:
			temp |=
			    (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
									2);
			base += 3;
			break;
		case ATOM_IIO_MOVE_INDEX:
			/* copy a bit-field of 'index' into temp */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 2));
			temp |=
			    ((index >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
									  3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_DATA:
			/* copy a bit-field of 'data' into temp */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 2));
			temp |=
			    ((data >> CU8(base + 2)) &
			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
									  3);
			base += 4;
			break;
		case ATOM_IIO_MOVE_ATTR:
			/* copy a bit-field of the context's io_attr into temp */
			temp &=
			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
			      CU8(base + 2));
			temp |=
			    ((ctx->
			      io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
									  CU8
									  (base
									   +
									   1))))
			    << CU8(base + 3);
			base += 4;
			break;
		case ATOM_IIO_END:
			return temp;
		default:
			printk(KERN_INFO "Unknown IIO opcode.\n");
			return 0;
		}
}
166
/*
 * Fetch one source operand and advance *ptr past its encoding.
 * 'attr' packs the argument type (bits 0-2) and source alignment
 * (bits 3-5).  If 'saved' is non-NULL, the full unmasked 32-bit value is
 * stored there so atom_put_dst() can later merge a partial-width result
 * back.  'print' gates the SDEBUG trace output.  Returns the operand
 * masked and shifted down according to the alignment encoding.
 */
static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
				 int *ptr, uint32_t *saved, int print)
{
	uint32_t idx, val = 0xCDCDCDCD, align, arg;
	struct atom_context *gctx = ctx->ctx;
	arg = attr & 7;
	align = (attr >> 3) & 7;
	switch (arg) {
	case ATOM_ARG_REG:
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print)
			DEBUG("REG[0x%04X]", idx);
		idx += gctx->reg_block;
		switch (gctx->io_mode) {
		case ATOM_IO_MM:
			val = gctx->card->reg_read(gctx->card, idx);
			break;
		case ATOM_IO_PCI:
			printk(KERN_INFO
			       "PCI registers are not implemented.\n");
			return 0;
		case ATOM_IO_SYSIO:
			printk(KERN_INFO
			       "SYSIO registers are not implemented.\n");
			return 0;
		default:
			/* indirect IO: bit 7 flags the mode, low 7 bits
			 * select the IIO program to run */
			if (!(gctx->io_mode & 0x80)) {
				printk(KERN_INFO "Bad IO mode.\n");
				return 0;
			}
			if (!gctx->iio[gctx->io_mode & 0x7F]) {
				printk(KERN_INFO
				       "Undefined indirect IO read method %d.\n",
				       gctx->io_mode & 0x7F);
				return 0;
			}
			val =
			    atom_iio_execute(gctx,
					     gctx->iio[gctx->io_mode & 0x7F],
					     idx, 0);
		}
		break;
	case ATOM_ARG_PS:
		idx = U8(*ptr);
		(*ptr)++;
		/* parameter space entries are little-endian in memory */
		val = le32_to_cpu(ctx->ps[idx]);
		if (print)
			DEBUG("PS[0x%02X,0x%04X]", idx, val);
		break;
	case ATOM_ARG_WS:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("WS[0x%02X]", idx);
		/* some WS indices are aliases for engine state rather than
		 * real workspace slots */
		switch (idx) {
		case ATOM_WS_QUOTIENT:
			val = gctx->divmul[0];
			break;
		case ATOM_WS_REMAINDER:
			val = gctx->divmul[1];
			break;
		case ATOM_WS_DATAPTR:
			val = gctx->data_block;
			break;
		case ATOM_WS_SHIFT:
			val = gctx->shift;
			break;
		case ATOM_WS_OR_MASK:
			val = 1 << gctx->shift;
			break;
		case ATOM_WS_AND_MASK:
			val = ~(1 << gctx->shift);
			break;
		case ATOM_WS_FB_WINDOW:
			val = gctx->fb_base;
			break;
		case ATOM_WS_ATTRIBUTES:
			val = gctx->io_attr;
			break;
		default:
			val = ctx->ws[idx];
		}
		break;
	case ATOM_ARG_ID:
		/* a data-table reference, relative to the current data block */
		idx = U16(*ptr);
		(*ptr) += 2;
		if (print) {
			if (gctx->data_block)
				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
			else
				DEBUG("ID[0x%04X]", idx);
		}
		val = U32(idx + gctx->data_block);
		break;
	case ATOM_ARG_FB:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("FB[0x%02X]", idx);
		printk(KERN_INFO "FB access is not implemented.\n");
		return 0;
	case ATOM_ARG_IMM:
		/* immediates return early: alignment selects the width
		 * consumed from the instruction stream, no masking needed */
		switch (align) {
		case ATOM_SRC_DWORD:
			val = U32(*ptr);
			(*ptr) += 4;
			if (print)
				DEBUG("IMM 0x%08X\n", val);
			return val;
		case ATOM_SRC_WORD0:
		case ATOM_SRC_WORD8:
		case ATOM_SRC_WORD16:
			val = U16(*ptr);
			(*ptr) += 2;
			if (print)
				DEBUG("IMM 0x%04X\n", val);
			return val;
		case ATOM_SRC_BYTE0:
		case ATOM_SRC_BYTE8:
		case ATOM_SRC_BYTE16:
		case ATOM_SRC_BYTE24:
			val = U8(*ptr);
			(*ptr)++;
			if (print)
				DEBUG("IMM 0x%02X\n", val);
			return val;
		}
		return 0;
	case ATOM_ARG_PLL:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("PLL[0x%02X]", idx);
		val = gctx->card->pll_read(gctx->card, idx);
		break;
	case ATOM_ARG_MC:
		idx = U8(*ptr);
		(*ptr)++;
		if (print)
			DEBUG("MC[0x%02X]", idx);
		val = gctx->card->mc_read(gctx->card, idx);
		break;
	}
	if (saved)
		*saved = val;
	/* reduce to the requested bit-field */
	val &= atom_arg_mask[align];
	val >>= atom_arg_shift[align];
	if (print)
		switch (align) {
		case ATOM_SRC_DWORD:
			DEBUG(".[31:0] -> 0x%08X\n", val);
			break;
		case ATOM_SRC_WORD0:
			DEBUG(".[15:0] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD8:
			DEBUG(".[23:8] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_WORD16:
			DEBUG(".[31:16] -> 0x%04X\n", val);
			break;
		case ATOM_SRC_BYTE0:
			DEBUG(".[7:0] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE8:
			DEBUG(".[15:8] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE16:
			DEBUG(".[23:16] -> 0x%02X\n", val);
			break;
		case ATOM_SRC_BYTE24:
			DEBUG(".[31:24] -> 0x%02X\n", val);
			break;
		}
	return val;
}
344
345static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
346{
347 uint32_t align = (attr >> 3) & 7, arg = attr & 7;
348 switch (arg) {
349 case ATOM_ARG_REG:
350 case ATOM_ARG_ID:
351 (*ptr) += 2;
352 break;
353 case ATOM_ARG_PLL:
354 case ATOM_ARG_MC:
355 case ATOM_ARG_PS:
356 case ATOM_ARG_WS:
357 case ATOM_ARG_FB:
358 (*ptr)++;
359 break;
360 case ATOM_ARG_IMM:
361 switch (align) {
362 case ATOM_SRC_DWORD:
363 (*ptr) += 4;
364 return;
365 case ATOM_SRC_WORD0:
366 case ATOM_SRC_WORD8:
367 case ATOM_SRC_WORD16:
368 (*ptr) += 2;
369 return;
370 case ATOM_SRC_BYTE0:
371 case ATOM_SRC_BYTE8:
372 case ATOM_SRC_BYTE16:
373 case ATOM_SRC_BYTE24:
374 (*ptr)++;
375 return;
376 }
377 return;
378 }
379}
380
/* Fetch a source operand with tracing enabled and no saved full value. */
static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
{
	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
}
385
/* Fetch a destination operand by translating its alignment field (bits
 * 3-5 sized by bits 6-7 of attr) into the source alignment encoding and
 * reusing the source fetch path. */
static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
			     int *ptr, uint32_t *saved, int print)
{
	return atom_get_src_int(ctx,
				arg | atom_dst_to_src[(attr >> 3) &
						      7][(attr >> 6) & 3] << 3,
				ptr, saved, print);
}
394
/* Skip a destination operand without reading it (same alignment
 * translation as atom_get_dst()). */
static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
{
	atom_skip_src_int(ctx,
			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
								 3] << 3, ptr);
}
401
402static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
403 int *ptr, uint32_t val, uint32_t saved)
404{
405 uint32_t align =
406 atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
407 val, idx;
408 struct atom_context *gctx = ctx->ctx;
409 old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
410 val <<= atom_arg_shift[align];
411 val &= atom_arg_mask[align];
412 saved &= ~atom_arg_mask[align];
413 val |= saved;
414 switch (arg) {
415 case ATOM_ARG_REG:
416 idx = U16(*ptr);
417 (*ptr) += 2;
418 DEBUG("REG[0x%04X]", idx);
419 idx += gctx->reg_block;
420 switch (gctx->io_mode) {
421 case ATOM_IO_MM:
422 if (idx == 0)
423 gctx->card->reg_write(gctx->card, idx,
424 val << 2);
425 else
426 gctx->card->reg_write(gctx->card, idx, val);
427 break;
428 case ATOM_IO_PCI:
429 printk(KERN_INFO
430 "PCI registers are not implemented.\n");
431 return;
432 case ATOM_IO_SYSIO:
433 printk(KERN_INFO
434 "SYSIO registers are not implemented.\n");
435 return;
436 default:
437 if (!(gctx->io_mode & 0x80)) {
438 printk(KERN_INFO "Bad IO mode.\n");
439 return;
440 }
441 if (!gctx->iio[gctx->io_mode & 0xFF]) {
442 printk(KERN_INFO
443 "Undefined indirect IO write method %d.\n",
444 gctx->io_mode & 0x7F);
445 return;
446 }
447 atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
448 idx, val);
449 }
450 break;
451 case ATOM_ARG_PS:
452 idx = U8(*ptr);
453 (*ptr)++;
454 DEBUG("PS[0x%02X]", idx);
455 ctx->ps[idx] = cpu_to_le32(val);
456 break;
457 case ATOM_ARG_WS:
458 idx = U8(*ptr);
459 (*ptr)++;
460 DEBUG("WS[0x%02X]", idx);
461 switch (idx) {
462 case ATOM_WS_QUOTIENT:
463 gctx->divmul[0] = val;
464 break;
465 case ATOM_WS_REMAINDER:
466 gctx->divmul[1] = val;
467 break;
468 case ATOM_WS_DATAPTR:
469 gctx->data_block = val;
470 break;
471 case ATOM_WS_SHIFT:
472 gctx->shift = val;
473 break;
474 case ATOM_WS_OR_MASK:
475 case ATOM_WS_AND_MASK:
476 break;
477 case ATOM_WS_FB_WINDOW:
478 gctx->fb_base = val;
479 break;
480 case ATOM_WS_ATTRIBUTES:
481 gctx->io_attr = val;
482 break;
483 default:
484 ctx->ws[idx] = val;
485 }
486 break;
487 case ATOM_ARG_FB:
488 idx = U8(*ptr);
489 (*ptr)++;
490 DEBUG("FB[0x%02X]", idx);
491 printk(KERN_INFO "FB access is not implemented.\n");
492 return;
493 case ATOM_ARG_PLL:
494 idx = U8(*ptr);
495 (*ptr)++;
496 DEBUG("PLL[0x%02X]", idx);
497 gctx->card->pll_write(gctx->card, idx, val);
498 break;
499 case ATOM_ARG_MC:
500 idx = U8(*ptr);
501 (*ptr)++;
502 DEBUG("MC[0x%02X]", idx);
503 gctx->card->mc_write(gctx->card, idx, val);
504 return;
505 }
506 switch (align) {
507 case ATOM_SRC_DWORD:
508 DEBUG(".[31:0] <- 0x%08X\n", old_val);
509 break;
510 case ATOM_SRC_WORD0:
511 DEBUG(".[15:0] <- 0x%04X\n", old_val);
512 break;
513 case ATOM_SRC_WORD8:
514 DEBUG(".[23:8] <- 0x%04X\n", old_val);
515 break;
516 case ATOM_SRC_WORD16:
517 DEBUG(".[31:16] <- 0x%04X\n", old_val);
518 break;
519 case ATOM_SRC_BYTE0:
520 DEBUG(".[7:0] <- 0x%02X\n", old_val);
521 break;
522 case ATOM_SRC_BYTE8:
523 DEBUG(".[15:8] <- 0x%02X\n", old_val);
524 break;
525 case ATOM_SRC_BYTE16:
526 DEBUG(".[23:16] <- 0x%02X\n", old_val);
527 break;
528 case ATOM_SRC_BYTE24:
529 DEBUG(".[31:24] <- 0x%02X\n", old_val);
530 break;
531 }
532}
533
534static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
535{
536 uint8_t attr = U8((*ptr)++);
537 uint32_t dst, src, saved;
538 int dptr = *ptr;
539 SDEBUG(" dst: ");
540 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
541 SDEBUG(" src: ");
542 src = atom_get_src(ctx, attr, ptr);
543 dst += src;
544 SDEBUG(" dst: ");
545 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
546}
547
548static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
549{
550 uint8_t attr = U8((*ptr)++);
551 uint32_t dst, src, saved;
552 int dptr = *ptr;
553 SDEBUG(" dst: ");
554 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
555 SDEBUG(" src: ");
556 src = atom_get_src(ctx, attr, ptr);
557 dst &= src;
558 SDEBUG(" dst: ");
559 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
560}
561
562static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
563{
564 printk("ATOM BIOS beeped!\n");
565}
566
/* CALL_TABLE: recursively execute command table 'idx', passing the
 * caller's parameter space offset by ps_shift.  Tables whose offset in
 * the master command table is 0 (not present) are silently skipped. */
static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8((*ptr)++);
	if (idx < ATOM_TABLE_NAMES_CNT)
		SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
	else
		SDEBUG(" table: %d\n", idx);
	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
		atom_execute_table(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
}
577
/* CLEAR: write 0 into the destination field, preserving the bits of the
 * underlying dword outside the field (carried via 'saved'). */
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t saved;
	int dptr = *ptr;
	/* keep only the size bits and synthesize a default destination
	 * alignment from them */
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
}
589
590static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
591{
592 uint8_t attr = U8((*ptr)++);
593 uint32_t dst, src;
594 SDEBUG(" src1: ");
595 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
596 SDEBUG(" src2: ");
597 src = atom_get_src(ctx, attr, ptr);
598 ctx->ctx->cs_equal = (dst == src);
599 ctx->ctx->cs_above = (dst > src);
600 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
601 ctx->ctx->cs_above ? "GT" : "LE");
602}
603
/* DELAY: sleep for 'count' micro- or milliseconds depending on the
 * opcode variant.
 * NOTE(review): usecs_to_jiffies() rounds up to at least one jiffy, so
 * microsecond delays sleep orders of magnitude longer than requested —
 * confirm whether udelay() is intended for the microsecond case. */
static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t count = U8((*ptr)++);
	SDEBUG(" count: %d\n", count);
	if (arg == ATOM_UNIT_MICROSEC)
		schedule_timeout_uninterruptible(usecs_to_jiffies(count));
	else
		schedule_timeout_uninterruptible(msecs_to_jiffies(count));
}
613
614static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
615{
616 uint8_t attr = U8((*ptr)++);
617 uint32_t dst, src;
618 SDEBUG(" src1: ");
619 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
620 SDEBUG(" src2: ");
621 src = atom_get_src(ctx, attr, ptr);
622 if (src != 0) {
623 ctx->ctx->divmul[0] = dst / src;
624 ctx->ctx->divmul[1] = dst % src;
625 } else {
626 ctx->ctx->divmul[0] = 0;
627 ctx->ctx->divmul[1] = 0;
628 }
629}
630
/* EOT (end of table): the interpreter loop detects the opcode itself and
 * stops; the handler has nothing to do. */
static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
{
	/* functionally, a nop */
}
635
636static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
637{
638 int execute = 0, target = U16(*ptr);
639 (*ptr) += 2;
640 switch (arg) {
641 case ATOM_COND_ABOVE:
642 execute = ctx->ctx->cs_above;
643 break;
644 case ATOM_COND_ABOVEOREQUAL:
645 execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
646 break;
647 case ATOM_COND_ALWAYS:
648 execute = 1;
649 break;
650 case ATOM_COND_BELOW:
651 execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
652 break;
653 case ATOM_COND_BELOWOREQUAL:
654 execute = !ctx->ctx->cs_above;
655 break;
656 case ATOM_COND_EQUAL:
657 execute = ctx->ctx->cs_equal;
658 break;
659 case ATOM_COND_NOTEQUAL:
660 execute = !ctx->ctx->cs_equal;
661 break;
662 }
663 if (arg != ATOM_COND_ALWAYS)
664 SDEBUG(" taken: %s\n", execute ? "yes" : "no");
665 SDEBUG(" target: 0x%04X\n", target);
666 if (execute)
667 *ptr = ctx->start + target;
668}
669
670static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
671{
672 uint8_t attr = U8((*ptr)++);
673 uint32_t dst, src1, src2, saved;
674 int dptr = *ptr;
675 SDEBUG(" dst: ");
676 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
677 SDEBUG(" src1: ");
678 src1 = atom_get_src(ctx, attr, ptr);
679 SDEBUG(" src2: ");
680 src2 = atom_get_src(ctx, attr, ptr);
681 dst &= src1;
682 dst |= src2;
683 SDEBUG(" dst: ");
684 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
685}
686
/* MOVE: copy src into dst.  A full-dword destination overwrites the
 * whole value, so the old contents need not be fetched and the operand
 * is merely skipped; partial-width stores must merge into the saved
 * value and therefore read it first. */
static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, saved;
	int dptr = *ptr;
	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
	else {
		atom_skip_dst(ctx, arg, attr, ptr);
		saved = 0xCDCDCDCD;	/* dummy; fully overwritten below */
	}
	SDEBUG(" src: ");
	src = atom_get_src(ctx, attr, ptr);
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
}
703
704static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
705{
706 uint8_t attr = U8((*ptr)++);
707 uint32_t dst, src;
708 SDEBUG(" src1: ");
709 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
710 SDEBUG(" src2: ");
711 src = atom_get_src(ctx, attr, ptr);
712 ctx->ctx->divmul[0] = dst * src;
713}
714
/* NOP: deliberately does nothing. */
static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
{
	/* nothing */
}
719
720static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
721{
722 uint8_t attr = U8((*ptr)++);
723 uint32_t dst, src, saved;
724 int dptr = *ptr;
725 SDEBUG(" dst: ");
726 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
727 SDEBUG(" src: ");
728 src = atom_get_src(ctx, attr, ptr);
729 dst |= src;
730 SDEBUG(" dst: ");
731 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
732}
733
/* POST_CARD: the BIOS wants to emit a POST code; only traced here. */
static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t val = U8((*ptr)++);
	SDEBUG("POST card output: 0x%02X\n", val);
}
739
740static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
741{
742 printk(KERN_INFO "unimplemented!\n");
743}
744
745static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
746{
747 printk(KERN_INFO "unimplemented!\n");
748}
749
750static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
751{
752 printk(KERN_INFO "unimplemented!\n");
753}
754
/* SET_DATA_BLOCK: select the base used to resolve ATOM_ARG_ID accesses.
 * Index 0 selects the ROM base, 255 the current table itself, anything
 * else an entry of the master data table. */
static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
{
	int idx = U8(*ptr);
	(*ptr)++;
	SDEBUG(" block: %d\n", idx);
	if (!idx)
		ctx->ctx->data_block = 0;
	else if (idx == 255)
		ctx->ctx->data_block = ctx->start;
	else
		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
	SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block);
}
768
/* SET_FB_BASE: latch the framebuffer scratch window base from src.
 * (Actual FB accesses are unimplemented in this interpreter.) */
static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	SDEBUG(" fb_base: ");
	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
}
775
/* SET_PORT: select the register-access mode for subsequent REG operands.
 * ATI ports use a 16-bit selector: 0 means direct MM access, anything
 * else indirect IO through the IIO program of that port id. */
static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
{
	int port;
	switch (arg) {
	case ATOM_PORT_ATI:
		port = U16(*ptr);
		if (port < ATOM_IO_NAMES_CNT)
			SDEBUG(" port: %d (%s)\n", port, atom_io_names[port]);
		else
			SDEBUG(" port: %d\n", port);
		if (!port)
			ctx->ctx->io_mode = ATOM_IO_MM;
		else
			ctx->ctx->io_mode = ATOM_IO_IIO | port;
		(*ptr) += 2;
		break;
	case ATOM_PORT_PCI:
		ctx->ctx->io_mode = ATOM_IO_PCI;
		(*ptr)++;
		break;
	case ATOM_PORT_SYSIO:
		ctx->ctx->io_mode = ATOM_IO_SYSIO;
		(*ptr)++;
		break;
	}
}
802
/* SET_REG_BLOCK: set the offset added to every ATOM_ARG_REG index. */
static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
{
	ctx->ctx->reg_block = U16(*ptr);
	(*ptr) += 2;
	SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block);
}
809
/* SHL: dst <<= shift, with an immediate shift-count byte.
 * NOTE(review): the shift is applied to the alignment-masked field, not
 * the full register dword, so bits shifted across the field boundary are
 * lost — confirm against the ATOM spec whether a full-dword shift is
 * expected here. */
static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	/* keep the size bits, derive a default destination alignment */
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = U8((*ptr)++);
	SDEBUG(" shift: %d\n", shift);
	dst <<= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
825
/* SHR: dst >>= shift, with an immediate shift-count byte.
 * NOTE(review): like SHL, the shift operates on the alignment-masked
 * field rather than the full dword — confirm against the ATOM spec. */
static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++), shift;
	uint32_t saved, dst;
	int dptr = *ptr;
	/* keep the size bits, derive a default destination alignment */
	attr &= 0x38;
	attr |= atom_def_dst[attr >> 3] << 6;
	SDEBUG(" dst: ");
	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
	shift = U8((*ptr)++);
	SDEBUG(" shift: %d\n", shift);
	dst >>= shift;
	SDEBUG(" dst: ");
	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
}
841
842static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
843{
844 uint8_t attr = U8((*ptr)++);
845 uint32_t dst, src, saved;
846 int dptr = *ptr;
847 SDEBUG(" dst: ");
848 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
849 SDEBUG(" src: ");
850 src = atom_get_src(ctx, attr, ptr);
851 dst -= src;
852 SDEBUG(" dst: ");
853 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
854}
855
/* SWITCH: scan a list of (magic, immediate, target) cases and jump to
 * the first whose immediate equals src.  The list ends with an
 * ATOM_CASE_END word; if no case matches, execution continues after it. */
static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
{
	uint8_t attr = U8((*ptr)++);
	uint32_t src, val, target;
	SDEBUG(" switch: ");
	src = atom_get_src(ctx, attr, ptr);
	while (U16(*ptr) != ATOM_CASE_END)
		if (U8(*ptr) == ATOM_CASE_MAGIC) {
			(*ptr)++;
			SDEBUG(" case: ");
			/* case values are immediates with the switch's size */
			val =
			    atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
					 ptr);
			target = U16(*ptr);
			if (val == src) {
				SDEBUG(" target: %04X\n", target);
				*ptr = ctx->start + target;
				return;
			}
			(*ptr) += 2;
		} else {
			printk(KERN_INFO "Bad case.\n");
			return;
		}
	/* skip the ATOM_CASE_END marker */
	(*ptr) += 2;
}
882
883static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
884{
885 uint8_t attr = U8((*ptr)++);
886 uint32_t dst, src;
887 SDEBUG(" src1: ");
888 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
889 SDEBUG(" src2: ");
890 src = atom_get_src(ctx, attr, ptr);
891 ctx->ctx->cs_equal = ((dst & src) == 0);
892 SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
893}
894
895static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
896{
897 uint8_t attr = U8((*ptr)++);
898 uint32_t dst, src, saved;
899 int dptr = *ptr;
900 SDEBUG(" dst: ");
901 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
902 SDEBUG(" src: ");
903 src = atom_get_src(ctx, attr, ptr);
904 dst ^= src;
905 SDEBUG(" dst: ");
906 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
907}
908
909static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
910{
911 printk(KERN_INFO "unimplemented!\n");
912}
913
/* Dispatch table indexed by ATOM opcode: handler plus a per-opcode
 * argument (operand kind, port, condition, or delay unit).  Entry 0 is
 * invalid and terminates execution in atom_execute_table(). */
static struct {
	void (*func) (atom_exec_context *, int *, int);
	int arg;
} opcode_table[ATOM_OP_CNT] = {
	{
	NULL, 0}, {
	atom_op_move, ATOM_ARG_REG}, {
	atom_op_move, ATOM_ARG_PS}, {
	atom_op_move, ATOM_ARG_WS}, {
	atom_op_move, ATOM_ARG_FB}, {
	atom_op_move, ATOM_ARG_PLL}, {
	atom_op_move, ATOM_ARG_MC}, {
	atom_op_and, ATOM_ARG_REG}, {
	atom_op_and, ATOM_ARG_PS}, {
	atom_op_and, ATOM_ARG_WS}, {
	atom_op_and, ATOM_ARG_FB}, {
	atom_op_and, ATOM_ARG_PLL}, {
	atom_op_and, ATOM_ARG_MC}, {
	atom_op_or, ATOM_ARG_REG}, {
	atom_op_or, ATOM_ARG_PS}, {
	atom_op_or, ATOM_ARG_WS}, {
	atom_op_or, ATOM_ARG_FB}, {
	atom_op_or, ATOM_ARG_PLL}, {
	atom_op_or, ATOM_ARG_MC}, {
	atom_op_shl, ATOM_ARG_REG}, {
	atom_op_shl, ATOM_ARG_PS}, {
	atom_op_shl, ATOM_ARG_WS}, {
	atom_op_shl, ATOM_ARG_FB}, {
	atom_op_shl, ATOM_ARG_PLL}, {
	atom_op_shl, ATOM_ARG_MC}, {
	atom_op_shr, ATOM_ARG_REG}, {
	atom_op_shr, ATOM_ARG_PS}, {
	atom_op_shr, ATOM_ARG_WS}, {
	atom_op_shr, ATOM_ARG_FB}, {
	atom_op_shr, ATOM_ARG_PLL}, {
	atom_op_shr, ATOM_ARG_MC}, {
	atom_op_mul, ATOM_ARG_REG}, {
	atom_op_mul, ATOM_ARG_PS}, {
	atom_op_mul, ATOM_ARG_WS}, {
	atom_op_mul, ATOM_ARG_FB}, {
	atom_op_mul, ATOM_ARG_PLL}, {
	atom_op_mul, ATOM_ARG_MC}, {
	atom_op_div, ATOM_ARG_REG}, {
	atom_op_div, ATOM_ARG_PS}, {
	atom_op_div, ATOM_ARG_WS}, {
	atom_op_div, ATOM_ARG_FB}, {
	atom_op_div, ATOM_ARG_PLL}, {
	atom_op_div, ATOM_ARG_MC}, {
	atom_op_add, ATOM_ARG_REG}, {
	atom_op_add, ATOM_ARG_PS}, {
	atom_op_add, ATOM_ARG_WS}, {
	atom_op_add, ATOM_ARG_FB}, {
	atom_op_add, ATOM_ARG_PLL}, {
	atom_op_add, ATOM_ARG_MC}, {
	atom_op_sub, ATOM_ARG_REG}, {
	atom_op_sub, ATOM_ARG_PS}, {
	atom_op_sub, ATOM_ARG_WS}, {
	atom_op_sub, ATOM_ARG_FB}, {
	atom_op_sub, ATOM_ARG_PLL}, {
	atom_op_sub, ATOM_ARG_MC}, {
	atom_op_setport, ATOM_PORT_ATI}, {
	atom_op_setport, ATOM_PORT_PCI}, {
	atom_op_setport, ATOM_PORT_SYSIO}, {
	atom_op_setregblock, 0}, {
	atom_op_setfbbase, 0}, {
	atom_op_compare, ATOM_ARG_REG}, {
	atom_op_compare, ATOM_ARG_PS}, {
	atom_op_compare, ATOM_ARG_WS}, {
	atom_op_compare, ATOM_ARG_FB}, {
	atom_op_compare, ATOM_ARG_PLL}, {
	atom_op_compare, ATOM_ARG_MC}, {
	atom_op_switch, 0}, {
	atom_op_jump, ATOM_COND_ALWAYS}, {
	atom_op_jump, ATOM_COND_EQUAL}, {
	atom_op_jump, ATOM_COND_BELOW}, {
	atom_op_jump, ATOM_COND_ABOVE}, {
	atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
	atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
	atom_op_jump, ATOM_COND_NOTEQUAL}, {
	atom_op_test, ATOM_ARG_REG}, {
	atom_op_test, ATOM_ARG_PS}, {
	atom_op_test, ATOM_ARG_WS}, {
	atom_op_test, ATOM_ARG_FB}, {
	atom_op_test, ATOM_ARG_PLL}, {
	atom_op_test, ATOM_ARG_MC}, {
	atom_op_delay, ATOM_UNIT_MILLISEC}, {
	atom_op_delay, ATOM_UNIT_MICROSEC}, {
	atom_op_calltable, 0}, {
	atom_op_repeat, 0}, {
	atom_op_clear, ATOM_ARG_REG}, {
	atom_op_clear, ATOM_ARG_PS}, {
	atom_op_clear, ATOM_ARG_WS}, {
	atom_op_clear, ATOM_ARG_FB}, {
	atom_op_clear, ATOM_ARG_PLL}, {
	atom_op_clear, ATOM_ARG_MC}, {
	atom_op_nop, 0}, {
	atom_op_eot, 0}, {
	atom_op_mask, ATOM_ARG_REG}, {
	atom_op_mask, ATOM_ARG_PS}, {
	atom_op_mask, ATOM_ARG_WS}, {
	atom_op_mask, ATOM_ARG_FB}, {
	atom_op_mask, ATOM_ARG_PLL}, {
	atom_op_mask, ATOM_ARG_MC}, {
	atom_op_postcard, 0}, {
	atom_op_beep, 0}, {
	atom_op_savereg, 0}, {
	atom_op_restorereg, 0}, {
	atom_op_setdatablock, 0}, {
	atom_op_xor, ATOM_ARG_REG}, {
	atom_op_xor, ATOM_ARG_PS}, {
	atom_op_xor, ATOM_ARG_WS}, {
	atom_op_xor, ATOM_ARG_FB}, {
	atom_op_xor, ATOM_ARG_PLL}, {
	atom_op_xor, ATOM_ARG_MC}, {
	atom_op_shl, ATOM_ARG_REG}, {
	atom_op_shl, ATOM_ARG_PS}, {
	atom_op_shl, ATOM_ARG_WS}, {
	atom_op_shl, ATOM_ARG_FB}, {
	atom_op_shl, ATOM_ARG_PLL}, {
	atom_op_shl, ATOM_ARG_MC}, {
	atom_op_shr, ATOM_ARG_REG}, {
	atom_op_shr, ATOM_ARG_PS}, {
	atom_op_shr, ATOM_ARG_WS}, {
	atom_op_shr, ATOM_ARG_FB}, {
	atom_op_shr, ATOM_ARG_PLL}, {
	atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
1041
1042void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1043{
1044 int base = CU16(ctx->cmd_table + 4 + 2 * index);
1045 int len, ws, ps, ptr;
1046 unsigned char op;
1047 atom_exec_context ectx;
1048
1049 if (!base)
1050 return;
1051
1052 len = CU16(base + ATOM_CT_SIZE_PTR);
1053 ws = CU8(base + ATOM_CT_WS_PTR);
1054 ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
1055 ptr = base + ATOM_CT_CODE_PTR;
1056
1057 SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1058
1059 /* reset reg block */
1060 ctx->reg_block = 0;
1061 ectx.ctx = ctx;
1062 ectx.ps_shift = ps / 4;
1063 ectx.start = base;
1064 ectx.ps = params;
1065 if (ws)
1066 ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
1067 else
1068 ectx.ws = NULL;
1069
1070 debug_depth++;
1071 while (1) {
1072 op = CU8(ptr++);
1073 if (op < ATOM_OP_NAMES_CNT)
1074 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1075 else
1076 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1077
1078 if (op < ATOM_OP_CNT && op > 0)
1079 opcode_table[op].func(&ectx, &ptr,
1080 opcode_table[op].arg);
1081 else
1082 break;
1083
1084 if (op == ATOM_OP_EOT)
1085 break;
1086 }
1087 debug_depth--;
1088 SDEBUG("<<\n");
1089
1090 if (ws)
1091 kfree(ectx.ws);
1092}
1093
/* Byte length of each IIO opcode, used to walk IIO programs when indexing. */
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1095
1096static void atom_index_iio(struct atom_context *ctx, int base)
1097{
1098 ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1099 while (CU8(base) == ATOM_IIO_START) {
1100 ctx->iio[CU8(base + 1)] = base + 2;
1101 base += 2;
1102 while (CU8(base) != ATOM_IIO_END)
1103 base += atom_iio_len[CU8(base)];
1104 base += 3;
1105 }
1106}
1107
1108struct atom_context *atom_parse(struct card_info *card, void *bios)
1109{
1110 int base;
1111 struct atom_context *ctx =
1112 kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1113 char *str;
1114 char name[512];
1115 int i;
1116
1117 ctx->card = card;
1118 ctx->bios = bios;
1119
1120 if (CU16(0) != ATOM_BIOS_MAGIC) {
1121 printk(KERN_INFO "Invalid BIOS magic.\n");
1122 kfree(ctx);
1123 return NULL;
1124 }
1125 if (strncmp
1126 (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1127 strlen(ATOM_ATI_MAGIC))) {
1128 printk(KERN_INFO "Invalid ATI magic.\n");
1129 kfree(ctx);
1130 return NULL;
1131 }
1132
1133 base = CU16(ATOM_ROM_TABLE_PTR);
1134 if (strncmp
1135 (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1136 strlen(ATOM_ROM_MAGIC))) {
1137 printk(KERN_INFO "Invalid ATOM magic.\n");
1138 kfree(ctx);
1139 return NULL;
1140 }
1141
1142 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1143 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1144 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1145
1146 str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
1147 while (*str && ((*str == '\n') || (*str == '\r')))
1148 str++;
1149 /* name string isn't always 0 terminated */
1150 for (i = 0; i < 511; i++) {
1151 name[i] = str[i];
1152 if (name[i] < '.' || name[i] > 'z') {
1153 name[i] = 0;
1154 break;
1155 }
1156 }
1157 printk(KERN_INFO "ATOM BIOS: %s\n", name);
1158
1159 return ctx;
1160}
1161
1162int atom_asic_init(struct atom_context *ctx)
1163{
1164 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1165 uint32_t ps[16];
1166 memset(ps, 0, 64);
1167
1168 ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1169 ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1170 if (!ps[0] || !ps[1])
1171 return 1;
1172
1173 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1174 return 1;
1175 atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1176
1177 return 0;
1178}
1179
1180void atom_destroy(struct atom_context *ctx)
1181{
1182 if (ctx->iio)
1183 kfree(ctx->iio);
1184 kfree(ctx);
1185}
1186
1187void atom_parse_data_header(struct atom_context *ctx, int index,
1188 uint16_t * size, uint8_t * frev, uint8_t * crev,
1189 uint16_t * data_start)
1190{
1191 int offset = index * 2 + 4;
1192 int idx = CU16(ctx->data_table + offset);
1193
1194 if (size)
1195 *size = CU16(idx);
1196 if (frev)
1197 *frev = CU8(idx + 2);
1198 if (crev)
1199 *crev = CU8(idx + 3);
1200 *data_start = idx;
1201 return;
1202}
1203
1204void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1205 uint8_t * crev)
1206{
1207 int offset = index * 2 + 4;
1208 int idx = CU16(ctx->cmd_table + offset);
1209
1210 if (frev)
1211 *frev = CU8(idx + 2);
1212 if (crev)
1213 *crev = CU8(idx + 3);
1214 return;
1215}
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
new file mode 100644
index 000000000000..e6eb38f2bcae
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -0,0 +1,149 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#ifndef ATOM_H
26#define ATOM_H
27
28#include <linux/types.h>
29#include "drmP.h"
30
31#define ATOM_BIOS_MAGIC 0xAA55
32#define ATOM_ATI_MAGIC_PTR 0x30
33#define ATOM_ATI_MAGIC " 761295520"
34#define ATOM_ROM_TABLE_PTR 0x48
35
36#define ATOM_ROM_MAGIC "ATOM"
37#define ATOM_ROM_MAGIC_PTR 4
38
39#define ATOM_ROM_MSG_PTR 0x10
40#define ATOM_ROM_CMD_PTR 0x1E
41#define ATOM_ROM_DATA_PTR 0x20
42
43#define ATOM_CMD_INIT 0
44#define ATOM_CMD_SETSCLK 0x0A
45#define ATOM_CMD_SETMCLK 0x0B
46#define ATOM_CMD_SETPCLK 0x0C
47
48#define ATOM_DATA_FWI_PTR 0xC
49#define ATOM_DATA_IIO_PTR 0x32
50
51#define ATOM_FWI_DEFSCLK_PTR 8
52#define ATOM_FWI_DEFMCLK_PTR 0xC
53#define ATOM_FWI_MAXSCLK_PTR 0x24
54#define ATOM_FWI_MAXMCLK_PTR 0x28
55
56#define ATOM_CT_SIZE_PTR 0
57#define ATOM_CT_WS_PTR 4
58#define ATOM_CT_PS_PTR 5
59#define ATOM_CT_PS_MASK 0x7F
60#define ATOM_CT_CODE_PTR 6
61
62#define ATOM_OP_CNT 123
63#define ATOM_OP_EOT 91
64
65#define ATOM_CASE_MAGIC 0x63
66#define ATOM_CASE_END 0x5A5A
67
68#define ATOM_ARG_REG 0
69#define ATOM_ARG_PS 1
70#define ATOM_ARG_WS 2
71#define ATOM_ARG_FB 3
72#define ATOM_ARG_ID 4
73#define ATOM_ARG_IMM 5
74#define ATOM_ARG_PLL 6
75#define ATOM_ARG_MC 7
76
77#define ATOM_SRC_DWORD 0
78#define ATOM_SRC_WORD0 1
79#define ATOM_SRC_WORD8 2
80#define ATOM_SRC_WORD16 3
81#define ATOM_SRC_BYTE0 4
82#define ATOM_SRC_BYTE8 5
83#define ATOM_SRC_BYTE16 6
84#define ATOM_SRC_BYTE24 7
85
86#define ATOM_WS_QUOTIENT 0x40
87#define ATOM_WS_REMAINDER 0x41
88#define ATOM_WS_DATAPTR 0x42
89#define ATOM_WS_SHIFT 0x43
90#define ATOM_WS_OR_MASK 0x44
91#define ATOM_WS_AND_MASK 0x45
92#define ATOM_WS_FB_WINDOW 0x46
93#define ATOM_WS_ATTRIBUTES 0x47
94
95#define ATOM_IIO_NOP 0
96#define ATOM_IIO_START 1
97#define ATOM_IIO_READ 2
98#define ATOM_IIO_WRITE 3
99#define ATOM_IIO_CLEAR 4
100#define ATOM_IIO_SET 5
101#define ATOM_IIO_MOVE_INDEX 6
102#define ATOM_IIO_MOVE_ATTR 7
103#define ATOM_IIO_MOVE_DATA 8
104#define ATOM_IIO_END 9
105
106#define ATOM_IO_MM 0
107#define ATOM_IO_PCI 1
108#define ATOM_IO_SYSIO 2
109#define ATOM_IO_IIO 0x80
110
/*
 * Register-access callbacks supplied by the driver.  The AtomBIOS
 * interpreter performs all register, MC, and PLL traffic through
 * these hooks so it stays independent of how the driver maps the
 * hardware.
 */
struct card_info {
	struct drm_device *dev;
	void (* reg_write)(struct card_info *, uint32_t, uint32_t);   /* filled by driver */
	uint32_t (* reg_read)(struct card_info *, uint32_t);          /* filled by driver */
	void (* mc_write)(struct card_info *, uint32_t, uint32_t);    /* filled by driver */
	uint32_t (* mc_read)(struct card_info *, uint32_t);           /* filled by driver */
	void (* pll_write)(struct card_info *, uint32_t, uint32_t);   /* filled by driver */
	uint32_t (* pll_read)(struct card_info *, uint32_t);          /* filled by driver */
};
120
/*
 * Per-BIOS interpreter context.  Allocated by atom_parse(), released
 * with atom_destroy(); atom_execute_table() mutates the scratch
 * fields below while a command table runs.
 */
struct atom_context {
	struct card_info *card;		/* driver-supplied register accessors */
	void *bios;			/* raw BIOS image the offsets index into */
	uint32_t cmd_table, data_table;	/* master command/data table offsets */
	uint16_t *iio;			/* indirect-IO table, freed by atom_destroy() */

	/* Interpreter scratch state, valid only during table execution. */
	uint16_t data_block;
	uint32_t fb_base;		/* presumably the ATOM_WS_FB_WINDOW base -- confirm in atom.c */
	uint32_t divmul[2];		/* presumably quotient/remainder (ATOM_WS_QUOTIENT/REMAINDER) */
	uint16_t io_attr;
	uint16_t reg_block;
	uint8_t shift;
	int cs_equal, cs_above;		/* comparison results for conditional ops */
	int io_mode;			/* ATOM_IO_MM/PCI/SYSIO/IIO */
};
136
137extern int atom_debug;
138
139struct atom_context *atom_parse(struct card_info *, void *);
140void atom_execute_table(struct atom_context *, int, uint32_t *);
141int atom_asic_init(struct atom_context *);
142void atom_destroy(struct atom_context *);
143void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
144void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
145#include "atom-types.h"
146#include "atombios.h"
147#include "ObjectID.h"
148
149#endif
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
new file mode 100644
index 000000000000..cf67928abbc8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -0,0 +1,4785 @@
1/*
2 * Copyright 2006-2007 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23/****************************************************************************/
24/*Portion I: Definitions shared between VBIOS and Driver */
25/****************************************************************************/
26
27#ifndef _ATOMBIOS_H
28#define _ATOMBIOS_H
29
30#define ATOM_VERSION_MAJOR 0x00020000
31#define ATOM_VERSION_MINOR 0x00000002
32
33#define ATOM_HEADER_VERSION (ATOM_VERSION_MAJOR | ATOM_VERSION_MINOR)
34
/* Endianness must be specified before inclusion;
 * the #error below fires if it is not.
 */
38#ifndef ATOM_BIG_ENDIAN
39#error Endian not specified
40#endif
41
42#ifdef _H2INC
43#ifndef ULONG
44typedef unsigned long ULONG;
45#endif
46
47#ifndef UCHAR
48typedef unsigned char UCHAR;
49#endif
50
51#ifndef USHORT
52typedef unsigned short USHORT;
53#endif
54#endif
55
56#define ATOM_DAC_A 0
57#define ATOM_DAC_B 1
58#define ATOM_EXT_DAC 2
59
60#define ATOM_CRTC1 0
61#define ATOM_CRTC2 1
62
63#define ATOM_DIGA 0
64#define ATOM_DIGB 1
65
66#define ATOM_PPLL1 0
67#define ATOM_PPLL2 1
68
69#define ATOM_SCALER1 0
70#define ATOM_SCALER2 1
71
72#define ATOM_SCALER_DISABLE 0
73#define ATOM_SCALER_CENTER 1
74#define ATOM_SCALER_EXPANSION 2
75#define ATOM_SCALER_MULTI_EX 3
76
77#define ATOM_DISABLE 0
78#define ATOM_ENABLE 1
79#define ATOM_LCD_BLOFF (ATOM_DISABLE+2)
80#define ATOM_LCD_BLON (ATOM_ENABLE+2)
81#define ATOM_LCD_BL_BRIGHTNESS_CONTROL (ATOM_ENABLE+3)
82#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
83#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
84#define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
85
86#define ATOM_BLANKING 1
87#define ATOM_BLANKING_OFF 0
88
89#define ATOM_CURSOR1 0
90#define ATOM_CURSOR2 1
91
92#define ATOM_ICON1 0
93#define ATOM_ICON2 1
94
95#define ATOM_CRT1 0
96#define ATOM_CRT2 1
97
98#define ATOM_TV_NTSC 1
99#define ATOM_TV_NTSCJ 2
100#define ATOM_TV_PAL 3
101#define ATOM_TV_PALM 4
102#define ATOM_TV_PALCN 5
103#define ATOM_TV_PALN 6
104#define ATOM_TV_PAL60 7
105#define ATOM_TV_SECAM 8
106#define ATOM_TV_CV 16
107
108#define ATOM_DAC1_PS2 1
109#define ATOM_DAC1_CV 2
110#define ATOM_DAC1_NTSC 3
111#define ATOM_DAC1_PAL 4
112
113#define ATOM_DAC2_PS2 ATOM_DAC1_PS2
114#define ATOM_DAC2_CV ATOM_DAC1_CV
115#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC
116#define ATOM_DAC2_PAL ATOM_DAC1_PAL
117
118#define ATOM_PM_ON 0
119#define ATOM_PM_STANDBY 1
120#define ATOM_PM_SUSPEND 2
121#define ATOM_PM_OFF 3
122
123/* Bit0:{=0:single, =1:dual},
124 Bit1 {=0:666RGB, =1:888RGB},
125 Bit2:3:{Grey level}
126 Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}*/
127
128#define ATOM_PANEL_MISC_DUAL 0x00000001
129#define ATOM_PANEL_MISC_888RGB 0x00000002
130#define ATOM_PANEL_MISC_GREY_LEVEL 0x0000000C
131#define ATOM_PANEL_MISC_FPDI 0x00000010
132#define ATOM_PANEL_MISC_GREY_LEVEL_SHIFT 2
133#define ATOM_PANEL_MISC_SPATIAL 0x00000020
134#define ATOM_PANEL_MISC_TEMPORAL 0x00000040
135#define ATOM_PANEL_MISC_API_ENABLED 0x00000080
136
137#define MEMTYPE_DDR1 "DDR1"
138#define MEMTYPE_DDR2 "DDR2"
139#define MEMTYPE_DDR3 "DDR3"
140#define MEMTYPE_DDR4 "DDR4"
141
142#define ASIC_BUS_TYPE_PCI "PCI"
143#define ASIC_BUS_TYPE_AGP "AGP"
144#define ASIC_BUS_TYPE_PCIE "PCI_EXPRESS"
145
146/* Maximum size of that FireGL flag string */
147
148#define ATOM_FIREGL_FLAG_STRING "FGL" /* Flag used to enable FireGL Support */
149#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 /* sizeof( ATOM_FIREGL_FLAG_STRING ) */
150
151#define ATOM_FAKE_DESKTOP_STRING "DSK" /* Flag used to enable mobile ASIC on Desktop */
152#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
153
154#define ATOM_M54T_FLAG_STRING "M54T" /* Flag used to enable M54T Support */
155#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 /* sizeof( ATOM_M54T_FLAG_STRING ) */
156
157#define HW_ASSISTED_I2C_STATUS_FAILURE 2
158#define HW_ASSISTED_I2C_STATUS_SUCCESS 1
159
#pragma pack(1)	/* BIOS data must use byte alignment */
161
162/* Define offset to location of ROM header. */
163
164#define OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER 0x00000048L
165#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L
166
167#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94
168#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
169#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f
170#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e
171
172/* Common header for all ROM Data tables.
173 Every table pointed _ATOM_MASTER_DATA_TABLE has this common header.
174 And the pointer actually points to this header. */
175
typedef struct _ATOM_COMMON_TABLE_HEADER {
	USHORT usStructureSize;
	UCHAR ucTableFormatRevision;	/*Change it when the Parser is not backward compatible */
	UCHAR ucTableContentRevision;	/*Change it only when the table needs to change but the firmware */
	/*Image can't be updated, while Driver needs to carry the new table! */
} ATOM_COMMON_TABLE_HEADER;

/* ROM image header; located via the USHORT stored at image offset
 * OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER (0x48). */
typedef struct _ATOM_ROM_HEADER {
	ATOM_COMMON_TABLE_HEADER sHeader;
	UCHAR uaFirmWareSignature[4];	/*Signature to distinguish between Atombios and non-atombios,
					   atombios should init it as "ATOM", don't change the position */
	USHORT usBiosRuntimeSegmentAddress;
	USHORT usProtectedModeInfoOffset;
	USHORT usConfigFilenameOffset;
	USHORT usCRC_BlockOffset;
	USHORT usBIOS_BootupMessageOffset;
	USHORT usInt10Offset;
	USHORT usPciBusDevInitCode;
	USHORT usIoBaseAddress;
	USHORT usSubsystemVendorID;
	USHORT usSubsystemID;
	USHORT usPCI_InfoOffset;
	USHORT usMasterCommandTableOffset;	/*Offset for SW to get all command table offsets, Don't change the position */
	USHORT usMasterDataTableOffset;	/*Offset for SW to get all data table offsets, Don't change the position */
	UCHAR ucExtendedFunctionCode;
	UCHAR ucReserved;
} ATOM_ROM_HEADER;
203
204/*==============================Command Table Portion==================================== */
205
206#ifdef UEFI_BUILD
207#define UTEMP USHORT
208#define USHORT void*
209#endif
210
211typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES {
212 USHORT ASIC_Init; /* Function Table, used by various SW components,latest version 1.1 */
213 USHORT GetDisplaySurfaceSize; /* Atomic Table, Used by Bios when enabling HW ICON */
214 USHORT ASIC_RegistersInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
215 USHORT VRAM_BlockVenderDetection; /* Atomic Table, used only by Bios */
216 USHORT DIGxEncoderControl; /* Only used by Bios */
217 USHORT MemoryControllerInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
218 USHORT EnableCRTCMemReq; /* Function Table,directly used by various SW components,latest version 2.1 */
219 USHORT MemoryParamAdjust; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed */
220 USHORT DVOEncoderControl; /* Function Table,directly used by various SW components,latest version 1.2 */
221 USHORT GPIOPinControl; /* Atomic Table, only used by Bios */
222 USHORT SetEngineClock; /*Function Table,directly used by various SW components,latest version 1.1 */
223 USHORT SetMemoryClock; /* Function Table,directly used by various SW components,latest version 1.1 */
224 USHORT SetPixelClock; /*Function Table,directly used by various SW components,latest version 1.2 */
225 USHORT DynamicClockGating; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
226 USHORT ResetMemoryDLL; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
227 USHORT ResetMemoryDevice; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
228 USHORT MemoryPLLInit;
229 USHORT AdjustDisplayPll; /* only used by Bios */
230 USHORT AdjustMemoryController; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
231 USHORT EnableASIC_StaticPwrMgt; /* Atomic Table, only used by Bios */
232 USHORT ASIC_StaticPwrMgtStatusChange; /* Obsolete, only used by Bios */
233 USHORT DAC_LoadDetection; /* Atomic Table, directly used by various SW components,latest version 1.2 */
234 USHORT LVTMAEncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.3 */
235 USHORT LCD1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
236 USHORT DAC1EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
237 USHORT DAC2EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
238 USHORT DVOOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
239 USHORT CV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
240 USHORT GetConditionalGoldenSetting; /* only used by Bios */
241 USHORT TVEncoderControl; /* Function Table,directly used by various SW components,latest version 1.1 */
242 USHORT TMDSAEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */
243 USHORT LVDSEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */
244 USHORT TV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
245 USHORT EnableScaler; /* Atomic Table, used only by Bios */
246 USHORT BlankCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */
247 USHORT EnableCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */
248 USHORT GetPixelClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
249 USHORT EnableVGA_Render; /* Function Table,directly used by various SW components,latest version 1.1 */
250 USHORT EnableVGA_Access; /* Obsolete , only used by Bios */
251 USHORT SetCRTC_Timing; /* Atomic Table, directly used by various SW components,latest version 1.1 */
252 USHORT SetCRTC_OverScan; /* Atomic Table, used by various SW components,latest version 1.1 */
253 USHORT SetCRTC_Replication; /* Atomic Table, used only by Bios */
254 USHORT SelectCRTC_Source; /* Atomic Table, directly used by various SW components,latest version 1.1 */
255 USHORT EnableGraphSurfaces; /* Atomic Table, used only by Bios */
256 USHORT UpdateCRTC_DoubleBufferRegisters;
257 USHORT LUT_AutoFill; /* Atomic Table, only used by Bios */
258 USHORT EnableHW_IconCursor; /* Atomic Table, only used by Bios */
259 USHORT GetMemoryClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
260 USHORT GetEngineClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
261 USHORT SetCRTC_UsingDTDTiming; /* Atomic Table, directly used by various SW components,latest version 1.1 */
262 USHORT ExternalEncoderControl; /* Atomic Table, directly used by various SW components,latest version 2.1 */
263 USHORT LVTMAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
264 USHORT VRAM_BlockDetectionByStrap; /* Atomic Table, used only by Bios */
265 USHORT MemoryCleanUp; /* Atomic Table, only used by Bios */
266 USHORT ProcessI2cChannelTransaction; /* Function Table,only used by Bios */
267 USHORT WriteOneByteToHWAssistedI2C; /* Function Table,indirectly used by various SW components */
268 USHORT ReadHWAssistedI2CStatus; /* Atomic Table, indirectly used by various SW components */
269 USHORT SpeedFanControl; /* Function Table,indirectly used by various SW components,called from ASIC_Init */
270 USHORT PowerConnectorDetection; /* Atomic Table, directly used by various SW components,latest version 1.1 */
271 USHORT MC_Synchronization; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
272 USHORT ComputeMemoryEnginePLL; /* Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock */
273 USHORT MemoryRefreshConversion; /* Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock */
274 USHORT VRAM_GetCurrentInfoBlock; /* Atomic Table, used only by Bios */
275 USHORT DynamicMemorySettings; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
276 USHORT MemoryTraining; /* Atomic Table, used only by Bios */
277 USHORT EnableSpreadSpectrumOnPPLL; /* Atomic Table, directly used by various SW components,latest version 1.2 */
278 USHORT TMDSAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
279 USHORT SetVoltage; /* Function Table,directly and/or indirectly used by various SW components,latest version 1.1 */
280 USHORT DAC1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
281 USHORT DAC2OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
282 USHORT SetupHWAssistedI2CStatus; /* Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" */
283 USHORT ClockSource; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
284 USHORT MemoryDeviceInit; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
285 USHORT EnableYUV; /* Atomic Table, indirectly used by various SW components,called from EnableVGARender */
286 USHORT DIG1EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
287 USHORT DIG2EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
288 USHORT DIG1TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
289 USHORT DIG2TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
290 USHORT ProcessAuxChannelTransaction; /* Function Table,only used by Bios */
291 USHORT DPEncoderService; /* Function Table,only used by Bios */
292} ATOM_MASTER_LIST_OF_COMMAND_TABLES;
293
294/* For backward compatible */
295#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
296#define UNIPHYTransmitterControl DIG1TransmitterControl
297#define LVTMATransmitterControl DIG2TransmitterControl
298#define SetCRTC_DPM_State GetConditionalGoldenSetting
299#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
300
301typedef struct _ATOM_MASTER_COMMAND_TABLE {
302 ATOM_COMMON_TABLE_HEADER sHeader;
303 ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
304} ATOM_MASTER_COMMAND_TABLE;
305
306/****************************************************************************/
307/* Structures used in every command table */
308/****************************************************************************/
/* Bit-packed attribute word stored in every command table header.
 * The bitfield order flips with endianness so the in-memory layout
 * matches the little-endian BIOS image. */
typedef struct _ATOM_TABLE_ATTRIBUTE {
#if ATOM_BIG_ENDIAN
	USHORT UpdatedByUtility:1;	/* [15]=Table updated by utility flag */
	USHORT PS_SizeInBytes:7;	/* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
	USHORT WS_SizeInBytes:8;	/* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
#else
	USHORT WS_SizeInBytes:8;	/* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
	USHORT PS_SizeInBytes:7;	/* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
	USHORT UpdatedByUtility:1;	/* [15]=Table updated by utility flag */
#endif
} ATOM_TABLE_ATTRIBUTE;

/* Lets the attribute word be read either as bitfields or raw USHORT. */
typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS {
	ATOM_TABLE_ATTRIBUTE sbfAccess;
	USHORT susAccess;
} ATOM_TABLE_ATTRIBUTE_ACCESS;

/****************************************************************************/
/* Common header for all command tables. */
/* Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. */
/* And the pointer actually points to this header. */
/****************************************************************************/
typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER {
	ATOM_COMMON_TABLE_HEADER CommonHeader;
	ATOM_TABLE_ATTRIBUTE TableAttribute;
} ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
335
336/****************************************************************************/
337/* Structures used by ComputeMemoryEnginePLLTable */
338/****************************************************************************/
339#define COMPUTE_MEMORY_PLL_PARAM 1
340#define COMPUTE_ENGINE_PLL_PARAM 2
341
/* Rev 1 parameter block: caller fills ulClock/ucAction, the table
 * returns the chosen divider values. */
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS {
	ULONG ulClock;		/* When return, it's the re-calculated clock based on given Fb_div Post_Div and ref_div */
	UCHAR ucAction;		/* 0:reserved //1:Memory //2:Engine */
	UCHAR ucReserved;	/* may expand to return larger Fbdiv later */
	UCHAR ucFbDiv;		/* return value */
	UCHAR ucPostDiv;	/* return value */
} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;

/* Rev 2: wider feedback divider; ref_div comes back in ucAction. */
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 {
	ULONG ulClock;		/* When return, [23:0] return real clock */
	UCHAR ucAction;		/* 0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. it return ref_div to be written to register */
	USHORT usFbDiv;		/* return Feedback value to be written to register */
	UCHAR ucPostDiv;	/* return post div to be written to register */
} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
356#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
357
358#define SET_CLOCK_FREQ_MASK 0x00FFFFFF /* Clock change tables only take bit [23:0] as the requested clock value */
359#define USE_NON_BUS_CLOCK_MASK 0x01000000 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
360#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
361#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
362#define FIRST_TIME_CHANGE_CLOCK 0x08000000 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
363#define SKIP_SW_PROGRAM_PLL 0x10000000 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
364#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK
365
366#define b3USE_NON_BUS_CLOCK_MASK 0x01 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
367#define b3USE_MEMORY_SELF_REFRESH 0x02 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
368#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
369#define b3FIRST_TIME_CHANGE_CLOCK 0x08 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
370#define b3SKIP_SW_PROGRAM_PLL 0x10 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
371
/* 32-bit clock request word: low 24 bits are the target clock in
 * 10 kHz units, high 8 bits select memory vs engine PLL computation.
 * Field order flips with endianness to match the BIOS image layout. */
typedef struct _ATOM_COMPUTE_CLOCK_FREQ {
#if ATOM_BIG_ENDIAN
	ULONG ulComputeClockFlag:8;	/* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
	ULONG ulClockFreq:24;	/* in unit of 10kHz */
#else
	ULONG ulClockFreq:24;	/* in unit of 10kHz */
	ULONG ulComputeClockFlag:8;	/* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
#endif
} ATOM_COMPUTE_CLOCK_FREQ;

/* Fractional feedback divider returned by the rev-3 computation. */
typedef struct _ATOM_S_MPLL_FB_DIVIDER {
	USHORT usFbDivFrac;
	USHORT usFbDiv;
} ATOM_S_MPLL_FB_DIVIDER;

/* Rev 3: the clock request (input) and feedback divider (output)
 * share storage through the anonymous union. */
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 {
	union {
		ATOM_COMPUTE_CLOCK_FREQ ulClock;	/* Input Parameter */
		ATOM_S_MPLL_FB_DIVIDER ulFbDiv;	/* Output Parameter */
	};
	UCHAR ucRefDiv;		/* Output Parameter */
	UCHAR ucPostDiv;	/* Output Parameter */
	UCHAR ucCntlFlag;	/* Output Parameter (ATOM_PLL_CNTL_FLAG_*) */
	UCHAR ucReserved;
} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
397
398/* ucCntlFlag */
399#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1
400#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2
401#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4
402
403typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER {
404 ATOM_COMPUTE_CLOCK_FREQ ulClock;
405 ULONG ulReserved[2];
406} DYNAMICE_MEMORY_SETTINGS_PARAMETER;
407
408typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER {
409 ATOM_COMPUTE_CLOCK_FREQ ulClock;
410 ULONG ulMemoryClock;
411 ULONG ulReserved;
412} DYNAMICE_ENGINE_SETTINGS_PARAMETER;
413
/****************************************************************************/
/* Structures used by SetEngineClockTable */
/****************************************************************************/
typedef struct _SET_ENGINE_CLOCK_PARAMETERS {
	ULONG ulTargetEngineClock;	/* In 10Khz unit */
} SET_ENGINE_CLOCK_PARAMETERS;

/* PS-allocation variant: reserves parameter space for the nested
 * ComputeMemoryEnginePLL call; caller only sets the target clock. */
typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION {
	ULONG ulTargetEngineClock;	/* In 10Khz unit */
	COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
} SET_ENGINE_CLOCK_PS_ALLOCATION;

/****************************************************************************/
/* Structures used by SetMemoryClockTable */
/****************************************************************************/
typedef struct _SET_MEMORY_CLOCK_PARAMETERS {
	ULONG ulTargetMemoryClock;	/* In 10Khz unit */
} SET_MEMORY_CLOCK_PARAMETERS;

typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION {
	ULONG ulTargetMemoryClock;	/* In 10Khz unit */
	COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
} SET_MEMORY_CLOCK_PS_ALLOCATION;

/****************************************************************************/
/* Structures used by ASIC_Init.ctb */
/****************************************************************************/
typedef struct _ASIC_INIT_PARAMETERS {
	ULONG ulDefaultEngineClock;	/* In 10Khz unit */
	ULONG ulDefaultMemoryClock;	/* In 10Khz unit */
} ASIC_INIT_PARAMETERS;

typedef struct _ASIC_INIT_PS_ALLOCATION {
	ASIC_INIT_PARAMETERS sASICInitClocks;
	SET_ENGINE_CLOCK_PS_ALLOCATION sReserved;	/* Caller doesn't need to init this structure */
} ASIC_INIT_PS_ALLOCATION;
450
451/****************************************************************************/
452/* Structure used by DynamicClockGatingTable.ctb */
453/****************************************************************************/
454typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS {
455 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
456 UCHAR ucPadding[3];
457} DYNAMIC_CLOCK_GATING_PARAMETERS;
458#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
459
460/****************************************************************************/
461/* Structure used by EnableASIC_StaticPwrMgtTable.ctb */
462/****************************************************************************/
463typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS {
464 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
465 UCHAR ucPadding[3];
466} ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
467#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
468
469/****************************************************************************/
470/* Structures used by DAC_LoadDetectionTable.ctb */
471/****************************************************************************/
472typedef struct _DAC_LOAD_DETECTION_PARAMETERS {
473 USHORT usDeviceID; /* {ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} */
474 UCHAR ucDacType; /* {ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} */
475 UCHAR ucMisc; /* Valid only when table revision =1.3 and above */
476} DAC_LOAD_DETECTION_PARAMETERS;
477
478/* DAC_LOAD_DETECTION_PARAMETERS.ucMisc */
479#define DAC_LOAD_MISC_YPrPb 0x01
480
481typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION {
482 DAC_LOAD_DETECTION_PARAMETERS sDacload;
483 ULONG Reserved[2]; /* Don't set this one, allocation for EXT DAC */
484} DAC_LOAD_DETECTION_PS_ALLOCATION;
485
486/****************************************************************************/
487/* Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb */
488/****************************************************************************/
489typedef struct _DAC_ENCODER_CONTROL_PARAMETERS {
490 USHORT usPixelClock; /* in 10KHz; for bios convenient */
491 UCHAR ucDacStandard; /* See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) */
492 UCHAR ucAction; /* 0: turn off encoder */
493 /* 1: setup and turn on encoder */
494 /* 7: ATOM_ENCODER_INIT Initialize DAC */
495} DAC_ENCODER_CONTROL_PARAMETERS;
496
497#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS
498
499/****************************************************************************/
500/* Structures used by DIG1EncoderControlTable */
501/* DIG2EncoderControlTable */
502/* ExternalEncoderControlTable */
503/****************************************************************************/
/* Parameter block shared by DIG1/DIG2/ExternalEncoderControl tables;
 * ucConfig uses the ATOM_ENCODER_CONFIG_* bits defined below. */
typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
	USHORT usPixelClock;	/* in 10KHz; for bios convenient */
	UCHAR ucConfig;
	/* [2] Link Select: */
	/* =0: PHY linkA if bfLane<3 */
	/* =1: PHY linkB if bfLanes<3 */
	/* =0: PHY linkA+B if bfLanes=3 */
	/* [3] Transmitter Sel */
	/* =0: UNIPHY or PCIEPHY */
	/* =1: LVTMA */
	UCHAR ucAction;		/* =0: turn off encoder */
	/* =1: turn on encoder */
	UCHAR ucEncoderMode;
	/* =0: DP encoder */
	/* =1: LVDS encoder */
	/* =2: DVI encoder */
	/* =3: HDMI encoder */
	/* =4: SDVO encoder */
	UCHAR ucLaneNum;	/* how many lanes to enable */
	UCHAR ucReserved[2];
} DIG_ENCODER_CONTROL_PARAMETERS;
525#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS
526#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS
527
/* ucConfig of DIG_ENCODER_CONTROL_PARAMETERS */
#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK		0x01
#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ		0x00
#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ		0x01
#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK		0x04
#define ATOM_ENCODER_CONFIG_LINKA			0x00
#define ATOM_ENCODER_CONFIG_LINKB			0x04
/* Fixed: LINKA_B previously aliased ATOM_TRANSMITTER_CONFIG_LINKA, a
 * copy/paste slip into the transmitter-config family that also
 * forward-referenced a macro defined only further down this header.
 * Value is unchanged (0x00); it now aliases the encoder-config macro. */
#define ATOM_ENCODER_CONFIG_LINKA_B			ATOM_ENCODER_CONFIG_LINKA
#define ATOM_ENCODER_CONFIG_LINKB_A			ATOM_ENCODER_CONFIG_LINKB
#define ATOM_ENCODER_CONFIG_TRANSMITTER_SEL_MASK	0x08
#define ATOM_ENCODER_CONFIG_UNIPHY			0x00
#define ATOM_ENCODER_CONFIG_LVTMA			0x08
#define ATOM_ENCODER_CONFIG_TRANSMITTER1		0x00
#define ATOM_ENCODER_CONFIG_TRANSMITTER2		0x08
#define ATOM_ENCODER_CONFIG_DIGB			0x80	/* VBIOS internal use only; external SW must leave this bit 0 */
/* ucAction */
/* ATOM_ENABLE:  Enable Encoder */
/* ATOM_DISABLE: Disable Encoder */

/* ucEncoderMode */
#define ATOM_ENCODER_MODE_DP				0
#define ATOM_ENCODER_MODE_LVDS				1
#define ATOM_ENCODER_MODE_DVI				2
#define ATOM_ENCODER_MODE_HDMI				3
#define ATOM_ENCODER_MODE_SDVO				4
#define ATOM_ENCODER_MODE_TV				13
#define ATOM_ENCODER_MODE_CV				14
#define ATOM_ENCODER_MODE_CRT				15
556
/* Bitfield view of the v2 DIG-encoder ucConfig byte. Both endian layouts are
 * spelled out so the in-memory byte matches the VBIOS-defined bit positions. */
typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 {
#if ATOM_BIG_ENDIAN
	UCHAR ucReserved1:2;
	UCHAR ucTransmitterSel:2;	/* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */
	UCHAR ucLinkSel:1;	/* =0: linkA/C/E =1: linkB/D/F */
	UCHAR ucReserved:1;
	UCHAR ucDPLinkRate:1;	/* =0: 1.62Ghz, =1: 2.7Ghz */
#else
	UCHAR ucDPLinkRate:1;	/* =0: 1.62Ghz, =1: 2.7Ghz */
	UCHAR ucReserved:1;
	UCHAR ucLinkSel:1;	/* =0: linkA/C/E =1: linkB/D/F */
	UCHAR ucTransmitterSel:2;	/* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */
	UCHAR ucReserved1:2;
#endif
} ATOM_DIG_ENCODER_CONFIG_V2;
572
/* v2 of DIG_ENCODER_CONTROL_PARAMETERS: config byte becomes a typed bitfield. */
typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
	USHORT usPixelClock;	/* in 10KHz; for BIOS convenience */
	ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
	UCHAR ucAction;		/* ATOM_ENABLE / ATOM_DISABLE */
	UCHAR ucEncoderMode;
	/* =0: DP encoder */
	/* =1: LVDS encoder */
	/* =2: DVI encoder */
	/* =3: HDMI encoder */
	/* =4: SDVO encoder */
	UCHAR ucLaneNum;	/* how many lanes to enable */
	UCHAR ucReserved[2];	/* pad to DWORD size */
} DIG_ENCODER_CONTROL_PARAMETERS_V2;

/* ucConfig (flat-byte equivalents of the ATOM_DIG_ENCODER_CONFIG_V2 bitfield) */
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK				0x01
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ		0x00
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ		0x01
#define ATOM_ENCODER_CONFIG_V2_LINK_SEL_MASK				0x04
#define ATOM_ENCODER_CONFIG_V2_LINKA						0x00
#define ATOM_ENCODER_CONFIG_V2_LINKB						0x04
#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER_SEL_MASK	0x18	/* two bits, unlike v1's single bit */
#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER1				0x00
#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2				0x08
#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3				0x10
598
/****************************************************************************/
/* Structures used by UNIPHYTransmitterControlTable */
/*                    LVTMATransmitterControlTable */
/*                    DVOOutputControlTable */
/****************************************************************************/
/* DisplayPort voltage-swing training selector; overlays the pixel-clock word
 * in the transmitter-control union below when doing DP link training. */
typedef struct _ATOM_DP_VS_MODE {
	UCHAR ucLaneSel;	/* which lane(s) the setting applies to */
	UCHAR ucLaneSet;	/* swing/pre-emphasis setting — encoding per DP spec; TODO confirm exact mapping */
} ATOM_DP_VS_MODE;
608
/* v1 DIG transmitter control. The leading union is interpreted per ucAction:
 * pixel clock for normal enable, init info for ATOM_TRANSMITTER_ACTION_INIT,
 * or DP voltage-swing mode for ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH. */
typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
	union {
		USHORT usPixelClock;	/* in 10KHz; for BIOS convenience */
		USHORT usInitInfo;	/* when init uniphy, lower 8 bits hold the connector type defined in objectid.h */
		ATOM_DP_VS_MODE asMode;	/* DP voltage swing mode */
	};
	UCHAR ucConfig;		/* bit layout below; see also ATOM_TRANSMITTER_CONFIG_xxx */
	/* [0]=0: 4 lane Link, */
	/* =1: 8 lane Link ( Dual Links TMDS ) */
	/* [1]=0: InCoherent mode */
	/* =1: Coherent Mode */
	/* [2] Link Select: */
	/* =0: PHY linkA if bfLane<3 */
	/* =1: PHY linkB if bfLanes<3 */
	/* =0: PHY linkA+B if bfLanes=3 */
	/* [5:4]PCIE lane Sel */
	/* =0: lane 0~3 or 0~7 */
	/* =1: lane 4~7 */
	/* =2: lane 8~11 or 8~15 */
	/* =3: lane 12~15 */
	UCHAR ucAction;		/* see ATOM_TRANSMITTER_ACTION_xxx */
	/* =1: turn on encoder */
	UCHAR ucReserved[4];	/* pad; keep zeroed */
} DIG_TRANSMITTER_CONTROL_PARAMETERS;

#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS

/* usInitInfo */
#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK	0x00ff	/* sic: "TRAMITTER" typo is part of the published API; do not rename */
638
/* ucConfig of DIG_TRANSMITTER_CONTROL_PARAMETERS */
#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK			0x01
#define ATOM_TRANSMITTER_CONFIG_COHERENT				0x02
#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK		0x04
#define ATOM_TRANSMITTER_CONFIG_LINKA					0x00
#define ATOM_TRANSMITTER_CONFIG_LINKB					0x04
/* dual-link variants: same bit, named for which link is the master */
#define ATOM_TRANSMITTER_CONFIG_LINKA_B					0x00
#define ATOM_TRANSMITTER_CONFIG_LINKB_A					0x04

#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK	0x08	/* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER		0x00	/* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER		0x08	/* only used when ATOM_TRANSMITTER_ACTION_ENABLE */

#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK			0x30
#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL			0x00
#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PCIE			0x20
#define ATOM_TRANSMITTER_CONFIG_CLKSRC_XTALIN		0x30
#define ATOM_TRANSMITTER_CONFIG_LANE_SEL_MASK		0xc0
#define ATOM_TRANSMITTER_CONFIG_LANE_0_3				0x00
#define ATOM_TRANSMITTER_CONFIG_LANE_0_7				0x00
#define ATOM_TRANSMITTER_CONFIG_LANE_4_7				0x40
#define ATOM_TRANSMITTER_CONFIG_LANE_8_11				0x80
#define ATOM_TRANSMITTER_CONFIG_LANE_8_15				0x80
#define ATOM_TRANSMITTER_CONFIG_LANE_12_15				0xc0

/* ucAction */
#define ATOM_TRANSMITTER_ACTION_DISABLE					       0
#define ATOM_TRANSMITTER_ACTION_ENABLE					       1
#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF				       2	/* LCD backlight off */
#define ATOM_TRANSMITTER_ACTION_LCD_BLON				       3	/* LCD backlight on */
#define ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL  4
#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_START			5
#define ATOM_TRANSMITTER_ACTION_LCD_SELFTEST_STOP			6
#define ATOM_TRANSMITTER_ACTION_INIT						       7
#define ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT			       8
#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT			       9
#define ATOM_TRANSMITTER_ACTION_SETUP					       10
#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH           11	/* program DP voltage swing / pre-emphasis */
677
/* Following are used for DigTransmitterControlTable ver1.2 */
/* Bitfield view of the v1.2 transmitter ucConfig byte; both endian layouts
 * spelled out so the in-memory byte matches the VBIOS-defined bit positions. */
typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 {
#if ATOM_BIG_ENDIAN
	UCHAR ucTransmitterSel:2;	/* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
	/* =1 Dig Transmitter 2 ( Uniphy CD ) */
	/* =2 Dig Transmitter 3 ( Uniphy EF ) */
	UCHAR ucReserved:1;
	UCHAR fDPConnector:1;	/* bit4=0: DP connector  =1: None DP connector */
	UCHAR ucEncoderSel:1;	/* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
	UCHAR ucLinkSel:1;	/* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
	/* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */

	UCHAR fCoherentMode:1;	/* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
	UCHAR fDualLinkConnector:1;	/* bit0=1: Dual Link DVI connector */
#else
	UCHAR fDualLinkConnector:1;	/* bit0=1: Dual Link DVI connector */
	UCHAR fCoherentMode:1;	/* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
	UCHAR ucLinkSel:1;	/* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
	/* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
	UCHAR ucEncoderSel:1;	/* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
	UCHAR fDPConnector:1;	/* bit4=0: DP connector  =1: None DP connector */
	UCHAR ucReserved:1;
	UCHAR ucTransmitterSel:2;	/* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
	/* =1 Dig Transmitter 2 ( Uniphy CD ) */
	/* =2 Dig Transmitter 3 ( Uniphy EF ) */
#endif
} ATOM_DIG_TRANSMITTER_CONFIG_V2;
705
/* ucConfig (flat-byte equivalents of ATOM_DIG_TRANSMITTER_CONFIG_V2) */
/* Bit0 */
#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR			0x01

/* Bit1 */
#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT				0x02

/* Bit2 */
#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK		0x04
#define ATOM_TRANSMITTER_CONFIG_V2_LINKA			0x00
#define ATOM_TRANSMITTER_CONFIG_V2_LINKB				0x04

/* Bit3 */
#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK	0x08
#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER		0x00	/* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER		0x08	/* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */

/* Bit4 */
#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR			0x10	/* sic: "TRASMITTER" typo is part of the published API; do not rename */

/* Bit7:6 */
#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK     0xC0
#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1           	0x00	/* AB */
#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2           	0x40	/* CD */
#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3           	0x80	/* EF */
731
/* v2 transmitter control: same leading union as v1, config byte becomes a
 * typed bitfield. The union is interpreted per ucAction (clock / init / DP). */
typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 {
	union {
		USHORT usPixelClock;	/* in 10KHz; for BIOS convenience */
		USHORT usInitInfo;	/* when init uniphy, lower 8 bits hold the connector type defined in objectid.h */
		ATOM_DP_VS_MODE asMode;	/* DP voltage swing mode */
	};
	ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
	UCHAR ucAction;		/* define as ATOM_TRANSMITER_ACTION_XXX */
	UCHAR ucReserved[4];	/* pad; keep zeroed */
} DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
742
/****************************************************************************/
/* Structures used by DAC1OutputControlTable */
/*                    DAC2OutputControlTable */
/*                    LVTMAOutputControlTable (Before DCE30) */
/*                    TMDSAOutputControlTable (Before DCE30) */
/****************************************************************************/
/* Generic per-device output on/off control; aliased below for every
 * legacy display device table. */
typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
	UCHAR ucAction;		/* Possible input: ATOM_ENABLE || ATOM_DISABLE */
	/* When the display is LCD, in addition to above: */
	/* ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| */
	/* ATOM_LCD_SELFTEST_STOP */

	UCHAR aucPadding[3];	/* padding to DWORD aligned */
} DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;

#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS

/* Per-device aliases: every legacy output-control table takes the same struct. */
#define CRT1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION

#define CRT2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION

#define CV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define CV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION

#define TV1_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define TV1_OUTPUT_CONTROL_PS_ALLOCATION   DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION

#define DFP1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define DFP1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION

#define DFP2_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define DFP2_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION

#define LCD1_OUTPUT_CONTROL_PARAMETERS     DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define LCD1_OUTPUT_CONTROL_PS_ALLOCATION  DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION

/* DVO is special: its PS allocation reuses the transmitter-control layout. */
#define DVO_OUTPUT_CONTROL_PARAMETERS      DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define DVO_OUTPUT_CONTROL_PS_ALLOCATION   DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
#define DVO_OUTPUT_CONTROL_PARAMETERS_V3   DIG_TRANSMITTER_CONTROL_PARAMETERS
784
/****************************************************************************/
/* Structures used by BlankCRTCTable */
/****************************************************************************/
typedef struct _BLANK_CRTC_PARAMETERS {
	UCHAR ucCRTC;		/* ATOM_CRTC1 or ATOM_CRTC2 */
	UCHAR ucBlanking;	/* ATOM_BLANKING or ATOM_BLANKINGOFF */
	USHORT usBlackColorRCr;	/* blanking color, R / Cr component */
	USHORT usBlackColorGY;	/* blanking color, G / Y component */
	USHORT usBlackColorBCb;	/* blanking color, B / Cb component */
} BLANK_CRTC_PARAMETERS;
#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS
796
/****************************************************************************/
/* Structures used by EnableCRTCTable */
/*                    EnableCRTCMemReqTable */
/*                    UpdateCRTC_DoubleBufferRegistersTable */
/****************************************************************************/
typedef struct _ENABLE_CRTC_PARAMETERS {
	UCHAR ucCRTC;		/* ATOM_CRTC1 or ATOM_CRTC2 */
	UCHAR ucEnable;		/* ATOM_ENABLE or ATOM_DISABLE */
	UCHAR ucPadding[2];	/* pad to DWORD size */
} ENABLE_CRTC_PARAMETERS;
#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS
808
/****************************************************************************/
/* Structures used by SetCRTC_OverScanTable */
/****************************************************************************/
typedef struct _SET_CRTC_OVERSCAN_PARAMETERS {
	USHORT usOverscanRight;	/* right border, in pixels */
	USHORT usOverscanLeft;	/* left border, in pixels */
	USHORT usOverscanBottom;	/* bottom border, in lines */
	USHORT usOverscanTop;	/* top border, in lines */
	UCHAR ucCRTC;		/* ATOM_CRTC1 or ATOM_CRTC2 */
	UCHAR ucPadding[3];	/* pad to DWORD size */
} SET_CRTC_OVERSCAN_PARAMETERS;
#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS
821
/****************************************************************************/
/* Structures used by SetCRTC_ReplicationTable */
/****************************************************************************/
typedef struct _SET_CRTC_REPLICATION_PARAMETERS {
	UCHAR ucH_Replication;	/* horizontal replication */
	UCHAR ucV_Replication;	/* vertical replication */
	UCHAR usCRTC;		/* ATOM_CRTC1 or ATOM_CRTC2; note: UCHAR despite the "us" prefix — name is part of the ABI, do not rename */
	UCHAR ucPadding;	/* pad to DWORD size */
} SET_CRTC_REPLICATION_PARAMETERS;
#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS
832
/****************************************************************************/
/* Structures used by SelectCRTC_SourceTable */
/****************************************************************************/
typedef struct _SELECT_CRTC_SOURCE_PARAMETERS {
	UCHAR ucCRTC;		/* ATOM_CRTC1 or ATOM_CRTC2 */
	UCHAR ucDevice;		/* ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... */
	UCHAR ucPadding[2];	/* pad to DWORD size */
} SELECT_CRTC_SOURCE_PARAMETERS;
#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS

/* v2 selects by encoder rather than by device bitmap. */
typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 {
	UCHAR ucCRTC;		/* ATOM_CRTC1 or ATOM_CRTC2 */
	UCHAR ucEncoderID;	/* DAC1/DAC2/TVOUT/DIG1/DIG2/DVO; see ASIC_xxx_ENCODER_ID values in the comment below */
	UCHAR ucEncodeMode;	/* Encoding mode, only valid when using DIG1/DIG2/DVO; ATOM_ENCODER_MODE_xxx */
	UCHAR ucPadding;	/* pad to DWORD size */
} SELECT_CRTC_SOURCE_PARAMETERS_V2;
849
850/* ucEncoderID */
851/* #define ASIC_INT_DAC1_ENCODER_ID 0x00 */
852/* #define ASIC_INT_TV_ENCODER_ID 0x02 */
853/* #define ASIC_INT_DIG1_ENCODER_ID 0x03 */
854/* #define ASIC_INT_DAC2_ENCODER_ID 0x04 */
855/* #define ASIC_EXT_TV_ENCODER_ID 0x06 */
856/* #define ASIC_INT_DVO_ENCODER_ID 0x07 */
857/* #define ASIC_INT_DIG2_ENCODER_ID 0x09 */
858/* #define ASIC_EXT_DIG_ENCODER_ID 0x05 */
859
860/* ucEncodeMode */
861/* #define ATOM_ENCODER_MODE_DP 0 */
862/* #define ATOM_ENCODER_MODE_LVDS 1 */
863/* #define ATOM_ENCODER_MODE_DVI 2 */
864/* #define ATOM_ENCODER_MODE_HDMI 3 */
865/* #define ATOM_ENCODER_MODE_SDVO 4 */
866/* #define ATOM_ENCODER_MODE_TV 13 */
867/* #define ATOM_ENCODER_MODE_CV 14 */
868/* #define ATOM_ENCODER_MODE_CRT 15 */
869
/****************************************************************************/
/* Structures used by SetPixelClockTable */
/*                    GetPixelClockTable */
/****************************************************************************/
/* Major revision=1., Minor revision=1 */
typedef struct _PIXEL_CLOCK_PARAMETERS {
	USHORT usPixelClock;	/* in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
	/* 0 means disable PPLL */
	USHORT usRefDiv;	/* Reference divider */
	USHORT usFbDiv;		/* feedback divider */
	UCHAR ucPostDiv;	/* post divider */
	UCHAR ucFracFbDiv;	/* fractional feedback divider */
	UCHAR ucPpll;		/* ATOM_PPLL1 or ATOM_PPL2 */
	UCHAR ucRefDivSrc;	/* ATOM_PJITTER or ATO_NONPJITTER */
	UCHAR ucCRTC;		/* Which CRTC uses this Ppll */
	UCHAR ucPadding;	/* pad to DWORD size */
} PIXEL_CLOCK_PARAMETERS;

/* Major revision=1., Minor revision=2: adds ucMiscInfo in place of the pad byte */
/* ucMiscInfo: */
#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
#define MISC_DEVICE_INDEX_MASK        0xF0
#define MISC_DEVICE_INDEX_SHIFT       4

typedef struct _PIXEL_CLOCK_PARAMETERS_V2 {
	USHORT usPixelClock;	/* in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
	/* 0 means disable PPLL */
	USHORT usRefDiv;	/* Reference divider */
	USHORT usFbDiv;		/* feedback divider */
	UCHAR ucPostDiv;	/* post divider */
	UCHAR ucFracFbDiv;	/* fractional feedback divider */
	UCHAR ucPpll;		/* ATOM_PPLL1 or ATOM_PPL2 */
	UCHAR ucRefDivSrc;	/* ATOM_PJITTER or ATO_NONPJITTER */
	UCHAR ucCRTC;		/* Which CRTC uses this Ppll */
	UCHAR ucMiscInfo;	/* Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog */
} PIXEL_CLOCK_PARAMETERS_V2;
906
/* Major revision=1., Minor revision=3, structure/definition change */
/* ucEncoderMode: */
/* ATOM_ENCODER_MODE_DP */
/* ATOM_ENCODER_MODE_LVDS */
/* ATOM_ENCODER_MODE_DVI */
/* ATOM_ENCODER_MODE_HDMI */
/* ATOM_ENCODER_MODE_SDVO */
/* ATOM_ENCODER_MODE_TV 13 */
/* ATOM_ENCODER_MODE_CV 14 */
/* ATOM_ENCODER_MODE_CRT 15 */

/* ucDVOConfig (same values as the DVO_ENCODER_CONFIG_xxx defines further down) */
/* #define DVO_ENCODER_CONFIG_RATE_SEL 0x01 */
/* #define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 */
/* #define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 */
/* #define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c */
/* #define DVO_ENCODER_CONFIG_LOW12BIT 0x00 */
/* #define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 */
/* #define DVO_ENCODER_CONFIG_24BIT 0x08 */

/* ucMiscInfo: also changed, see below */
#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL					0x01
#define PIXEL_CLOCK_MISC_VGA_MODE							0x02
#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK					0x04
#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1					0x00
#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2					0x04
#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK		0x08

typedef struct _PIXEL_CLOCK_PARAMETERS_V3 {
	USHORT usPixelClock;	/* in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
	/* 0 means disable PPLL. For VGA PPLL, make sure this value is not 0. */
	USHORT usRefDiv;	/* Reference divider */
	USHORT usFbDiv;		/* feedback divider */
	UCHAR ucPostDiv;	/* post divider */
	UCHAR ucFracFbDiv;	/* fractional feedback divider */
	UCHAR ucPpll;		/* ATOM_PPLL1 or ATOM_PPL2 */
	UCHAR ucTransmitterId;	/* graphic encoder id defined in objectId.h */
	union {
		UCHAR ucEncoderMode;	/* encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ */
		UCHAR ucDVOConfig;	/* when use DVO, need to know SDR/DDR, 12bit or 24bit */
	};
	UCHAR ucMiscInfo;	/* bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel */
	/* bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source */
} PIXEL_CLOCK_PARAMETERS_V3;

/* NOTE(review): _LAST maps to V2, not V3 — presumably GetPixelClock still
 * returns the V2 layout; confirm before "fixing" this to V3. */
#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2
#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST
954
/****************************************************************************/
/* Structures used by AdjustDisplayPllTable */
/****************************************************************************/
typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS {
	USHORT usPixelClock;	/* in 10KHz unit — TODO confirm; unit not stated here but matches sibling tables */
	UCHAR ucTransmitterID;	/* encoder id defined in objectId.h */
	UCHAR ucEncodeMode;	/* ATOM_ENCODER_MODE_xxx */
	union {
		UCHAR ucDVOConfig;	/* if DVO, need passing link rate and output 12bitlow or 24bit */
		UCHAR ucConfig;	/* if none DVO, not defined yet */
	};
	UCHAR ucReserved[3];	/* pad; keep zeroed */
} ADJUST_DISPLAY_PLL_PARAMETERS;

/* ucConfig bit: spread spectrum enabled on this link */
#define ADJUST_DISPLAY_CONFIG_SS_ENABLE       0x10

#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS
972
/****************************************************************************/
/* Structures used by EnableYUVTable */
/****************************************************************************/
typedef struct _ENABLE_YUV_PARAMETERS {
	UCHAR ucEnable;		/* ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) */
	UCHAR ucCRTC;		/* Which CRTC needs this YUV or RGB format */
	UCHAR ucPadding[2];	/* pad to DWORD size */
} ENABLE_YUV_PARAMETERS;
#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
982
/****************************************************************************/
/* Structures used by GetMemoryClockTable */
/****************************************************************************/
typedef struct _GET_MEMORY_CLOCK_PARAMETERS {
	ULONG ulReturnMemoryClock;	/* output: current memory speed in 10KHz unit */
} GET_MEMORY_CLOCK_PARAMETERS;
#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS

/****************************************************************************/
/* Structures used by GetEngineClockTable */
/****************************************************************************/
typedef struct _GET_ENGINE_CLOCK_PARAMETERS {
	ULONG ulReturnEngineClock;	/* output: current engine speed in 10KHz unit */
} GET_ENGINE_CLOCK_PARAMETERS;
#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS
998
/****************************************************************************/
/* Following Structures and constants may be obsolete */
/****************************************************************************/
/* Maximum 8 bytes; the data read in will be placed in the parameter space. */
/* The read operation is successful when the parameter space is non-zero; otherwise the read operation failed. */
typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS {
	USHORT usPrescale;	/* Ratio between Engine clock and I2C clock */
	USHORT usVRAMAddress;	/* Address in Frame Buffer where to place raw EDID */
	USHORT usStatus;	/* When used as output: lower byte EDID checksum, high byte hardware status */
	/* When used as input: lower byte as 'bytes to read': currently limited to 128 bytes or 1 byte */
	UCHAR ucSlaveAddr;	/* Read from which slave */
	UCHAR ucLineNumber;	/* Read from which HW assisted line */
} READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS

/* Formats for the upper portion of WRITE_ONE_BYTE_HW_I2C usByteOffset (see below) */
#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE                  0
#define  ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES              1
#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK       2
#define  ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK  3
#define  ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK       4
1019
/* Hardware-assisted I2C write of one (or two) data bytes. */
typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS {
	USHORT usPrescale;	/* Ratio between Engine clock and I2C clock */
	USHORT usByteOffset;	/* Write to which byte */
	/* Upper portion of usByteOffset is Format of data (ATOM_WRITE_I2C_FORMAT_xxx) */
	/* 1bytePS+offsetPS */
	/* 2bytesPS+offsetPS */
	/* blockID+offsetPS */
	/* blockID+offsetID */
	/* blockID+counterID+offsetID */
	UCHAR ucData;		/* PS data1 */
	UCHAR ucStatus;		/* Status byte 1=success, 2=failure, Also is used as PS data2 */
	UCHAR ucSlaveAddr;	/* Write to which slave */
	UCHAR ucLineNumber;	/* Write from which HW assisted line */
} WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;

#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS

/* Configure a hardware-assisted I2C line without transferring data. */
typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS {
	USHORT usPrescale;	/* Ratio between Engine clock and I2C clock */
	UCHAR ucSlaveAddr;	/* Write to which slave */
	UCHAR ucLineNumber;	/* Write from which HW assisted line */
} SET_UP_HW_I2C_DATA_PARAMETERS;

/**************************************************************************/
/* Fan-speed control reuses the one-byte I2C write allocation. */
#define SPEED_FAN_CONTROL_PS_ALLOCATION   WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
1045
/****************************************************************************/
/* Structures used by PowerConnectorDetectionTable */
/****************************************************************************/
typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS {
	UCHAR ucPowerConnectorStatus;	/* Used for return value 0: detected, 1: not detected */
	UCHAR ucPwrBehaviorId;	/* power behavior selector — semantics defined by the VBIOS table; TODO confirm values */
	USHORT usPwrBudget;	/* how much power currently booted to, in units of watts */
} POWER_CONNECTOR_DETECTION_PARAMETERS;

/* Allocation variant: adds I2C workspace the table firmware may use. */
typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION {
	UCHAR ucPowerConnectorStatus;	/* Used for return value 0: detected, 1: not detected */
	UCHAR ucReserved;
	USHORT usPwrBudget;	/* how much power currently booted to, in units of watts */
	WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;	/* workspace; caller need not initialize */
} POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
1061
/****************************LVDS SS Command Table Definitions**********************/

/****************************************************************************/
/* Structures used by EnableSpreadSpectrumOnPPLLTable */
/****************************************************************************/
typedef struct _ENABLE_LVDS_SS_PARAMETERS {
	USHORT usSpreadSpectrumPercentage;
	UCHAR ucSpreadSpectrumType;	/* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
	/* NOTE(review): upstream comment describes "Bit1" twice; one of these is
	 * presumably a different bit — confirm against the SS table spec. */
	UCHAR ucSpreadSpectrumStepSize_Delay;	/* bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY */
	UCHAR ucEnable;		/* ATOM_ENABLE or ATOM_DISABLE */
	UCHAR ucPadding[3];	/* pad to DWORD size */
} ENABLE_LVDS_SS_PARAMETERS;

/* ucTableFormatRevision=1,ucTableContentRevision=2: step/delay/range split into separate bytes */
typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 {
	USHORT usSpreadSpectrumPercentage;
	UCHAR ucSpreadSpectrumType;	/* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
	UCHAR ucSpreadSpectrumStep;	/* */
	UCHAR ucEnable;		/* ATOM_ENABLE or ATOM_DISABLE */
	UCHAR ucSpreadSpectrumDelay;
	UCHAR ucSpreadSpectrumRange;
	UCHAR ucPadding;	/* pad to DWORD size */
} ENABLE_LVDS_SS_PARAMETERS_V2;

/* This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. */
typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL {
	USHORT usSpreadSpectrumPercentage;
	UCHAR ucSpreadSpectrumType;	/* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
	UCHAR ucSpreadSpectrumStep;	/* */
	UCHAR ucEnable;		/* ATOM_ENABLE or ATOM_DISABLE */
	UCHAR ucSpreadSpectrumDelay;
	UCHAR ucSpreadSpectrumRange;
	UCHAR ucPpll;		/* ATOM_PPLL1/ATOM_PPLL2 */
} ENABLE_SPREAD_SPECTRUM_ON_PPLL;

#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
1098
/**************************************************************************/

/* SetPixelClock parameter space: v1 clock parameters plus SS workspace. */
typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION {
	PIXEL_CLOCK_PARAMETERS sPCLKInput;
	ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;	/* Caller doesn't need to init this portion */
} SET_PIXEL_CLOCK_PS_ALLOCATION;

#define ENABLE_VGA_RENDER_PS_ALLOCATION   SET_PIXEL_CLOCK_PS_ALLOCATION
1107
/****************************************************************************/
/* Structures used by MemoryTrainingTable */
/****************************************************************************/
typedef struct _MEMORY_TRAINING_PARAMETERS {
	ULONG ulTargetMemoryClock;	/* In 10Khz unit */
} MEMORY_TRAINING_PARAMETERS;
#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
1115
1116/****************************LVDS and other encoder command table definitions **********************/
1117
/****************************************************************************/
/* Structures used by LVDSEncoderControlTable (Before DCE30) */
/*                    LVTMAEncoderControlTable (Before DCE30) */
/*                    TMDSAEncoderControlTable (Before DCE30) */
/****************************************************************************/
typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS {
	USHORT usPixelClock;	/* in 10KHz; for BIOS convenience */
	UCHAR ucMisc;		/* bit0=0: Enable single link */
	/* =1: Enable dual link */
	/* Bit1=0: 666RGB */
	/* =1: 888RGB */
	UCHAR ucAction;		/* 0: turn off encoder */
	/* 1: setup and turn on encoder */
} LVDS_ENCODER_CONTROL_PARAMETERS;

#define LVDS_ENCODER_CONTROL_PS_ALLOCATION  LVDS_ENCODER_CONTROL_PARAMETERS

/* TMDS tables take the identical parameter layout. */
#define TMDS1_ENCODER_CONTROL_PARAMETERS    LVDS_ENCODER_CONTROL_PARAMETERS
#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS

#define TMDS2_ENCODER_CONTROL_PARAMETERS    TMDS1_ENCODER_CONTROL_PARAMETERS
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
1140
/* ucTableFormatRevision=1,ucTableContentRevision=2: adds dithering/FRC control */
typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
	USHORT usPixelClock;	/* in 10KHz; for BIOS convenience */
	UCHAR ucMisc;		/* see PANEL_ENCODER_MISC_xx definitions below */
	UCHAR ucAction;		/* 0: turn off encoder */
	/* 1: setup and turn on encoder */
	UCHAR ucTruncate;	/* bit0=0: Disable truncate */
	/* =1: Enable truncate */
	/* bit4=0: 666RGB */
	/* =1: 888RGB */
	UCHAR ucSpatial;	/* bit0=0: Disable spatial dithering */
	/* =1: Enable spatial dithering */
	/* bit4=0: 666RGB */
	/* =1: 888RGB */
	UCHAR ucTemporal;	/* bit0=0: Disable temporal dithering */
	/* =1: Enable temporal dithering */
	/* bit4=0: 666RGB */
	/* =1: 888RGB */
	/* bit5=0: Gray level 2 */
	/* =1: Gray level 4 */
	UCHAR ucFRC;		/* frame-rate-control pattern select: */
	/* bit4=0: 25FRC_SEL pattern E */
	/* =1: 25FRC_SEL pattern F */
	/* bit6:5=0: 50FRC_SEL pattern A */
	/* =1: 50FRC_SEL pattern B */
	/* =2: 50FRC_SEL pattern C */
	/* =3: 50FRC_SEL pattern D */
	/* bit7=0: 75FRC_SEL pattern E */
	/* =1: 75FRC_SEL pattern F */
} LVDS_ENCODER_CONTROL_PARAMETERS_V2;

#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2  LVDS_ENCODER_CONTROL_PARAMETERS_V2

/* TMDS v2 tables take the identical parameter layout. */
#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2    LVDS_ENCODER_CONTROL_PARAMETERS_V2
#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2

#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2    TMDS1_ENCODER_CONTROL_PARAMETERS_V2
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2

/* v3 is layout-identical to v2. */
#define LVDS_ENCODER_CONTROL_PARAMETERS_V3     LVDS_ENCODER_CONTROL_PARAMETERS_V2
#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V3  LVDS_ENCODER_CONTROL_PARAMETERS_V3

#define TMDS1_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS1_ENCODER_CONTROL_PARAMETERS_V3

#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3    LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
1187
/****************************************************************************/
/* Structures used by the external TMDS encoder control tables */
/* (original banner was a "###" placeholder) */
/****************************************************************************/
typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS {
	UCHAR ucEnable;		/* Enable or Disable External TMDS encoder */
	UCHAR ucMisc;		/* Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} */
	UCHAR ucPadding[2];	/* pad to 4 bytes; layout is BIOS ABI */
} ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;

typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION {
	ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
	WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;	/* Caller doesn't need to init this portion */
} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
1201
/* V2 external TMDS encoder control: parameters use the LVDS V2 layout. */
#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2

typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 {
	ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
	WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;	/* Caller doesn't need to init this portion */
} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
1208
/* Parameter-space allocation for external encoder control: the DIG encoder
 * parameters plus workspace reserved for the HW I2C helper table. */
typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
	DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
	WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
} EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
1213
/****************************************************************************/
/* Structures used by DVOEncoderControlTable */
/****************************************************************************/
/* ucTableFormatRevision=1,ucTableContentRevision=3 */

/* ucDVOConfig: bit/field encodings for DVO_ENCODER_CONTROL_PARAMETERS_V3.ucDVOConfig */
#define DVO_ENCODER_CONFIG_RATE_SEL 0x01	/* mask: bit0 selects DDR vs SDR */
#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c	/* mask: bits[3:2] select output width */
#define DVO_ENCODER_CONFIG_LOW12BIT 0x00
#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
#define DVO_ENCODER_CONFIG_24BIT 0x08
1227
typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
	USHORT usPixelClock;	/* pixel clock; other encoder tables here use 10KHz units -- TODO confirm for DVO */
	UCHAR ucDVOConfig;	/* see DVO_ENCODER_CONFIG_* above */
	UCHAR ucAction;		/* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
	UCHAR ucReseved[4];	/* reserved; identifier typo ("Reseved") kept for source compatibility */
} DVO_ENCODER_CONTROL_PARAMETERS_V3;
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
1235
/* ucTableFormatRevision=1 */
/* ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for */
/* bit1=0: non-coherent mode */
/*     =1: coherent mode */

/* ========================================================================================== */
/* Only change is here next time when changing encoder parameter definitions again! */
/* "_LAST" aliases always name the newest revision of each encoder parameter layout. */
#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST

#define TMDS1_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS1_ENCODER_CONTROL_PARAMETERS_LAST

#define TMDS2_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_LAST TMDS2_ENCODER_CONTROL_PARAMETERS_LAST

#define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION
1254
/* ========================================================================================== */
/* Panel encoder control: usMisc flags, ucAction values, and dither/FRC field
 * encodings matching the bit layout documented in
 * LVDS_ENCODER_CONTROL_PARAMETERS_V2 (ucTruncate/ucSpatial/ucTemporal/ucFRC). */
#define PANEL_ENCODER_MISC_DUAL 0x01
#define PANEL_ENCODER_MISC_COHERENT 0x02
#define PANEL_ENCODER_MISC_TMDS_LINKB 0x04
#define PANEL_ENCODER_MISC_HDMI_TYPE 0x08

#define PANEL_ENCODER_ACTION_DISABLE ATOM_DISABLE
#define PANEL_ENCODER_ACTION_ENABLE ATOM_ENABLE
#define PANEL_ENCODER_ACTION_COHERENTSEQ (ATOM_ENABLE+1)

#define PANEL_ENCODER_TRUNCATE_EN 0x01
#define PANEL_ENCODER_TRUNCATE_DEPTH 0x10
#define PANEL_ENCODER_SPATIAL_DITHER_EN 0x01
#define PANEL_ENCODER_SPATIAL_DITHER_DEPTH 0x10
#define PANEL_ENCODER_TEMPORAL_DITHER_EN 0x01
#define PANEL_ENCODER_TEMPORAL_DITHER_DEPTH 0x10
#define PANEL_ENCODER_TEMPORAL_LEVEL_4 0x20
#define PANEL_ENCODER_25FRC_MASK 0x10
#define PANEL_ENCODER_25FRC_E 0x00
#define PANEL_ENCODER_25FRC_F 0x10
#define PANEL_ENCODER_50FRC_MASK 0x60
#define PANEL_ENCODER_50FRC_A 0x00
#define PANEL_ENCODER_50FRC_B 0x20
#define PANEL_ENCODER_50FRC_C 0x40
#define PANEL_ENCODER_50FRC_D 0x60
#define PANEL_ENCODER_75FRC_MASK 0x80
#define PANEL_ENCODER_75FRC_E 0x00
#define PANEL_ENCODER_75FRC_F 0x80
1283
/****************************************************************************/
/* Structures used by SetVoltageTable */
/****************************************************************************/
/* ucVoltageType values: which voltage rail/operation the call targets */
#define SET_VOLTAGE_TYPE_ASIC_VDDC 1
#define SET_VOLTAGE_TYPE_ASIC_MVDDC 2
#define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3
#define SET_VOLTAGE_TYPE_ASIC_VDDCI 4
#define SET_VOLTAGE_INIT_MODE 5
#define SET_VOLTAGE_GET_MAX_VOLTAGE 6	/* Gets the Max. voltage for the soldered Asic */

/* ucVoltageMode values for SET_VOLTAGE_PARAMETERS (source selection) */
#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1
#define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2
#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4

/* ucVoltageMode values (operation selection) */
#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0
#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
1301
typedef struct _SET_VOLTAGE_PARAMETERS {
	UCHAR ucVoltageType;	/* To tell which voltage to set up, VDDC/MVDDC/MVDDQ (SET_VOLTAGE_TYPE_*) */
	UCHAR ucVoltageMode;	/* To set all, to set source A or source B or ... (SET_ASIC_VOLTAGE_MODE_*) */
	UCHAR ucVoltageIndex;	/* An index to tell which voltage level */
	UCHAR ucReserved;
} SET_VOLTAGE_PARAMETERS;
1308
/* V2: voltage is specified directly as a level instead of an index. */
typedef struct _SET_VOLTAGE_PARAMETERS_V2 {
	UCHAR ucVoltageType;	/* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
	UCHAR ucVoltageMode;	/* Not used, maybe use for state machine for different power mode */
	USHORT usVoltageLevel;	/* real voltage level */
} SET_VOLTAGE_PARAMETERS_V2;
1314
typedef struct _SET_VOLTAGE_PS_ALLOCATION {
	SET_VOLTAGE_PARAMETERS sASICSetVoltage;
	WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;	/* workspace for the HW I2C helper; caller doesn't init */
} SET_VOLTAGE_PS_ALLOCATION;
1319
/****************************************************************************/
/* Structures used by TVEncoderControlTable */
/****************************************************************************/
typedef struct _TV_ENCODER_CONTROL_PARAMETERS {
	USHORT usPixelClock;	/* in 10KHz; for bios convenient */
	UCHAR ucTvStandard;	/* See definition "ATOM_TV_NTSC ..." */
	UCHAR ucAction;		/* 0: turn off encoder */
				/* 1: setup and turn on encoder */
} TV_ENCODER_CONTROL_PARAMETERS;

typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION {
	TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
	WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;	/* Don't set this one */
} TV_ENCODER_CONTROL_PS_ALLOCATION;
1334
/* ==============================Data Table Portion==================================== */

#ifdef UEFI_BUILD
/* UEFI builds temporarily redefine USHORT to void* for the master list of
 * data tables below (presumably so its 16-bit offset fields become pointer
 * sized -- TODO confirm against the UEFI build). UTEMP is defined first so
 * the matching #ifdef after the structure can restore USHORT. NOTE(review):
 * this textually shadows whatever USHORT normally is; the restore relies on
 * self-referential macro expansion stopping at the USHORT token. */
#define UTEMP USHORT
#define USHORT void*
#endif
1341
/****************************************************************************/
/* Structure used in Data.mtb */
/****************************************************************************/
/* Master directory of data tables: each member is the offset of one table.
 * Member order is ABI -- positions must never change (see IndirectIOAccess). */
typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES {
	USHORT UtilityPipeLine;	/* Offset for the utility to get parser info,Don't change this position! */
	USHORT MultimediaCapabilityInfo;	/* Only used by MM Lib,latest version 1.1, not configurable from Bios, need to include the table to build Bios */
	USHORT MultimediaConfigInfo;	/* Only used by MM Lib,latest version 2.1, not configurable from Bios, need to include the table to build Bios */
	USHORT StandardVESA_Timing;	/* Only used by Bios */
	USHORT FirmwareInfo;	/* Shared by various SW components,latest version 1.4 */
	USHORT DAC_Info;	/* Will be obsolete from R600 */
	USHORT LVDS_Info;	/* Shared by various SW components,latest version 1.1 */
	USHORT TMDS_Info;	/* Will be obsolete from R600 */
	USHORT AnalogTV_Info;	/* Shared by various SW components,latest version 1.1 */
	USHORT SupportedDevicesInfo;	/* Will be obsolete from R600 */
	USHORT GPIO_I2C_Info;	/* Shared by various SW components,latest version 1.2 will be used from R600 */
	USHORT VRAM_UsageByFirmware;	/* Shared by various SW components,latest version 1.3 will be used from R600 */
	USHORT GPIO_Pin_LUT;	/* Shared by various SW components,latest version 1.1 */
	USHORT VESA_ToInternalModeLUT;	/* Only used by Bios */
	USHORT ComponentVideoInfo;	/* Shared by various SW components,latest version 2.1 will be used from R600 */
	USHORT PowerPlayInfo;	/* Shared by various SW components,latest version 2.1,new design from R600 */
	USHORT CompassionateData;	/* Will be obsolete from R600 */
	USHORT SaveRestoreInfo;	/* Only used by Bios */
	USHORT PPLL_SS_Info;	/* Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info */
	USHORT OemInfo;		/* Defined and used by external SW, should be obsolete soon */
	USHORT XTMDS_Info;	/* Will be obsolete from R600 */
	USHORT MclkSS_Info;	/* Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used */
	USHORT Object_Header;	/* Shared by various SW components,latest version 1.1 */
	USHORT IndirectIOAccess;	/* Only used by Bios,this table position can't change at all!! */
	USHORT MC_InitParameter;	/* Only used by command table */
	USHORT ASIC_VDDC_Info;	/* Will be obsolete from R600 */
	USHORT ASIC_InternalSS_Info;	/* New table name from R600, used to be called "ASIC_MVDDC_Info" */
	USHORT TV_VideoMode;	/* Only used by command table */
	USHORT VRAM_Info;	/* Only used by command table, latest version 1.3 */
	USHORT MemoryTrainingInfo;	/* Used for VBIOS and Diag utility for memory training purpose since R600. the new table rev start from 2.1 */
	USHORT IntegratedSystemInfo;	/* Shared by various SW components */
	USHORT ASIC_ProfilingInfo;	/* New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 */
	USHORT VoltageObjectInfo;	/* Shared by various SW components, latest version 1.1 */
	USHORT PowerSourceInfo;	/* Shared by various SW components, latest version 1.1 */
} ATOM_MASTER_LIST_OF_DATA_TABLES;
1381
#ifdef UEFI_BUILD
/* Restore USHORT after the master list above (see matching #ifdef before it). */
#define USHORT UTEMP
#endif

/* Top-level data table: common header followed by the master offset list. */
typedef struct _ATOM_MASTER_DATA_TABLE {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
} ATOM_MASTER_DATA_TABLE;
1390
/****************************************************************************/
/* Structure used in MultimediaCapabilityInfoTable */
/****************************************************************************/
typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ULONG ulSignature;	/* HW info table signature string "$ATI" */
	UCHAR ucI2C_Type;	/* I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) */
	UCHAR ucTV_OutInfo;	/* Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) */
	UCHAR ucVideoPortInfo;	/* Provides the video port capabilities */
	UCHAR ucHostPortInfo;	/* Provides host port configuration information */
} ATOM_MULTIMEDIA_CAPABILITY_INFO;
1402
/****************************************************************************/
/* Structure used in MultimediaConfigInfoTable */
/****************************************************************************/
typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ULONG ulSignature;	/* MM info table signature string "$MMT" */
	UCHAR ucTunerInfo;	/* Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) */
	UCHAR ucAudioChipInfo;	/* List the audio chip type (3:0) product type (4) and OEM revision (7:5) */
	UCHAR ucProductID;	/* Defines as OEM ID or ATI board ID dependent on product type setting */
	UCHAR ucMiscInfo1;	/* Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) */
	UCHAR ucMiscInfo2;	/* I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) */
	UCHAR ucMiscInfo3;	/* Video Decoder Type (3:0) Video In Standard/Crystal (7:4) */
	UCHAR ucMiscInfo4;	/* Video Decoder Host Config (2:0) reserved (7:3) */
	UCHAR ucVideoInput0Info;	/* Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
	UCHAR ucVideoInput1Info;	/* Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
	UCHAR ucVideoInput2Info;	/* Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
	UCHAR ucVideoInput3Info;	/* Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
	UCHAR ucVideoInput4Info;	/* Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
} ATOM_MULTIMEDIA_CONFIG_INFO;
1422
/****************************************************************************/
/* Structures used in FirmwareInfoTable */
/****************************************************************************/

/* usBIOSCapability Definition: */
/* Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */
/* Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */
/* Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */
/* Others: Reserved */
#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001
#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002
#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004
#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008
#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010
#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020
#define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040
#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080	/* identifier typo ("ASSIGNGED") kept for source compatibility */
#define ATOM_BIOS_INFO_HYPERMEMORY_SUPPORT 0x0100
#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00
#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000
1444
#ifndef _H2INC

/* Please don't add or expand this bitfield structure below, this one will retire soon.! */
/* Bit-field mirror of the ATOM_BIOS_INFO_* flags above; the two orderings
 * keep the bit positions identical on big- and little-endian hosts. */
typedef struct _ATOM_FIRMWARE_CAPABILITY {
#if ATOM_BIG_ENDIAN
	USHORT Reserved:3;
	USHORT HyperMemory_Size:4;
	USHORT HyperMemory_Support:1;
	USHORT PPMode_Assigned:1;
	USHORT WMI_SUPPORT:1;
	USHORT GPUControlsBL:1;
	USHORT EngineClockSS_Support:1;
	USHORT MemoryClockSS_Support:1;
	USHORT ExtendedDesktopSupport:1;
	USHORT DualCRTC_Support:1;
	USHORT FirmwarePosted:1;
#else
	USHORT FirmwarePosted:1;
	USHORT DualCRTC_Support:1;
	USHORT ExtendedDesktopSupport:1;
	USHORT MemoryClockSS_Support:1;
	USHORT EngineClockSS_Support:1;
	USHORT GPUControlsBL:1;
	USHORT WMI_SUPPORT:1;
	USHORT PPMode_Assigned:1;
	USHORT HyperMemory_Support:1;
	USHORT HyperMemory_Size:4;
	USHORT Reserved:3;
#endif
} ATOM_FIRMWARE_CAPABILITY;

/* Access the capability word either as bit-fields or as a raw USHORT. */
typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
	ATOM_FIRMWARE_CAPABILITY sbfAccess;
	USHORT susAccess;
} ATOM_FIRMWARE_CAPABILITY_ACCESS;

#else

/* Builds without bit-field support (_H2INC): expose only the raw USHORT. */
typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
	USHORT susAccess;
} ATOM_FIRMWARE_CAPABILITY_ACCESS;

#endif
1488
/* FirmwareInfo table, original (V1.1) layout. */
typedef struct _ATOM_FIRMWARE_INFO {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ULONG ulFirmwareRevision;
	ULONG ulDefaultEngineClock;	/* In 10Khz unit */
	ULONG ulDefaultMemoryClock;	/* In 10Khz unit */
	ULONG ulDriverTargetEngineClock;	/* In 10Khz unit */
	ULONG ulDriverTargetMemoryClock;	/* In 10Khz unit */
	ULONG ulMaxEngineClockPLL_Output;	/* In 10Khz unit */
	ULONG ulMaxMemoryClockPLL_Output;	/* In 10Khz unit */
	ULONG ulMaxPixelClockPLL_Output;	/* In 10Khz unit */
	ULONG ulASICMaxEngineClock;	/* In 10Khz unit */
	ULONG ulASICMaxMemoryClock;	/* In 10Khz unit */
	UCHAR ucASICMaxTemperature;
	UCHAR ucPadding[3];	/* Don't use them */
	ULONG aulReservedForBIOS[3];	/* Don't use them */
	USHORT usMinEngineClockPLL_Input;	/* In 10Khz unit */
	USHORT usMaxEngineClockPLL_Input;	/* In 10Khz unit */
	USHORT usMinEngineClockPLL_Output;	/* In 10Khz unit */
	USHORT usMinMemoryClockPLL_Input;	/* In 10Khz unit */
	USHORT usMaxMemoryClockPLL_Input;	/* In 10Khz unit */
	USHORT usMinMemoryClockPLL_Output;	/* In 10Khz unit */
	USHORT usMaxPixelClock;	/* In 10Khz unit, Max.  Pclk */
	USHORT usMinPixelClockPLL_Input;	/* In 10Khz unit */
	USHORT usMaxPixelClockPLL_Input;	/* In 10Khz unit */
	USHORT usMinPixelClockPLL_Output;	/* In 10Khz unit, the definitions above can't change!!! */
	ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
	USHORT usReferenceClock;	/* In 10Khz unit */
	USHORT usPM_RTS_Location;	/* RTS PM4 starting location in ROM in 1Kb unit */
	UCHAR ucPM_RTS_StreamSize;	/* RTS PM4 packets in Kb unit */
	UCHAR ucDesign_ID;	/* Indicate what is the board design */
	UCHAR ucMemoryModule_ID;	/* Indicate what is the board design */
} ATOM_FIRMWARE_INFO;
1521
/* V1.2: adds ucMinAllowedBL_Level and a 32-bit ulMinPixelClockPLL_Output,
 * carved out of the padding/reserved space of V1.1. */
typedef struct _ATOM_FIRMWARE_INFO_V1_2 {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ULONG ulFirmwareRevision;
	ULONG ulDefaultEngineClock;	/* In 10Khz unit */
	ULONG ulDefaultMemoryClock;	/* In 10Khz unit */
	ULONG ulDriverTargetEngineClock;	/* In 10Khz unit */
	ULONG ulDriverTargetMemoryClock;	/* In 10Khz unit */
	ULONG ulMaxEngineClockPLL_Output;	/* In 10Khz unit */
	ULONG ulMaxMemoryClockPLL_Output;	/* In 10Khz unit */
	ULONG ulMaxPixelClockPLL_Output;	/* In 10Khz unit */
	ULONG ulASICMaxEngineClock;	/* In 10Khz unit */
	ULONG ulASICMaxMemoryClock;	/* In 10Khz unit */
	UCHAR ucASICMaxTemperature;
	UCHAR ucMinAllowedBL_Level;
	UCHAR ucPadding[2];	/* Don't use them */
	ULONG aulReservedForBIOS[2];	/* Don't use them */
	ULONG ulMinPixelClockPLL_Output;	/* In 10Khz unit */
	USHORT usMinEngineClockPLL_Input;	/* In 10Khz unit */
	USHORT usMaxEngineClockPLL_Input;	/* In 10Khz unit */
	USHORT usMinEngineClockPLL_Output;	/* In 10Khz unit */
	USHORT usMinMemoryClockPLL_Input;	/* In 10Khz unit */
	USHORT usMaxMemoryClockPLL_Input;	/* In 10Khz unit */
	USHORT usMinMemoryClockPLL_Output;	/* In 10Khz unit */
	USHORT usMaxPixelClock;	/* In 10Khz unit, Max.  Pclk */
	USHORT usMinPixelClockPLL_Input;	/* In 10Khz unit */
	USHORT usMaxPixelClockPLL_Input;	/* In 10Khz unit */
	USHORT usMinPixelClockPLL_Output;	/* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
	ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
	USHORT usReferenceClock;	/* In 10Khz unit */
	USHORT usPM_RTS_Location;	/* RTS PM4 starting location in ROM in 1Kb unit */
	UCHAR ucPM_RTS_StreamSize;	/* RTS PM4 packets in Kb unit */
	UCHAR ucDesign_ID;	/* Indicate what is the board design */
	UCHAR ucMemoryModule_ID;	/* Indicate what is the board design */
} ATOM_FIRMWARE_INFO_V1_2;
1556
/* V1.3: adds ul3DAccelerationEngineClock; aulReservedForBIOS shrinks from
 * [2] in V1.2 to a single ULONG to make room for it. */
typedef struct _ATOM_FIRMWARE_INFO_V1_3 {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ULONG ulFirmwareRevision;
	ULONG ulDefaultEngineClock;	/* In 10Khz unit */
	ULONG ulDefaultMemoryClock;	/* In 10Khz unit */
	ULONG ulDriverTargetEngineClock;	/* In 10Khz unit */
	ULONG ulDriverTargetMemoryClock;	/* In 10Khz unit */
	ULONG ulMaxEngineClockPLL_Output;	/* In 10Khz unit */
	ULONG ulMaxMemoryClockPLL_Output;	/* In 10Khz unit */
	ULONG ulMaxPixelClockPLL_Output;	/* In 10Khz unit */
	ULONG ulASICMaxEngineClock;	/* In 10Khz unit */
	ULONG ulASICMaxMemoryClock;	/* In 10Khz unit */
	UCHAR ucASICMaxTemperature;
	UCHAR ucMinAllowedBL_Level;
	UCHAR ucPadding[2];	/* Don't use them */
	ULONG aulReservedForBIOS;	/* Don't use them */
	ULONG ul3DAccelerationEngineClock;	/* In 10Khz unit */
	ULONG ulMinPixelClockPLL_Output;	/* In 10Khz unit */
	USHORT usMinEngineClockPLL_Input;	/* In 10Khz unit */
	USHORT usMaxEngineClockPLL_Input;	/* In 10Khz unit */
	USHORT usMinEngineClockPLL_Output;	/* In 10Khz unit */
	USHORT usMinMemoryClockPLL_Input;	/* In 10Khz unit */
	USHORT usMaxMemoryClockPLL_Input;	/* In 10Khz unit */
	USHORT usMinMemoryClockPLL_Output;	/* In 10Khz unit */
	USHORT usMaxPixelClock;	/* In 10Khz unit, Max.  Pclk */
	USHORT usMinPixelClockPLL_Input;	/* In 10Khz unit */
	USHORT usMaxPixelClockPLL_Input;	/* In 10Khz unit */
	USHORT usMinPixelClockPLL_Output;	/* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
	ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
	USHORT usReferenceClock;	/* In 10Khz unit */
	USHORT usPM_RTS_Location;	/* RTS PM4 starting location in ROM in 1Kb unit */
	UCHAR ucPM_RTS_StreamSize;	/* RTS PM4 packets in Kb unit */
	UCHAR ucDesign_ID;	/* Indicate what is the board design */
	UCHAR ucMemoryModule_ID;	/* Indicate what is the board design */
} ATOM_FIRMWARE_INFO_V1_3;
1592
/* V1.4: replaces the padding/reserved space with boot-up VDDC voltage and
 * LCD pixel-clock PLL limits. Current latest revision (see _LAST below). */
typedef struct _ATOM_FIRMWARE_INFO_V1_4 {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ULONG ulFirmwareRevision;
	ULONG ulDefaultEngineClock;	/* In 10Khz unit */
	ULONG ulDefaultMemoryClock;	/* In 10Khz unit */
	ULONG ulDriverTargetEngineClock;	/* In 10Khz unit */
	ULONG ulDriverTargetMemoryClock;	/* In 10Khz unit */
	ULONG ulMaxEngineClockPLL_Output;	/* In 10Khz unit */
	ULONG ulMaxMemoryClockPLL_Output;	/* In 10Khz unit */
	ULONG ulMaxPixelClockPLL_Output;	/* In 10Khz unit */
	ULONG ulASICMaxEngineClock;	/* In 10Khz unit */
	ULONG ulASICMaxMemoryClock;	/* In 10Khz unit */
	UCHAR ucASICMaxTemperature;
	UCHAR ucMinAllowedBL_Level;
	USHORT usBootUpVDDCVoltage;	/* In MV unit */
	USHORT usLcdMinPixelClockPLL_Output;	/* In MHz unit */
	USHORT usLcdMaxPixelClockPLL_Output;	/* In MHz unit */
	ULONG ul3DAccelerationEngineClock;	/* In 10Khz unit */
	ULONG ulMinPixelClockPLL_Output;	/* In 10Khz unit */
	USHORT usMinEngineClockPLL_Input;	/* In 10Khz unit */
	USHORT usMaxEngineClockPLL_Input;	/* In 10Khz unit */
	USHORT usMinEngineClockPLL_Output;	/* In 10Khz unit */
	USHORT usMinMemoryClockPLL_Input;	/* In 10Khz unit */
	USHORT usMaxMemoryClockPLL_Input;	/* In 10Khz unit */
	USHORT usMinMemoryClockPLL_Output;	/* In 10Khz unit */
	USHORT usMaxPixelClock;	/* In 10Khz unit, Max.  Pclk */
	USHORT usMinPixelClockPLL_Input;	/* In 10Khz unit */
	USHORT usMaxPixelClockPLL_Input;	/* In 10Khz unit */
	USHORT usMinPixelClockPLL_Output;	/* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
	ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
	USHORT usReferenceClock;	/* In 10Khz unit */
	USHORT usPM_RTS_Location;	/* RTS PM4 starting location in ROM in 1Kb unit */
	UCHAR ucPM_RTS_StreamSize;	/* RTS PM4 packets in Kb unit */
	UCHAR ucDesign_ID;	/* Indicate what is the board design */
	UCHAR ucMemoryModule_ID;	/* Indicate what is the board design */
} ATOM_FIRMWARE_INFO_V1_4;

#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V1_4
1631
/****************************************************************************/
/* Structures used in IntegratedSystemInfoTable */
/****************************************************************************/
/* Flags for ATOM_INTEGRATED_SYSTEM_INFO.usCapabilityFlag (see field comment). */
#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2
#define IGP_CAP_FLAG_AC_CARD 0x4
#define IGP_CAP_FLAG_SDVO_CARD 0x8
#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10
1639
/* IGP system info, V1 layout; see the explanation comment following the
 * structure for per-field semantics. */
typedef struct _ATOM_INTEGRATED_SYSTEM_INFO {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ULONG ulBootUpEngineClock;	/* in 10kHz unit */
	ULONG ulBootUpMemoryClock;	/* in 10kHz unit */
	ULONG ulMaxSystemMemoryClock;	/* in 10kHz unit */
	ULONG ulMinSystemMemoryClock;	/* in 10kHz unit */
	UCHAR ucNumberOfCyclesInPeriodHi;
	UCHAR ucLCDTimingSel;	/* =0:not valid.!=0 sel this timing descriptor from LCD EDID. */
	USHORT usReserved1;
	USHORT usInterNBVoltageLow;	/* An intermediate PWM value to set the voltage */
	USHORT usInterNBVoltageHigh;	/* Another intermediate PWM value to set the voltage */
	ULONG ulReserved[2];

	USHORT usFSBClock;	/* In MHz unit */
	USHORT usCapabilityFlag;	/* Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable */
				/* Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card */
				/* Bit[4]==1: P/2 mode, ==0: P/1 mode */
	USHORT usPCIENBCfgReg7;	/* bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal */
	USHORT usK8MemoryClock;	/* in MHz unit */
	USHORT usK8SyncStartDelay;	/* in 0.01 us unit */
	USHORT usK8DataReturnTime;	/* in 0.01 us unit */
	UCHAR ucMaxNBVoltage;
	UCHAR ucMinNBVoltage;
	UCHAR ucMemoryType;	/* [7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved */
	UCHAR ucNumberOfCyclesInPeriod;	/* CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod */
	UCHAR ucStartingPWM_HighTime;	/* CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime */
	UCHAR ucHTLinkWidth;	/* 16 bit vs. 8 bit */
	UCHAR ucMaxNBVoltageHigh;
	UCHAR ucMinNBVoltageHigh;
} ATOM_INTEGRATED_SYSTEM_INFO;
1670
1671/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
1672ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock
1673 For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
1674ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
1675 For AMD IGP,for now this can be 0
1676ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
1677 For AMD IGP,for now this can be 0
1678
1679usFSBClock: For Intel IGP,it's FSB Freq
1680 For AMD IGP,it's HT Link Speed
1681
1682usK8MemoryClock: For AMD IGP only. For RevF CPU, set it to 200
1683usK8SyncStartDelay: For AMD IGP only. Memory access latency in K8, required for watermark calculation
1684usK8DataReturnTime: For AMD IGP only. Memory access latency in K8, required for watermark calculation
1685
1686VC:Voltage Control
1687ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
1688ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
1689
1690ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value.
1691ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0
1692
1693ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
1694ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
1695
usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
1698*/
1699
1700/*
1701The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST;
1702Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
1703The enough reservation should allow us to never change table revisions. Whenever needed, a GPU SW component can use reserved portion for new data entries.
1704
SW components can access the IGP system info structure in the same way as before
1706*/
1707
/* IGP system info, V2 layout (introduced with RS780; placed in FB by SBIOS,
 * then copied into the VBIOS image -- see the comment block above). The
 * large trailing reserve keeps the table size stable across revisions. */
typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ULONG ulBootUpEngineClock;	/* in 10kHz unit */
	ULONG ulReserved1[2];	/* must be 0x0 for the reserved */
	ULONG ulBootUpUMAClock;	/* in 10kHz unit */
	ULONG ulBootUpSidePortClock;	/* in 10kHz unit */
	ULONG ulMinSidePortClock;	/* in 10kHz unit */
	ULONG ulReserved2[6];	/* must be 0x0 for the reserved */
	ULONG ulSystemConfig;	/* see explanation below */
	ULONG ulBootUpReqDisplayVector;
	ULONG ulOtherDisplayMisc;
	ULONG ulDDISlot1Config;
	ULONG ulDDISlot2Config;
	UCHAR ucMemoryType;	/* [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved */
	UCHAR ucUMAChannelNumber;
	UCHAR ucDockingPinBit;
	UCHAR ucDockingPinPolarity;
	ULONG ulDockingPinCFGInfo;
	ULONG ulCPUCapInfo;
	USHORT usNumberOfCyclesInPeriod;
	USHORT usMaxNBVoltage;
	USHORT usMinNBVoltage;
	USHORT usBootUpNBVoltage;
	ULONG ulHTLinkFreq;	/* in 10Khz */
	USHORT usMinHTLinkWidth;
	USHORT usMaxHTLinkWidth;
	USHORT usUMASyncStartDelay;
	USHORT usUMADataReturnTime;
	USHORT usLinkStatusZeroTime;
	USHORT usReserved;
	ULONG ulHighVoltageHTLinkFreq;	/* in 10Khz */
	ULONG ulLowVoltageHTLinkFreq;	/* in 10Khz */
	USHORT usMaxUpStreamHTLinkWidth;
	USHORT usMaxDownStreamHTLinkWidth;
	USHORT usMinUpStreamHTLinkWidth;
	USHORT usMinDownStreamHTLinkWidth;
	ULONG ulReserved3[97];	/* must be 0x0 */
} ATOM_INTEGRATED_SYSTEM_INFO_V2;
1746
1747/*
1748ulBootUpEngineClock: Boot-up Engine Clock in 10Khz;
1749ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
1750ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
1751
1752ulSystemConfig:
1753Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
1754Bit[1]=1: system boots up at AMD overdrived state or user customized mode. In this case, driver will just stick to this boot-up mode. No other PowerPlay state
1755 =0: system boots up at driver control state. Power state depends on PowerPlay table.
1756Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
1757Bit[3]=1: Only one power state(Performance) will be supported.
1758 =0: Multiple power states supported from PowerPlay table.
1759Bit[4]=1: CLMC is supported and enabled on current system.
1760 =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface.
1761Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
1762 =0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied.
1763Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
1764 =0: Voltage settings is determined by powerplay table.
1765Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is workaround for CPU(Griffin) performance issue.
1766 =0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
1767
1768ulBootUpReqDisplayVector: This dword is a bit vector indicates what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
1769
1770ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
1771 [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition;
1772
1773ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
1774 [3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
1775 [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
1776 [15:8] - Lane configuration attribute;
1777 [23:16]- Connector type, possible value:
1778 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
1779 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
1780 CONNECTOR_OBJECT_ID_HDMI_TYPE_A
1781 CONNECTOR_OBJECT_ID_DISPLAYPORT
1782 [31:24]- Reserved
1783
1784ulDDISlot2Config: Same as Slot1.
1785ucMemoryType: SidePort memory type, set it to 0x0 when Sideport memory is not installed. Driver needs this info to change sideport memory clock. Not for display in CCC.
1786For IGP, Hypermemory is the only memory type showed in CCC.
1787
1788ucUMAChannelNumber: how many channels for the UMA;
1789
1790ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
1791ucDockingPinBit: which bit in this register to read the pin status;
1792ucDockingPinPolarity:Polarity of the pin when docked;
1793
1794ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
1795
1796usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
1797usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
1798usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
1799 GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
1800 PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
1801 GPU SW don't control mode: usMaxNBVoltage & usMinNBVoltage=0 and no care about ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE
1802usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
1803
1804ulHTLinkFreq: Bootup HT link Frequency in 10Khz.
1805usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
1806 If CDLW enabled, both upstream and downstream width should be the same during bootup.
1807usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
1808 If CDLW enabled, both upstream and downstream width should be the same during bootup.
1809
1810usUMASyncStartDelay: Memory access latency, required for watermark calculation
1811usUMADataReturnTime: Memory access latency, required for watermark calculation
1812usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in 0.01 the unit of us
1813for Griffin or Greyhound. SBIOS needs to convert to actual time by:
1814 if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
1815 if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
1816 if T0Ttime [5:4]=10b, then usLinkStatusZeroTime=T0Ttime [3:0]*2.0us (0.0 to 30us)
1817 if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
1818
1819ulHighVoltageHTLinkFreq: HT link frequency for power state with low voltage. If boot up runs in HT1, this must be 0.
1820 This must be less than or equal to ulHTLinkFreq(bootup frequency).
1821ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
1822 This must be less than or equal to ulHighVoltageHTLinkFreq.
1823
1824usMaxUpStreamHTLinkWidth: Asymmetric link width support in the future, to replace usMaxHTLinkWidth. Not used for now.
1825usMaxDownStreamHTLinkWidth: same as above.
1826usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to replace usMinHTLinkWidth. Not used for now.
1827usMinDownStreamHTLinkWidth: same as above.
1828*/
1829
1830#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
1831#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
1832#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
1833#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008
1834#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010
1835#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020
1836#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040
1837#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080
1838
1839#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF
1840
1841#define b0IGP_DDI_SLOT_LANE_MAP_MASK 0x0F
1842#define b0IGP_DDI_SLOT_DOCKING_LANE_MAP_MASK 0xF0
1843#define b0IGP_DDI_SLOT_CONFIG_LANE_0_3 0x01
1844#define b0IGP_DDI_SLOT_CONFIG_LANE_4_7 0x02
1845#define b0IGP_DDI_SLOT_CONFIG_LANE_8_11 0x04
1846#define b0IGP_DDI_SLOT_CONFIG_LANE_12_15 0x08
1847
1848#define IGP_DDI_SLOT_ATTRIBUTE_MASK 0x0000FF00
1849#define IGP_DDI_SLOT_CONFIG_REVERSED 0x00000100
1850#define b1IGP_DDI_SLOT_CONFIG_REVERSED 0x01
1851
1852#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000
1853
1854#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000
1855#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001
1856#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002
1857#define ATOM_DFP_INT_ENCODER1_INDEX 0x00000003
1858#define ATOM_CRT_INT_ENCODER2_INDEX 0x00000004
1859#define ATOM_LCD_EXT_ENCODER1_INDEX 0x00000005
1860#define ATOM_TV_EXT_ENCODER1_INDEX 0x00000006
1861#define ATOM_DFP_EXT_ENCODER1_INDEX 0x00000007
1862#define ATOM_CV_INT_ENCODER1_INDEX 0x00000008
1863#define ATOM_DFP_INT_ENCODER2_INDEX 0x00000009
1864#define ATOM_CRT_EXT_ENCODER1_INDEX 0x0000000A
1865#define ATOM_CV_EXT_ENCODER1_INDEX 0x0000000B
1866#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C
1867#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D
1868
1869/* define ASIC internal encoder id ( bit vector ) */
1870#define ASIC_INT_DAC1_ENCODER_ID 0x00
1871#define ASIC_INT_TV_ENCODER_ID 0x02
1872#define ASIC_INT_DIG1_ENCODER_ID 0x03
1873#define ASIC_INT_DAC2_ENCODER_ID 0x04
1874#define ASIC_EXT_TV_ENCODER_ID 0x06
1875#define ASIC_INT_DVO_ENCODER_ID 0x07
1876#define ASIC_INT_DIG2_ENCODER_ID 0x09
1877#define ASIC_EXT_DIG_ENCODER_ID 0x05
1878
1879/* define Encoder attribute */
1880#define ATOM_ANALOG_ENCODER 0
1881#define ATOM_DIGITAL_ENCODER 1
1882
1883#define ATOM_DEVICE_CRT1_INDEX 0x00000000
1884#define ATOM_DEVICE_LCD1_INDEX 0x00000001
1885#define ATOM_DEVICE_TV1_INDEX 0x00000002
1886#define ATOM_DEVICE_DFP1_INDEX 0x00000003
1887#define ATOM_DEVICE_CRT2_INDEX 0x00000004
1888#define ATOM_DEVICE_LCD2_INDEX 0x00000005
1889#define ATOM_DEVICE_TV2_INDEX 0x00000006
1890#define ATOM_DEVICE_DFP2_INDEX 0x00000007
1891#define ATOM_DEVICE_CV_INDEX 0x00000008
1892#define ATOM_DEVICE_DFP3_INDEX 0x00000009
1893#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
1894#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
1895#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C
1896#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D
1897#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E
1898#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F
1899#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1)
1900#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO
1901#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1)
1902
1903#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1)
1904
1905#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX)
1906#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX)
1907#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX)
1908#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX)
1909#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX)
1910#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX)
1911#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
1912#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX)
1913#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX)
1914#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX)
1915#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
1916#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX)
1917
1918#define ATOM_DEVICE_CRT_SUPPORT \
1919 (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
1920#define ATOM_DEVICE_DFP_SUPPORT \
1921 (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | \
1922 ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | \
1923 ATOM_DEVICE_DFP5_SUPPORT)
1924#define ATOM_DEVICE_TV_SUPPORT \
1925 (ATOM_DEVICE_TV1_SUPPORT | ATOM_DEVICE_TV2_SUPPORT)
1926#define ATOM_DEVICE_LCD_SUPPORT \
1927 (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
1928
1929#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0
1930#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004
1931#define ATOM_DEVICE_CONNECTOR_VGA 0x00000001
1932#define ATOM_DEVICE_CONNECTOR_DVI_I 0x00000002
1933#define ATOM_DEVICE_CONNECTOR_DVI_D 0x00000003
1934#define ATOM_DEVICE_CONNECTOR_DVI_A 0x00000004
1935#define ATOM_DEVICE_CONNECTOR_SVIDEO 0x00000005
1936#define ATOM_DEVICE_CONNECTOR_COMPOSITE 0x00000006
1937#define ATOM_DEVICE_CONNECTOR_LVDS 0x00000007
1938#define ATOM_DEVICE_CONNECTOR_DIGI_LINK 0x00000008
1939#define ATOM_DEVICE_CONNECTOR_SCART 0x00000009
1940#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_A 0x0000000A
1941#define ATOM_DEVICE_CONNECTOR_HDMI_TYPE_B 0x0000000B
1942#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E
1943#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F
1944
1945#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F
1946#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000
1947#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000
1948#define ATOM_DEVICE_DAC_INFO_DACA 0x00000001
1949#define ATOM_DEVICE_DAC_INFO_DACB 0x00000002
1950#define ATOM_DEVICE_DAC_INFO_EXDAC 0x00000003
1951
1952#define ATOM_DEVICE_I2C_ID_NOI2C 0x00000000
1953
1954#define ATOM_DEVICE_I2C_LINEMUX_MASK 0x0000000F
1955#define ATOM_DEVICE_I2C_LINEMUX_SHIFT 0x00000000
1956
1957#define ATOM_DEVICE_I2C_ID_MASK 0x00000070
1958#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004
1959#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001
1960#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002
1961#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 /* For IGP RS600 */
1962#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 /* For IGP RS690 */
1963
1964#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080
1965#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007
1966#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000
1967#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001
1968
1969/* usDeviceSupport: */
/* Bit 0 = 0 - no CRT1 support; = 1 - CRT1 is supported */
1971/* Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported */
1972/* Bit 2 = 0 - no TV1 support= 1- TV1 is supported */
1973/* Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported */
1974/* Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported */
1975/* Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported */
1976/* Bit 6 = 0 - no TV2 support= 1- TV2 is supported */
1977/* Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported */
1978/* Bit 8 = 0 - no CV support= 1- CV is supported */
1979/* Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported */
1980/* Byte1 (Supported Device Info) */
/* Bit 0 = 0 - no CV support; = 1 - CV is supported */
1982/* */
1983/* */
1984
1985/* ucI2C_ConfigID */
1986/* [7:0] - I2C LINE Associate ID */
1987/* = 0 - no I2C */
1988/* [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) */
1989/* = 0, [6:0]=SW assisted I2C ID */
1990/* [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use */
1991/* = 2, HW engine for Multimedia use */
1992/* = 3-7 Reserved for future I2C engines */
1993/* [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C */
1994
1995typedef struct _ATOM_I2C_ID_CONFIG {
1996#if ATOM_BIG_ENDIAN
1997 UCHAR bfHW_Capable:1;
1998 UCHAR bfHW_EngineID:3;
1999 UCHAR bfI2C_LineMux:4;
2000#else
2001 UCHAR bfI2C_LineMux:4;
2002 UCHAR bfHW_EngineID:3;
2003 UCHAR bfHW_Capable:1;
2004#endif
2005} ATOM_I2C_ID_CONFIG;
2006
/* Lets callers read/write the I2C ID either field-by-field or as the raw byte. */
typedef union _ATOM_I2C_ID_CONFIG_ACCESS {
	ATOM_I2C_ID_CONFIG sbfAccess;	/* bitfield view */
	UCHAR ucAccess;			/* raw byte view */
} ATOM_I2C_ID_CONFIG_ACCESS;
2011
2012/****************************************************************************/
2013/* Structure used in GPIO_I2C_InfoTable */
2014/****************************************************************************/
/* One bit-banged (GPIO) I2C bus: the register indices and bit shifts for the
 * clock and data lines, plus the I2C ID this bus is advertised under.
 * NOTE(review): the Mask/En/Y/A register roles are not defined in this header
 * chunk - presumably mask, output-enable, input and output latches; confirm
 * against the ATOM BIOS spec.  "ASSIGMENT" (sic) is the spelling used by the
 * BIOS interface and must not be corrected. */
typedef struct _ATOM_GPIO_I2C_ASSIGMENT {
	USHORT usClkMaskRegisterIndex;
	USHORT usClkEnRegisterIndex;
	USHORT usClkY_RegisterIndex;
	USHORT usClkA_RegisterIndex;
	USHORT usDataMaskRegisterIndex;
	USHORT usDataEnRegisterIndex;
	USHORT usDataY_RegisterIndex;
	USHORT usDataA_RegisterIndex;
	ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;	/* ID byte, see ATOM_I2C_ID_CONFIG */
	UCHAR ucClkMaskShift;
	UCHAR ucClkEnShift;
	UCHAR ucClkY_Shift;
	UCHAR ucClkA_Shift;
	UCHAR ucDataMaskShift;
	UCHAR ucDataEnShift;
	UCHAR ucDataY_Shift;
	UCHAR ucDataA_Shift;
	UCHAR ucReserved1;
	UCHAR ucReserved2;
} ATOM_GPIO_I2C_ASSIGMENT;
2036
/* GPIO_I2C_InfoTable: one GPIO I2C bus descriptor per supported device slot. */
typedef struct _ATOM_GPIO_I2C_INFO {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
} ATOM_GPIO_I2C_INFO;
2041
2042/****************************************************************************/
2043/* Common Structure used in other structures */
2044/****************************************************************************/
2045
#ifndef _H2INC

/* Please don't add or expand this bitfield structure below, this one will retire soon.! */
/* Per-mode attribute flags.  Each bitfield corresponds to one of the
 * usModeMiscInfo ATOM_* flag defines further down (ATOM_H_CUTOFF through
 * ATOM_RGB888_MODE).  The field order is reversed for big-endian builds so
 * the 16-bit in-memory layout matches the little-endian BIOS image. */
typedef struct _ATOM_MODE_MISC_INFO {
#if ATOM_BIG_ENDIAN
	USHORT Reserved:6;
	USHORT RGB888:1;
	USHORT DoubleClock:1;
	USHORT Interlace:1;
	USHORT CompositeSync:1;
	USHORT V_ReplicationBy2:1;
	USHORT H_ReplicationBy2:1;
	USHORT VerticalCutOff:1;
	USHORT VSyncPolarity:1;	/* 0=Active High, 1=Active Low */
	USHORT HSyncPolarity:1;	/* 0=Active High, 1=Active Low */
	USHORT HorizontalCutOff:1;
#else
	USHORT HorizontalCutOff:1;
	USHORT HSyncPolarity:1;	/* 0=Active High, 1=Active Low */
	USHORT VSyncPolarity:1;	/* 0=Active High, 1=Active Low */
	USHORT VerticalCutOff:1;
	USHORT H_ReplicationBy2:1;
	USHORT V_ReplicationBy2:1;
	USHORT CompositeSync:1;
	USHORT Interlace:1;
	USHORT DoubleClock:1;
	USHORT RGB888:1;
	USHORT Reserved:6;
#endif
} ATOM_MODE_MISC_INFO;

/* Access the mode flags either as bitfields or as the raw USHORT. */
typedef union _ATOM_MODE_MISC_INFO_ACCESS {
	ATOM_MODE_MISC_INFO sbfAccess;
	USHORT usAccess;
} ATOM_MODE_MISC_INFO_ACCESS;

#else

/* _H2INC build (presumably the MASM h2inc header translator - TODO confirm):
 * bitfields cannot be translated, so only the raw USHORT view is exposed. */
typedef union _ATOM_MODE_MISC_INFO_ACCESS {
	USHORT usAccess;
} ATOM_MODE_MISC_INFO_ACCESS;

#endif
2089
2090/* usModeMiscInfo- */
2091#define ATOM_H_CUTOFF 0x01
2092#define ATOM_HSYNC_POLARITY 0x02 /* 0=Active High, 1=Active Low */
2093#define ATOM_VSYNC_POLARITY 0x04 /* 0=Active High, 1=Active Low */
2094#define ATOM_V_CUTOFF 0x08
2095#define ATOM_H_REPLICATIONBY2 0x10
2096#define ATOM_V_REPLICATIONBY2 0x20
2097#define ATOM_COMPOSITESYNC 0x40
2098#define ATOM_INTERLACE 0x80
2099#define ATOM_DOUBLE_CLOCK_MODE 0x100
2100#define ATOM_RGB888_MODE 0x200
2101
2102/* usRefreshRate- */
2103#define ATOM_REFRESH_43 43
2104#define ATOM_REFRESH_47 47
2105#define ATOM_REFRESH_56 56
2106#define ATOM_REFRESH_60 60
2107#define ATOM_REFRESH_65 65
2108#define ATOM_REFRESH_70 70
2109#define ATOM_REFRESH_72 72
2110#define ATOM_REFRESH_75 75
2111#define ATOM_REFRESH_85 85
2112
2113/* ATOM_MODE_TIMING data are exactly the same as VESA timing data. */
2114/* Translation from EDID to ATOM_MODE_TIMING, use the following formula. */
2115/* */
2116/* VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK */
2117/* = EDID_HA + EDID_HBL */
2118/* VESA_HDISP = VESA_ACTIVE = EDID_HA */
2119/* VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH */
2120/* = EDID_HA + EDID_HSO */
2121/* VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW */
2122/* VESA_BORDER = EDID_BORDER */
2123
2124/****************************************************************************/
2125/* Structure used in SetCRTC_UsingDTDTimingTable */
2126/****************************************************************************/
/* Parameter block for the SetCRTC_UsingDTDTiming command table: programs a
 * CRTC from EDID detailed-timing-descriptor style values (active size,
 * blanking time, sync offset/width) rather than raw CRTC totals. */
typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS {
	USHORT usH_Size;		/* horizontal active size */
	USHORT usH_Blanking_Time;	/* horizontal blanking time */
	USHORT usV_Size;		/* vertical active size */
	USHORT usV_Blanking_Time;	/* vertical blanking time */
	USHORT usH_SyncOffset;		/* hsync offset from end of active */
	USHORT usH_SyncWidth;		/* hsync pulse width */
	USHORT usV_SyncOffset;		/* vsync offset from end of active */
	USHORT usV_SyncWidth;		/* vsync pulse width */
	ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;	/* polarity/interlace flags */
	UCHAR ucH_Border;	/* From DFP EDID */
	UCHAR ucV_Border;
	UCHAR ucCRTC;		/* ATOM_CRTC1 or ATOM_CRTC2 */
	UCHAR ucPadding[3];	/* keep structure dword-aligned */
} SET_CRTC_USING_DTD_TIMING_PARAMETERS;
2142
2143/****************************************************************************/
2144/* Structure used in SetCRTC_TimingTable */
2145/****************************************************************************/
/* Parameter block for the SetCRTC_Timing command table: programs a CRTC from
 * raw CRTC totals (total/display/sync start/sync width) plus overscan. */
typedef struct _SET_CRTC_TIMING_PARAMETERS {
	USHORT usH_Total;	/* horizontal total */
	USHORT usH_Disp;	/* horizontal display */
	USHORT usH_SyncStart;	/* horizontal Sync start */
	USHORT usH_SyncWidth;	/* horizontal Sync width */
	USHORT usV_Total;	/* vertical total */
	USHORT usV_Disp;	/* vertical display */
	USHORT usV_SyncStart;	/* vertical Sync start */
	USHORT usV_SyncWidth;	/* vertical Sync width */
	ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;	/* polarity/interlace flags */
	UCHAR ucCRTC;		/* ATOM_CRTC1 or ATOM_CRTC2 */
	UCHAR ucOverscanRight;	/* right */
	UCHAR ucOverscanLeft;	/* left */
	UCHAR ucOverscanBottom;	/* bottom */
	UCHAR ucOverscanTop;	/* top */
	UCHAR ucReserved;
} SET_CRTC_TIMING_PARAMETERS;
2163#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
2164
2165/****************************************************************************/
2166/* Structure used in StandardVESA_TimingTable */
2167/* AnalogTV_InfoTable */
2168/* ComponentVideoInfoTable */
2169/****************************************************************************/
/* Full CRTC-style mode timing entry; used by StandardVESA_TimingTable,
 * AnalogTV_InfoTable and ComponentVideoInfoTable.  Values follow VESA timing
 * semantics (see the EDID->ATOM_MODE_TIMING translation comment above). */
typedef struct _ATOM_MODE_TIMING {
	USHORT usCRTC_H_Total;
	USHORT usCRTC_H_Disp;
	USHORT usCRTC_H_SyncStart;
	USHORT usCRTC_H_SyncWidth;
	USHORT usCRTC_V_Total;
	USHORT usCRTC_V_Disp;
	USHORT usCRTC_V_SyncStart;
	USHORT usCRTC_V_SyncWidth;
	USHORT usPixelClock;	/* in 10Khz unit */
	ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;	/* polarity/interlace flags */
	USHORT usCRTC_OverscanRight;
	USHORT usCRTC_OverscanLeft;
	USHORT usCRTC_OverscanBottom;
	USHORT usCRTC_OverscanTop;
	USHORT usReserve;	/* (sic) name fixed by the BIOS interface */
	UCHAR ucInternalModeNumber;
	UCHAR ucRefreshRate;	/* one of the ATOM_REFRESH_* values above */
} ATOM_MODE_TIMING;
2189
/* Mode timing in EDID detailed-timing-descriptor (DTD) form: active size,
 * blanking time and sync offset/width instead of CRTC totals.  28 bytes -
 * ATOM_DTD_MODE_SUPPORT_TBL_SIZE below relies on this size. */
typedef struct _ATOM_DTD_FORMAT {
	USHORT usPixClk;	/* pixel clock; presumably in 10KHz like usPixelClock above - confirm */
	USHORT usHActive;
	USHORT usHBlanking_Time;
	USHORT usVActive;
	USHORT usVBlanking_Time;
	USHORT usHSyncOffset;
	USHORT usHSyncWidth;
	USHORT usVSyncOffset;
	USHORT usVSyncWidth;
	USHORT usImageHSize;	/* physical image size */
	USHORT usImageVSize;
	UCHAR ucHBorder;
	UCHAR ucVBorder;
	ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;	/* polarity/interlace flags */
	UCHAR ucInternalModeNumber;
	UCHAR ucRefreshRate;
} ATOM_DTD_FORMAT;
2208
2209/****************************************************************************/
2210/* Structure used in LVDS_InfoTable */
2211/* * Need a document to describe this table */
2212/****************************************************************************/
2213#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
2214#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
2215#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
2216#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
2217
2218/* Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. */
2219/* Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL */
2220#define LCDPANEL_CAP_READ_EDID 0x1
2221
2222/* ucTableFormatRevision=1 */
2223/* ucTableContentRevision=1 */
/* LVDS_InfoTable v1.1 (ucTableFormatRevision=1, ucTableContentRevision=1):
 * integrated LCD panel description - native timing, power sequencing delays
 * and misc panel attributes. */
typedef struct _ATOM_LVDS_INFO {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ATOM_DTD_FORMAT sLCDTiming;	/* panel native timing in DTD form */
	USHORT usModePatchTableOffset;
	USHORT usSupportedRefreshRate;	/* Refer to panel info table in ATOMBIOS extension Spec. */
	USHORT usOffDelayInMs;		/* panel power-off delay */
	UCHAR ucPowerSequenceDigOntoDEin10Ms;	/* digital-on -> data-enable delay, in 10ms units */
	UCHAR ucPowerSequenceDEtoBLOnin10Ms;	/* data-enable -> backlight-on delay, in 10ms units */
	UCHAR ucLVDS_Misc;	/* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
	/* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
	/* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
	/* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
	UCHAR ucPanelDefaultRefreshRate;
	UCHAR ucPanelIdentification;
	UCHAR ucSS_Id;	/* spread spectrum entry ID, see ATOM_SPREAD_SPECTRUM_ASSIGNMENT */
} ATOM_LVDS_INFO;
2240
2241/* ucTableFormatRevision=1 */
2242/* ucTableContentRevision=2 */
/* LVDS_InfoTable v1.2 (ucTableFormatRevision=1, ucTableContentRevision=2):
 * supersedes ATOM_LVDS_INFO (see ATOM_LVDS_INFO_LAST below), adding panel
 * vendor/product IDs, capability flags (e.g. LCDPANEL_CAP_READ_EDID) and an
 * external info table. */
typedef struct _ATOM_LVDS_INFO_V12 {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ATOM_DTD_FORMAT sLCDTiming;	/* panel native timing in DTD form */
	USHORT usExtInfoTableOffset;	/* replaces v1.1 usModePatchTableOffset */
	USHORT usSupportedRefreshRate;	/* Refer to panel info table in ATOMBIOS extension Spec. */
	USHORT usOffDelayInMs;
	UCHAR ucPowerSequenceDigOntoDEin10Ms;
	UCHAR ucPowerSequenceDEtoBLOnin10Ms;
	UCHAR ucLVDS_Misc;	/* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
	/* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
	/* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
	/* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
	UCHAR ucPanelDefaultRefreshRate;
	UCHAR ucPanelIdentification;
	UCHAR ucSS_Id;
	USHORT usLCDVenderID;	/* (sic) name fixed by the BIOS interface */
	USHORT usLCDProductID;
	UCHAR ucLCDPanel_SpecialHandlingCap;	/* e.g. LCDPANEL_CAP_READ_EDID */
	UCHAR ucPanelInfoSize;	/* start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable */
	UCHAR ucReserved[2];
} ATOM_LVDS_INFO_V12;
2264
2265#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
2266
/* Mode patch record (ucRecordType == LCD_MODE_PATCH_RECORD_MODE_TYPE):
 * identifies a mode by its H/V display size. */
typedef struct _ATOM_PATCH_RECORD_MODE {
	UCHAR ucRecordType;
	USHORT usHDisp;
	USHORT usVDisp;
} ATOM_PATCH_RECORD_MODE;
2272
/* LCD RTS record (ucRecordType == LCD_RTS_RECORD_TYPE). */
typedef struct _ATOM_LCD_RTS_RECORD {
	UCHAR ucRecordType;
	UCHAR ucRTSValue;
} ATOM_LCD_RTS_RECORD;
2277
/* !! If the record below exists, it should always be the first record for easy use in command table!!! */
/* LCD mode-control capability record (ucRecordType == LCD_CAP_RECORD_TYPE);
 * usLCDCap is a bit vector of the LCD_MODE_CAP_* flags below. */
typedef struct _ATOM_LCD_MODE_CONTROL_CAP {
	UCHAR ucRecordType;
	USHORT usLCDCap;
} ATOM_LCD_MODE_CONTROL_CAP;
2283
2284#define LCD_MODE_CAP_BL_OFF 1
2285#define LCD_MODE_CAP_CRTC_OFF 2
2286#define LCD_MODE_CAP_PANEL_OFF 4
2287
/* Fake-EDID record (ucRecordType == LCD_FAKE_EDID_PATCH_RECORD_TYPE):
 * variable-length; the EDID bytes follow the length field in-line. */
typedef struct _ATOM_FAKE_EDID_PATCH_RECORD {
	UCHAR ucRecordType;
	UCHAR ucFakeEDIDLength;
	UCHAR ucFakeEDIDString[1];	/* This actually has ucFakeEdidLength elements. */
} ATOM_FAKE_EDID_PATCH_RECORD;
2293
/* Panel resolution record (ucRecordType == LCD_PANEL_RESOLUTION_RECORD_TYPE). */
typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
	UCHAR ucRecordType;
	USHORT usHSize;
	USHORT usVSize;
} ATOM_PANEL_RESOLUTION_PATCH_RECORD;
2299
2300#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1
2301#define LCD_RTS_RECORD_TYPE 2
2302#define LCD_CAP_RECORD_TYPE 3
2303#define LCD_FAKE_EDID_PATCH_RECORD_TYPE 4
2304#define LCD_PANEL_RESOLUTION_RECORD_TYPE 5
2305#define ATOM_RECORD_END_TYPE 0xFF
2306
2307/****************************Spread Spectrum Info Table Definitions **********************/
2308
2309/* ucTableFormatRevision=1 */
2310/* ucTableContentRevision=2 */
/* One spread-spectrum configuration entry (ucTableFormatRevision=1,
 * ucTableContentRevision=2). */
typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
	USHORT usSpreadSpectrumPercentage;
	UCHAR ucSpreadSpectrumType;	/* Bit0=0 Down Spread,=1 Center Spread (per ATOM_SS_*_SPREAD_MODE_MASK below; original comment said Bit1 for both fields). Bit1=1 Ext. =0 Int. Others:TBD */
	UCHAR ucSS_Step;
	UCHAR ucSS_Delay;
	UCHAR ucSS_Id;		/* e.g. ATOM_DP_SS_ID1/ID2; matched by ucSS_Id in the LVDS info tables */
	UCHAR ucRecommandedRef_Div;	/* (sic) name fixed by the BIOS interface */
	UCHAR ucSS_Range;	/* it was reserved for V11 */
} ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
2320
2321#define ATOM_MAX_SS_ENTRY 16
2322#define ATOM_DP_SS_ID1 0x0f1 /* SS modulation freq=30k */
2323#define ATOM_DP_SS_ID2 0x0f2 /* SS modulation freq=33k */
2324
2325#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
2326#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
2327#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
2328#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001
2329#define ATOM_INTERNAL_SS_MASK 0x00000000
2330#define ATOM_EXTERNAL_SS_MASK 0x00000002
2331#define EXEC_SS_STEP_SIZE_SHIFT 2
2332#define EXEC_SS_DELAY_SHIFT 4
2333#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4
2334
/* SS_InfoTable: fixed array of ATOM_MAX_SS_ENTRY spread-spectrum entries. */
typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
} ATOM_SPREAD_SPECTRUM_INFO;
2339
2340/****************************************************************************/
2341/* Structure used in AnalogTV_InfoTable (Top level) */
2342/****************************************************************************/
/* ucTVBootUpDefaultStd definition: */
2344
2345/* ATOM_TV_NTSC 1 */
2346/* ATOM_TV_NTSCJ 2 */
2347/* ATOM_TV_PAL 3 */
2348/* ATOM_TV_PALM 4 */
2349/* ATOM_TV_PALCN 5 */
2350/* ATOM_TV_PALN 6 */
2351/* ATOM_TV_PAL60 7 */
2352/* ATOM_TV_SECAM 8 */
2353
2354/* ucTVSuppportedStd definition: */
2355#define NTSC_SUPPORT 0x1
2356#define NTSCJ_SUPPORT 0x2
2357
2358#define PAL_SUPPORT 0x4
2359#define PALM_SUPPORT 0x8
2360#define PALCN_SUPPORT 0x10
2361#define PALN_SUPPORT 0x20
2362#define PAL60_SUPPORT 0x40
2363#define SECAM_SUPPORT 0x80
2364
2365#define MAX_SUPPORTED_TV_TIMING 2
2366
/* AnalogTV_InfoTable: supported/default TV standards (see the *_SUPPORT bit
 * defines and ucTVBootUpDefaultStd values above) plus per-standard timings. */
typedef struct _ATOM_ANALOG_TV_INFO {
	ATOM_COMMON_TABLE_HEADER sHeader;
	UCHAR ucTV_SupportedStandard;		/* bit vector of NTSC_SUPPORT..SECAM_SUPPORT */
	UCHAR ucTV_BootUpDefaultStandard;	/* one of the ATOM_TV_* values listed above */
	UCHAR ucExt_TV_ASIC_ID;
	UCHAR ucExt_TV_ASIC_SlaveAddr;		/* presumably an I2C slave address - confirm */
	/*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; */
	ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
} ATOM_ANALOG_TV_INFO;
2376
2377/**************************************************************************/
/* VRAM usage and their definitions */
2379
2380/* One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. */
2381/* Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. */
2382/* All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! */
2383/* To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR */
2384/* To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX */
2385
2386#ifndef VESA_MEMORY_IN_64K_BLOCK
2387#define VESA_MEMORY_IN_64K_BLOCK 0x100 /* 256*64K=16Mb (Max. VESA memory is 16Mb!) */
2388#endif
2389
2390#define ATOM_EDID_RAW_DATASIZE 256 /* In Bytes */
2391#define ATOM_HWICON_SURFACE_SIZE 4096 /* In Bytes */
2392#define ATOM_HWICON_INFOTABLE_SIZE 32
2393#define MAX_DTD_MODE_IN_VRAM 6
2394#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) /* 28= (SIZEOF ATOM_DTD_FORMAT) */
2395#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) /* 32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) */
2396#define DFP_ENCODER_TYPE_OFFSET 0x80
2397#define DP_ENCODER_LANE_NUM_OFFSET 0x84
2398#define DP_ENCODER_LINK_RATE_OFFSET 0x88
2399
2400#define ATOM_HWICON1_SURFACE_ADDR 0
2401#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
2402#define ATOM_HWICON_INFOTABLE_ADDR (ATOM_HWICON2_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
2403#define ATOM_CRT1_EDID_ADDR (ATOM_HWICON_INFOTABLE_ADDR + ATOM_HWICON_INFOTABLE_SIZE)
2404#define ATOM_CRT1_DTD_MODE_TBL_ADDR (ATOM_CRT1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2405#define ATOM_CRT1_STD_MODE_TBL_ADDR (ATOM_CRT1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2406
2407#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2408#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2409#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2410
2411#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2412
2413#define ATOM_DFP1_EDID_ADDR (ATOM_TV1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2414#define ATOM_DFP1_DTD_MODE_TBL_ADDR (ATOM_DFP1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2415#define ATOM_DFP1_STD_MODE_TBL_ADDR (ATOM_DFP1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2416
2417#define ATOM_CRT2_EDID_ADDR (ATOM_DFP1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2418#define ATOM_CRT2_DTD_MODE_TBL_ADDR (ATOM_CRT2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2419#define ATOM_CRT2_STD_MODE_TBL_ADDR (ATOM_CRT2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2420
2421#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2422#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2423#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2424
2425#define ATOM_TV2_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2426#define ATOM_TV2_DTD_MODE_TBL_ADDR (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2427#define ATOM_TV2_STD_MODE_TBL_ADDR (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2428
2429#define ATOM_DFP2_EDID_ADDR (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2430#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2431#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2432
2433#define ATOM_CV_EDID_ADDR (ATOM_DFP2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2434#define ATOM_CV_DTD_MODE_TBL_ADDR (ATOM_CV_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2435#define ATOM_CV_STD_MODE_TBL_ADDR (ATOM_CV_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2436
2437#define ATOM_DFP3_EDID_ADDR (ATOM_CV_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2438#define ATOM_DFP3_DTD_MODE_TBL_ADDR (ATOM_DFP3_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2439#define ATOM_DFP3_STD_MODE_TBL_ADDR (ATOM_DFP3_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2440
2441#define ATOM_DFP4_EDID_ADDR (ATOM_DFP3_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2442#define ATOM_DFP4_DTD_MODE_TBL_ADDR (ATOM_DFP4_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2443#define ATOM_DFP4_STD_MODE_TBL_ADDR (ATOM_DFP4_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2444
2445#define ATOM_DFP5_EDID_ADDR (ATOM_DFP4_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2446#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2447#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2448
2449#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
2450
2451#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 256)
2452#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START + 512)
2453
2454/* The size below is in Kb! */
2455#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
2456
2457#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
2458#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
2459#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
2460#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
2461
2462/***********************************************************************************/
2463/* Structure used in VRAM_UsageByFirmwareTable */
2464/* Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm */
2465/* at running time. */
/* note2: From RV770, the memory is more than 32bit addressable, so we will change */
/*        ucTableFormatRevision=1,ucTableContentRevision=4, the structure remains  */
/*        exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware */
/*        (in offset to start of memory address) is KB aligned instead of byte aligned. */
2470/***********************************************************************************/
2471#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
2472
/* One firmware VRAM reservation: start offset into memory and size in KB.
 * Per the note above, from content revision 4 onward ulStartAddrUsedByFirmware
 * is KB-aligned rather than byte-aligned. */
typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO {
	ULONG ulStartAddrUsedByFirmware;
	USHORT usFirmwareUseInKb;
	USHORT usReserved;
} ATOM_FIRMWARE_VRAM_RESERVE_INFO;
2478
/* VRAM_UsageByFirmwareTable: filled in by the BIOS at run time (see note1
 * above); currently holds a single reservation entry. */
typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE {
	ATOM_COMMON_TABLE_HEADER sHeader;
	ATOM_FIRMWARE_VRAM_RESERVE_INFO
	    asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
} ATOM_VRAM_USAGE_BY_FIRMWARE;
2484
/****************************************************************************/
/* Structure used in GPIO_Pin_LUTTable */
/****************************************************************************/
/* Maps one GPIO ID to its register index and bit position. */
typedef struct _ATOM_GPIO_PIN_ASSIGNMENT {
    USHORT usGpioPin_AIndex;    /* GPIO_PAD register index */
    UCHAR ucGpioPinBitShift;    /* bit position of this pin within that register */
    UCHAR ucGPIO_ID;            /* ID other tables use to refer to this pin */
} ATOM_GPIO_PIN_ASSIGNMENT;

/* GPIO_Pin_LUTTable: variable-length lookup table of pin assignments. */
typedef struct _ATOM_GPIO_PIN_LUT {
    ATOM_COMMON_TABLE_HEADER sHeader;
    ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];    /* 1 is only for allocation */
} ATOM_GPIO_PIN_LUT;
2498
/****************************************************************************/
/* Structure used in ComponentVideoInfoTable */
/****************************************************************************/
#define GPIO_PIN_ACTIVE_HIGH 0x1

#define MAX_SUPPORTED_CV_STANDARDS 5

/* definitions for ATOM_D_INFO.ucSettings (the ucSettings byte of ATOM_GPIO_INFO below) */
#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F    /* [4:0] */
#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60    /* [6:5] = must be zeroed out */
#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80      /* [7] */

/* GPIO register offset plus packed settings; see ATOM_GPIO_SETTINGS_* above. */
typedef struct _ATOM_GPIO_INFO {
    USHORT usAOffset;
    UCHAR ucSettings;    /* [4:0] bit shift, [7] active level; [6:5] must be zero */
    UCHAR ucReserved;
} ATOM_GPIO_INFO;
2516
2517/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) */
2518#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2
2519
2520/* definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i */
2521#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 /* [7]; */
2522#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F /* [6:0] */
2523
2524/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode */
2525/* Line 3 out put 5V. */
2526#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 /* represent gpio 3 state for 16:9 */
2527#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 /* represent gpio 4 state for 16:9 */
2528#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
2529
2530/* Line 3 out put 2.2V */
2531#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 /* represent gpio 3 state for 4:3 Letter box */
2532#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 /* represent gpio 4 state for 4:3 Letter box */
2533#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
2534
2535/* Line 3 out put 0V */
2536#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 /* represent gpio 3 state for 4:3 */
2537#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 /* represent gpio 4 state for 4:3 */
2538#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
2539
2540#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F /* bit [5:0] */
2541
2542#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 /* bit 7 */
2543
/* GPIO bit index in the per-mode gpio setting value; also represents the block number in the gpio blocks. */
2545#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 /* bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */
2546#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 /* bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */
2547
/* ComponentVideoInfoTable: component-video output configuration and per-mode timings. */
typedef struct _ATOM_COMPONENT_VIDEO_INFO {
    ATOM_COMMON_TABLE_HEADER sHeader;
    USHORT usMask_PinRegisterIndex;
    USHORT usEN_PinRegisterIndex;
    USHORT usY_PinRegisterIndex;
    USHORT usA_PinRegisterIndex;
    UCHAR ucBitShift;
    UCHAR ucPinActiveState;    /* ucPinActiveState: Bit0=1 active high, =0 active low */
    ATOM_DTD_FORMAT sReserved; /* must be zeroed out */
    UCHAR ucMiscInfo;          /* bit vector; see ATOM_CV_RESTRICT_FORMAT_SELECTION */
    UCHAR uc480i;              /* per-mode gpio setting; see ATOM_GPIO_DEFAULT_MODE_EN / ATOM_GPIO_SETTING_PERMODE_MASK */
    UCHAR uc480p;
    UCHAR uc720p;
    UCHAR uc1080i;
    UCHAR ucLetterBoxMode;     /* see the ATOM_CV_LINE3_ASPECTRATIO_* definitions */
    UCHAR ucReserved[3];
    UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zero, NTSC type connector */
    ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
    ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
} ATOM_COMPONENT_VIDEO_INFO;
2568
/* ComponentVideoInfoTable v2.1 (ucTableFormatRevision=2, ucTableContentRevision=1):
 * same content as v1 above minus the pin-register fields. */
typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 {
    ATOM_COMMON_TABLE_HEADER sHeader;
    UCHAR ucMiscInfo;          /* bit vector; see ATOM_CV_RESTRICT_FORMAT_SELECTION */
    UCHAR uc480i;              /* per-mode gpio setting; see ATOM_GPIO_DEFAULT_MODE_EN / ATOM_GPIO_SETTING_PERMODE_MASK */
    UCHAR uc480p;
    UCHAR uc720p;
    UCHAR uc1080i;
    UCHAR ucReserved;
    UCHAR ucLetterBoxMode;     /* see the ATOM_CV_LINE3_ASPECTRATIO_* definitions */
    UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zero, NTSC type connector */
    ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
    ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
} ATOM_COMPONENT_VIDEO_INFO_V21;
2584
2585#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21
2586
/****************************************************************************/
/* Structure used in object_InfoTable */
/****************************************************************************/
/* Header of the object info table: offsets to the per-type object sub-tables. */
typedef struct _ATOM_OBJECT_HEADER {
    ATOM_COMMON_TABLE_HEADER sHeader;
    USHORT usDeviceSupport;
    USHORT usConnectorObjectTableOffset;
    USHORT usRouterObjectTableOffset;
    USHORT usEncoderObjectTableOffset;
    USHORT usProtectionObjectTableOffset;    /* only available when Protection block is independent. */
    USHORT usDisplayPathTableOffset;
} ATOM_OBJECT_HEADER;

/* One display path: device tag, connector, GPU, and the chain of graphic objects between them. */
typedef struct _ATOM_DISPLAY_OBJECT_PATH {
    USHORT usDeviceTag;          /* supported device */
    USHORT usSize;               /* the size of ATOM_DISPLAY_OBJECT_PATH */
    USHORT usConnObjectId;       /* Connector Object ID */
    USHORT usGPUObjectId;        /* GPU ID */
    USHORT usGraphicObjIds[1];   /* 1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. */
} ATOM_DISPLAY_OBJECT_PATH;

typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE {
    UCHAR ucNumOfDispPath;    /* number of entries in asDispPath[] */
    UCHAR ucVersion;
    UCHAR ucPadding[2];
    ATOM_DISPLAY_OBJECT_PATH asDispPath[1];    /* 1 is only for allocation; entries are variable sized (usSize) */
} ATOM_DISPLAY_OBJECT_PATH_TABLE;

/* each object has this structure */
typedef struct _ATOM_OBJECT {
    USHORT usObjectID;
    USHORT usSrcDstTableOffset;  /* offset to an ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT */
    USHORT usRecordOffset;       /* this pointing to a bunch of records defined below */
    USHORT usReserved;
} ATOM_OBJECT;

/* Above 4 object table offset pointing to a bunch of objects all have this structure */
typedef struct _ATOM_OBJECT_TABLE {
    UCHAR ucNumberOfObjects;
    UCHAR ucPadding[3];
    ATOM_OBJECT asObjects[1];    /* 1 is only for allocation */
} ATOM_OBJECT_TABLE;

/* usSrcDstTableOffset pointing to this structure */
/* NOTE(review): variable-length on both arrays — ucNumberOfDst and usDstObjectID[]
 * actually follow ucNumberOfSrc entries of usSrcObjectID[]; the declared layout is
 * only valid for one src and one dst. Parse by counts, not by sizeof. */
typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT {
    UCHAR ucNumberOfSrc;
    USHORT usSrcObjectID[1];
    UCHAR ucNumberOfDst;
    USHORT usDstObjectID[1];
} ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
2637
/* Related definitions: all records are different but they share this common header. */
typedef struct _ATOM_COMMON_RECORD_HEADER {
    UCHAR ucRecordType;    /* one of the ATOM_*_RECORD_TYPE values below */
    UCHAR ucRecordSize;    /* The size of the whole record in byte */
} ATOM_COMMON_RECORD_HEADER;
2643
/* Record types; each value tags an ATOM_COMMON_RECORD_HEADER.ucRecordType. */
#define ATOM_I2C_RECORD_TYPE                            1
#define ATOM_HPD_INT_RECORD_TYPE                        2
#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE              3
#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE           4
#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE        5    /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE           6    /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE       7
#define ATOM_JTAG_RECORD_TYPE                           8    /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE               9
#define ATOM_ENCODER_DVO_CF_RECORD_TYPE                 10
#define ATOM_CONNECTOR_CF_RECORD_TYPE                   11
#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE         12
#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE    13
#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE         14
#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE  15

/* Must be updated when a new record type is added: equal to the last record type!
 * Fix: was ATOM_CONNECTOR_CF_RECORD_TYPE (11), which did not cover record
 * types 12-15 defined above. */
#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE
2662
/* ATOM_I2C_RECORD_TYPE: I2C bus/slave associated with an object. */
typedef struct _ATOM_I2C_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    ATOM_I2C_ID_CONFIG sucI2cId;
    UCHAR ucI2CAddr;    /* The slave address, it's 0 when the record is attached to connector for DDC */
} ATOM_I2C_RECORD;

/* ATOM_HPD_INT_RECORD_TYPE: hot-plug-detect pin for a connector. */
typedef struct _ATOM_HPD_INT_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    UCHAR ucHPDIntGPIOID;        /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
    UCHAR ucPluggged_PinState;   /* presumably the pin level meaning "plugged" (sic field name, kept for compatibility) */
} ATOM_HPD_INT_RECORD;

typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    UCHAR ucProtectionFlag;
    UCHAR ucReserved;
} ATOM_OUTPUT_PROTECTION_RECORD;

/* One device tag entry referenced by ATOM_CONNECTOR_DEVICE_TAG_RECORD. */
typedef struct _ATOM_CONNECTOR_DEVICE_TAG {
    ULONG ulACPIDeviceEnum;    /* Reserved for now */
    USHORT usDeviceID;         /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT" */
    USHORT usPadding;
} ATOM_CONNECTOR_DEVICE_TAG;

typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    UCHAR ucNumberOfDevice;
    UCHAR ucReserved;
    ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1];    /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation */
} ATOM_CONNECTOR_DEVICE_TAG_RECORD;

/* Obsolete: superseded by ATOM_OBJECT_GPIO_CNTL_RECORD. */
typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    UCHAR ucConfigGPIOID;
    UCHAR ucConfigGPIOState;    /* Set to 1 when it's active high to enable external flow in */
    UCHAR ucFlowinGPIPID;
    UCHAR ucExtInGPIPID;
} ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;

/* Obsolete: superseded by ATOM_OBJECT_GPIO_CNTL_RECORD. */
typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    UCHAR ucCTL1GPIO_ID;
    UCHAR ucCTL1GPIOState;    /* Set to 1 when it's active high */
    UCHAR ucCTL2GPIO_ID;
    UCHAR ucCTL2GPIOState;    /* Set to 1 when it's active high */
    UCHAR ucCTL3GPIO_ID;
    UCHAR ucCTL3GPIOState;    /* Set to 1 when it's active high */
    UCHAR ucCTLFPGA_IN_ID;
    UCHAR ucPadding[3];
} ATOM_ENCODER_FPGA_CONTROL_RECORD;

typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    UCHAR ucGPIOID;           /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
    UCHAR ucTVActiveState;    /* Indicating when the pin==0 or 1 when TV is connected */
} ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;

/* Obsolete: superseded by ATOM_OBJECT_GPIO_CNTL_RECORD. */
typedef struct _ATOM_JTAG_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    UCHAR ucTMSGPIO_ID;
    UCHAR ucTMSGPIOState;    /* Set to 1 when it's active high */
    UCHAR ucTCKGPIO_ID;
    UCHAR ucTCKGPIOState;    /* Set to 1 when it's active high */
    UCHAR ucTDOGPIO_ID;
    UCHAR ucTDOGPIOState;    /* Set to 1 when it's active high */
    UCHAR ucTDIGPIO_ID;
    UCHAR ucTDIGPIOState;    /* Set to 1 when it's active high */
    UCHAR ucPadding[2];
} ATOM_JTAG_RECORD;

/* The following generic object gpio pin control record type will replace
 * JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually */
typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR {
    UCHAR ucGPIOID;          /* GPIO_ID, find the corresponding ID in GPIO_LUT table */
    UCHAR ucGPIO_PinState;   /* Pin state showing how to set-up the pin */
} ATOM_GPIO_PIN_CONTROL_PAIR;

typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    UCHAR ucFlags;           /* Future expandability */
    UCHAR ucNumberOfPins;    /* Number of GPIO pins used to control the object */
    ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1];    /* the real gpio pin pair determined by number of pins ucNumberOfPins */
} ATOM_OBJECT_GPIO_CNTL_RECORD;
2745
2746/* Definitions for GPIO pin state */
2747#define GPIO_PIN_TYPE_INPUT 0x00
2748#define GPIO_PIN_TYPE_OUTPUT 0x10
2749#define GPIO_PIN_TYPE_HW_CONTROL 0x20
2750
2751/* For GPIO_PIN_TYPE_OUTPUT the following is defined */
2752#define GPIO_PIN_OUTPUT_STATE_MASK 0x01
2753#define GPIO_PIN_OUTPUT_STATE_SHIFT 0
2754#define GPIO_PIN_STATE_ACTIVE_LOW 0x0
2755#define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
2756
typedef struct _ATOM_ENCODER_DVO_CF_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    ULONG ulStrengthControl;    /* DVOA strength control for CF */
    UCHAR ucPadding[2];
} ATOM_ENCODER_DVO_CF_RECORD;

/* value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle */
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2

typedef struct _ATOM_CONNECTOR_CF_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    USHORT usMaxPixClk;
    UCHAR ucFlowCntlGpioId;
    UCHAR ucSwapCntlGpioId;
    UCHAR ucConnectedDvoBundle;    /* see ATOM_CONNECTOR_CF_RECORD_CONNECTED_* above */
    UCHAR ucPadding;
} ATOM_CONNECTOR_CF_RECORD;

/* Hard-coded detailed timing attached to a connector. */
typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    ATOM_DTD_FORMAT asTiming;
} ATOM_CONNECTOR_HARDCODE_DTD_RECORD;

typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;    /* ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE */
    UCHAR ucSubConnectorType;             /* CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A */
    UCHAR ucReserved;
} ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;

/* DDC-path mux selection for a router object. */
typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    UCHAR ucMuxType;          /* decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state */
    UCHAR ucMuxControlPin;
    UCHAR ucMuxState[2];      /* for alignment purpose */
} ATOM_ROUTER_DDC_PATH_SELECT_RECORD;

/* Data/clock-path mux selection for a router object. */
typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD {
    ATOM_COMMON_RECORD_HEADER sheader;
    UCHAR ucMuxType;          /* same encoding as ATOM_ROUTER_DDC_PATH_SELECT_RECORD.ucMuxType */
    UCHAR ucMuxControlPin;
    UCHAR ucMuxState[2];      /* for alignment purpose */
} ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
2800
2801/* define ucMuxType */
2802#define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f
2803#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01
2804
/****************************************************************************/
/* ASIC voltage data table */
/****************************************************************************/
/* Header of the legacy ATOM_VOLTAGE_INFO table. */
typedef struct _ATOM_VOLTAGE_INFO_HEADER {
    USHORT usVDDCBaseLevel;    /* In number of 50mv unit */
    USHORT usReserved;         /* For possible extension table offset */
    UCHAR ucNumOfVoltageEntries;
    UCHAR ucBytesPerVoltageEntry;
    UCHAR ucVoltageStep;       /* Indicating in how many mv increment is one step, 0.5mv unit */
    UCHAR ucDefaultVoltageEntry;
    UCHAR ucVoltageControlI2cLine;
    UCHAR ucVoltageControlAddress;
    UCHAR ucVoltageControlOffset;
} ATOM_VOLTAGE_INFO_HEADER;

typedef struct _ATOM_VOLTAGE_INFO {
    ATOM_COMMON_TABLE_HEADER sHeader;
    ATOM_VOLTAGE_INFO_HEADER viHeader;
    UCHAR ucVoltageEntries[64];    /* 64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry */
} ATOM_VOLTAGE_INFO;

/* Formula for converting a requested voltage into a VID code. */
typedef struct _ATOM_VOLTAGE_FORMULA {
    USHORT usVoltageBaseLevel;    /* In number of 1mv unit */
    USHORT usVoltageStep;         /* Indicating in how many mv increment is one step, 1mv unit */
    UCHAR ucNumOfVoltageEntries;  /* Number of Voltage Entry, which indicate max Voltage */
    UCHAR ucFlag;                 /* bit0=0 :step is 1mv =1 0.5mv */
    UCHAR ucBaseVID;              /* if there is no lookup table, VID = BaseVID + (Vol - BaseLevel) / VoltageStep */
    UCHAR ucReserved;
    UCHAR ucVIDAdjustEntries[32]; /* 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries */
} ATOM_VOLTAGE_FORMULA;

/* How the regulator is driven; see VOLTAGE_CONTROLLED_BY_* / VOLTAGE_CONTROL_ID_* below. */
typedef struct _ATOM_VOLTAGE_CONTROL {
    UCHAR ucVoltageControlId;    /* Indicate it is controlled by I2C or GPIO or HW state machine */
    UCHAR ucVoltageControlI2cLine;
    UCHAR ucVoltageControlAddress;
    UCHAR ucVoltageControlOffset;
    USHORT usGpioPin_AIndex;     /* GPIO_PAD register index */
    UCHAR ucGpioPinBitShift[9];  /* at most 8 pin support 255 VIDs, terminate with 0xff */
    UCHAR ucReserved;
} ATOM_VOLTAGE_CONTROL;
2845
2846/* Define ucVoltageControlId */
2847#define VOLTAGE_CONTROLLED_BY_HW 0x00
2848#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F
2849#define VOLTAGE_CONTROLLED_BY_GPIO 0x80
2850#define VOLTAGE_CONTROL_ID_LM64 0x01 /* I2C control, used for R5xx Core Voltage */
2851#define VOLTAGE_CONTROL_ID_DAC 0x02 /* I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI */
2852#define VOLTAGE_CONTROL_ID_VT116xM 0x03 /* I2C control, used for R6xx Core Voltage */
2853#define VOLTAGE_CONTROL_ID_DS4402 0x04
2854
typedef struct _ATOM_VOLTAGE_OBJECT {
    UCHAR ucVoltageType;             /* Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI */
    UCHAR ucSize;                    /* Size of Object */
    ATOM_VOLTAGE_CONTROL asControl;  /* describes how to control */
    ATOM_VOLTAGE_FORMULA asFormula;  /* Indicate How to convert real Voltage to VID */
} ATOM_VOLTAGE_OBJECT;

typedef struct _ATOM_VOLTAGE_OBJECT_INFO {
    ATOM_COMMON_TABLE_HEADER sHeader;
    ATOM_VOLTAGE_OBJECT asVoltageObj[3];    /* Info for Voltage control */
} ATOM_VOLTAGE_OBJECT_INFO;

/* Leakage-ID / voltage pair used by the ASIC profile table below. */
typedef struct _ATOM_LEAKID_VOLTAGE {
    UCHAR ucLeakageId;
    UCHAR ucReserved;
    USHORT usVoltage;
} ATOM_LEAKID_VOLTAGE;

typedef struct _ATOM_ASIC_PROFILE_VOLTAGE {
    UCHAR ucProfileId;              /* see the ATOM_ASIC_PROFILE_ID_* values below */
    UCHAR ucReserved;
    USHORT usSize;
    USHORT usEfuseSpareStartAddr;
    USHORT usFuseIndex[8];          /* from LSB to MSB, Max 8bit, end of 0xffff if less than 8 efuse id, */
    ATOM_LEAKID_VOLTAGE asLeakVol[2];    /* Leakage id and related voltage */
} ATOM_ASIC_PROFILE_VOLTAGE;
2881
2882/* ucProfileId */
2883#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
2884#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1
2885#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2
2886
typedef struct _ATOM_ASIC_PROFILING_INFO {
    ATOM_COMMON_TABLE_HEADER asHeader;
    ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
} ATOM_ASIC_PROFILING_INFO;

/* One power source (PCIe slot or external connector) and how its presence is sensed. */
typedef struct _ATOM_POWER_SOURCE_OBJECT {
    UCHAR ucPwrSrcId;           /* Power source; see POWERSOURCE_* below */
    UCHAR ucPwrSensorType;      /* GPIO, I2C or none; see POWER_SENSOR_* below */
    UCHAR ucPwrSensId;          /* if GPIO detect, it is GPIO id, if I2C detect, it is I2C id */
    UCHAR ucPwrSensSlaveAddr;   /* Slave address if I2C detect */
    UCHAR ucPwrSensRegIndex;    /* I2C register Index if I2C detect */
    UCHAR ucPwrSensRegBitMask;  /* detect which bit is used if I2C detect */
    UCHAR ucPwrSensActiveState; /* high active or low active */
    UCHAR ucReserve[3];         /* reserved */
    USHORT usSensPwr;           /* in unit of watt */
} ATOM_POWER_SOURCE_OBJECT;

typedef struct _ATOM_POWER_SOURCE_INFO {
    ATOM_COMMON_TABLE_HEADER asHeader;
    UCHAR asPwrbehave[16];
    ATOM_POWER_SOURCE_OBJECT asPwrObj[1];    /* 1 is only for allocation */
} ATOM_POWER_SOURCE_INFO;
2909
2910/* Define ucPwrSrcId */
2911#define POWERSOURCE_PCIE_ID1 0x00
2912#define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01
2913#define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02
2914#define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04
2915#define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08
2916
2917/* define ucPwrSensorId */
2918#define POWER_SENSOR_ALWAYS 0x00
2919#define POWER_SENSOR_GPIO 0x01
2920#define POWER_SENSOR_I2C 0x02
2921
2922/**************************************************************************/
2923/* This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design */
2924/* Memory SS Info Table */
2925/* Define Memory Clock SS chip ID */
2926#define ICS91719 1
2927#define ICS91720 2
2928
/* Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol */
typedef struct _ATOM_I2C_DATA_RECORD {
    UCHAR ucNunberOfBytes;    /* how many bytes SW writes for this block, besides "Start" and "Stop" (sic field name, kept for compatibility) */
    UCHAR ucI2CData[1];       /* I2C data in bytes, should be less than 16 bytes usually */
} ATOM_I2C_DATA_RECORD;

/* Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information */
typedef struct _ATOM_I2C_DEVICE_SETUP_INFO {
    ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;    /* I2C line and HW/SW assisted cap. */
    UCHAR ucSSChipID;                      /* SS chip being used; see ICS91719/ICS91720 above */
    UCHAR ucSSChipSlaveAddr;               /* Slave Address to set up this SS chip */
    UCHAR ucNumOfI2CDataRecords;           /* number of data block */
    ATOM_I2C_DATA_RECORD asI2CData[1];     /* 1 is only for allocation; records are variable sized */
} ATOM_I2C_DEVICE_SETUP_INFO;

/* ========================================================================================== */
typedef struct _ATOM_ASIC_MVDD_INFO {
    ATOM_COMMON_TABLE_HEADER sHeader;
    ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];    /* 1 is only for allocation */
} ATOM_ASIC_MVDD_INFO;
2949
2950/* ========================================================================================== */
2951#define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO
2952
2953/* ========================================================================================== */
2954/**************************************************************************/
2955
/* One internal spread-spectrum assignment. */
typedef struct _ATOM_ASIC_SS_ASSIGNMENT {
    ULONG ulTargetClockRange;          /* Clock Out frequency (VCO), in unit of 10Khz */
    USHORT usSpreadSpectrumPercentage; /* in unit of 0.01% */
    USHORT usSpreadRateInKhz;          /* in unit of kHz, modulation freq */
    UCHAR ucClockIndication;           /* Indicate which clock source needs SS; presumably the ASIC_INTERNAL_* values below — confirm against callers */
    UCHAR ucSpreadSpectrumMode;        /* Bit1=0 Down Spread, =1 Center Spread. */
    UCHAR ucReserved[2];
} ATOM_ASIC_SS_ASSIGNMENT;

/* Define ucSpreadSpectrumType */
#define ASIC_INTERNAL_MEMORY_SS 1
#define ASIC_INTERNAL_ENGINE_SS 2
#define ASIC_INTERNAL_UVD_SS 3

typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
    ATOM_COMMON_TABLE_HEADER sHeader;
    ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
} ATOM_ASIC_INTERNAL_SS_INFO;
2974
2975/* ==============================Scratch Pad Definition Portion=============================== */
2976#define ATOM_DEVICE_CONNECT_INFO_DEF 0
2977#define ATOM_ROM_LOCATION_DEF 1
2978#define ATOM_TV_STANDARD_DEF 2
2979#define ATOM_ACTIVE_INFO_DEF 3
2980#define ATOM_LCD_INFO_DEF 4
2981#define ATOM_DOS_REQ_INFO_DEF 5
2982#define ATOM_ACC_CHANGE_INFO_DEF 6
2983#define ATOM_DOS_MODE_INFO_DEF 7
2984#define ATOM_I2C_CHANNEL_STATUS_DEF 8
2985#define ATOM_I2C_CHANNEL_STATUS1_DEF 9
2986
2987/* BIOS_0_SCRATCH Definition */
2988#define ATOM_S0_CRT1_MONO 0x00000001L
2989#define ATOM_S0_CRT1_COLOR 0x00000002L
2990#define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
2991
2992#define ATOM_S0_TV1_COMPOSITE_A 0x00000004L
2993#define ATOM_S0_TV1_SVIDEO_A 0x00000008L
2994#define ATOM_S0_TV1_MASK_A (ATOM_S0_TV1_COMPOSITE_A+ATOM_S0_TV1_SVIDEO_A)
2995
2996#define ATOM_S0_CV_A 0x00000010L
2997#define ATOM_S0_CV_DIN_A 0x00000020L
2998#define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
2999
3000#define ATOM_S0_CRT2_MONO 0x00000100L
3001#define ATOM_S0_CRT2_COLOR 0x00000200L
3002#define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
3003
3004#define ATOM_S0_TV1_COMPOSITE 0x00000400L
3005#define ATOM_S0_TV1_SVIDEO 0x00000800L
3006#define ATOM_S0_TV1_SCART 0x00004000L
3007#define ATOM_S0_TV1_MASK (ATOM_S0_TV1_COMPOSITE+ATOM_S0_TV1_SVIDEO+ATOM_S0_TV1_SCART)
3008
3009#define ATOM_S0_CV 0x00001000L
3010#define ATOM_S0_CV_DIN 0x00002000L
3011#define ATOM_S0_CV_MASK (ATOM_S0_CV+ATOM_S0_CV_DIN)
3012
3013#define ATOM_S0_DFP1 0x00010000L
3014#define ATOM_S0_DFP2 0x00020000L
3015#define ATOM_S0_LCD1 0x00040000L
3016#define ATOM_S0_LCD2 0x00080000L
3017#define ATOM_S0_TV2 0x00100000L
3018#define ATOM_S0_DFP3 0x00200000L
3019#define ATOM_S0_DFP4 0x00400000L
3020#define ATOM_S0_DFP5 0x00800000L
3021
3022#define ATOM_S0_DFP_MASK \
3023 (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5)
3024
3025#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L /* If set, indicates we are running a PCIE asic with */
3026 /* the FAD/HDP reg access bug. Bit is read by DAL */
3027
3028#define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L
3029#define ATOM_S0_THERMAL_STATE_SHIFT 26
3030
3031#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
3032#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
3033
3034#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
3035#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
3036#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
3037
/* Byte aligned definition for BIOS usage */
3039#define ATOM_S0_CRT1_MONOb0 0x01
3040#define ATOM_S0_CRT1_COLORb0 0x02
3041#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
3042
3043#define ATOM_S0_TV1_COMPOSITEb0 0x04
3044#define ATOM_S0_TV1_SVIDEOb0 0x08
3045#define ATOM_S0_TV1_MASKb0 (ATOM_S0_TV1_COMPOSITEb0+ATOM_S0_TV1_SVIDEOb0)
3046
3047#define ATOM_S0_CVb0 0x10
3048#define ATOM_S0_CV_DINb0 0x20
3049#define ATOM_S0_CV_MASKb0 (ATOM_S0_CVb0+ATOM_S0_CV_DINb0)
3050
3051#define ATOM_S0_CRT2_MONOb1 0x01
3052#define ATOM_S0_CRT2_COLORb1 0x02
3053#define ATOM_S0_CRT2_MASKb1 (ATOM_S0_CRT2_MONOb1+ATOM_S0_CRT2_COLORb1)
3054
3055#define ATOM_S0_TV1_COMPOSITEb1 0x04
3056#define ATOM_S0_TV1_SVIDEOb1 0x08
3057#define ATOM_S0_TV1_SCARTb1 0x40
3058#define ATOM_S0_TV1_MASKb1 (ATOM_S0_TV1_COMPOSITEb1+ATOM_S0_TV1_SVIDEOb1+ATOM_S0_TV1_SCARTb1)
3059
3060#define ATOM_S0_CVb1 0x10
3061#define ATOM_S0_CV_DINb1 0x20
3062#define ATOM_S0_CV_MASKb1 (ATOM_S0_CVb1+ATOM_S0_CV_DINb1)
3063
3064#define ATOM_S0_DFP1b2 0x01
3065#define ATOM_S0_DFP2b2 0x02
3066#define ATOM_S0_LCD1b2 0x04
3067#define ATOM_S0_LCD2b2 0x08
3068#define ATOM_S0_TV2b2 0x10
3069#define ATOM_S0_DFP3b2 0x20
3070
3071#define ATOM_S0_THERMAL_STATE_MASKb3 0x1C
3072#define ATOM_S0_THERMAL_STATE_SHIFTb3 2
3073
3074#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
3075#define ATOM_S0_LCD1_SHIFT 18
3076
3077/* BIOS_1_SCRATCH Definition */
3078#define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL
3079#define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L
3080
3081/* BIOS_2_SCRATCH Definition */
3082#define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL
3083#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L
3084#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8
3085
/* Per-device DPMS state bits in BIOS_2_SCRATCH. */
#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
#define ATOM_S2_TV1_DPMS_STATE  0x00040000L
#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
#define ATOM_S2_TV2_DPMS_STATE  0x00400000L
#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
#define ATOM_S2_CV_DPMS_STATE   0x01000000L
#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L

/* Mask of all DFP DPMS state bits. */
#define ATOM_S2_DFP_DPM_STATE \
	(ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | \
	 ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | \
	 ATOM_S2_DFP5_DPMS_STATE)

/* Mask of all device DPMS state bits.
 * Fix: the original expansion referenced ATOM_S2_DFP_DPMS_STATE, which is
 * never defined (the macro above is ATOM_S2_DFP_DPM_STATE), so this macro
 * could not be expanded; also combine the bit masks with | instead of +. */
#define ATOM_S2_DEVICE_DPMS_STATE \
	(ATOM_S2_CRT1_DPMS_STATE | ATOM_S2_LCD1_DPMS_STATE | \
	 ATOM_S2_TV1_DPMS_STATE | ATOM_S2_DFP_DPM_STATE | \
	 ATOM_S2_CRT2_DPMS_STATE | ATOM_S2_LCD2_DPMS_STATE | \
	 ATOM_S2_TV2_DPMS_STATE | ATOM_S2_CV_DPMS_STATE)
3109
3110#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L
3111#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
3112#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L
3113
3114#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L
3115
3116#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0
3117#define ATOM_S2_DISPLAY_ROTATION_90_DEGREE 0x1
3118#define ATOM_S2_DISPLAY_ROTATION_180_DEGREE 0x2
3119#define ATOM_S2_DISPLAY_ROTATION_270_DEGREE 0x3
3120#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
3121#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L
3122
/* Byte aligned definition for BIOS usage */
3124#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
3125#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
3126#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
3127#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
3128#define ATOM_S2_TV1_DPMS_STATEb2 0x04
3129#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
3130#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
3131#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
3132#define ATOM_S2_TV2_DPMS_STATEb2 0x40
3133#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
3134#define ATOM_S2_CV_DPMS_STATEb3 0x01
3135#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
3136#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
3137#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
3138
3139#define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
3140#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
3141#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3 0x10
3142#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
3143#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
3144
3145/* BIOS_3_SCRATCH Definition */
3146#define ATOM_S3_CRT1_ACTIVE 0x00000001L
3147#define ATOM_S3_LCD1_ACTIVE 0x00000002L
3148#define ATOM_S3_TV1_ACTIVE 0x00000004L
3149#define ATOM_S3_DFP1_ACTIVE 0x00000008L
3150#define ATOM_S3_CRT2_ACTIVE 0x00000010L
3151#define ATOM_S3_LCD2_ACTIVE 0x00000020L
3152#define ATOM_S3_TV2_ACTIVE 0x00000040L
3153#define ATOM_S3_DFP2_ACTIVE 0x00000080L
3154#define ATOM_S3_CV_ACTIVE 0x00000100L
3155#define ATOM_S3_DFP3_ACTIVE 0x00000200L
3156#define ATOM_S3_DFP4_ACTIVE 0x00000400L
3157#define ATOM_S3_DFP5_ACTIVE 0x00000800L
3158
3159#define ATOM_S3_DEVICE_ACTIVE_MASK 0x000003FFL
3160
3161#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L
3162#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
3163
3164#define ATOM_S3_CRT1_CRTC_ACTIVE 0x00010000L
3165#define ATOM_S3_LCD1_CRTC_ACTIVE 0x00020000L
3166#define ATOM_S3_TV1_CRTC_ACTIVE 0x00040000L
3167#define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L
3168#define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L
3169#define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L
3170#define ATOM_S3_TV2_CRTC_ACTIVE 0x00400000L
3171#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L
3172#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L
3173#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L
3174#define ATOM_S3_DFP4_CRTC_ACTIVE 0x04000000L
3175#define ATOM_S3_DFP5_CRTC_ACTIVE 0x08000000L
3176
3177#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
3178#define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L
3179#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
3180#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L
3181
/* Byte aligned definition for BIOS usage */
3183#define ATOM_S3_CRT1_ACTIVEb0 0x01
3184#define ATOM_S3_LCD1_ACTIVEb0 0x02
3185#define ATOM_S3_TV1_ACTIVEb0 0x04
3186#define ATOM_S3_DFP1_ACTIVEb0 0x08
3187#define ATOM_S3_CRT2_ACTIVEb0 0x10
3188#define ATOM_S3_LCD2_ACTIVEb0 0x20
3189#define ATOM_S3_TV2_ACTIVEb0 0x40
3190#define ATOM_S3_DFP2_ACTIVEb0 0x80
3191#define ATOM_S3_CV_ACTIVEb1 0x01
3192#define ATOM_S3_DFP3_ACTIVEb1 0x02
3193#define ATOM_S3_DFP4_ACTIVEb1 0x04
3194#define ATOM_S3_DFP5_ACTIVEb1 0x08
3195
3196#define ATOM_S3_ACTIVE_CRTC1w0 0xFFF
3197
3198#define ATOM_S3_CRT1_CRTC_ACTIVEb2 0x01
3199#define ATOM_S3_LCD1_CRTC_ACTIVEb2 0x02
3200#define ATOM_S3_TV1_CRTC_ACTIVEb2 0x04
3201#define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08
3202#define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10
3203#define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20
3204#define ATOM_S3_TV2_CRTC_ACTIVEb2 0x40
3205#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80
3206#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01
3207#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02
3208#define ATOM_S3_DFP4_CRTC_ACTIVEb3 0x04
3209#define ATOM_S3_DFP5_CRTC_ACTIVEb3 0x08
3210
3211#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF
3212
3213#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
3214#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
3215#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
3216
3217/* BIOS_4_SCRATCH Definition */
3218#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL
3219#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
3220#define ATOM_S4_LCD1_REFRESH_SHIFT 8
3221
/* Byte aligned definition for BIOS usage */
3223#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
3224#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
3225#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
3226
3227/* BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! */
3228#define ATOM_S5_DOS_REQ_CRT1b0 0x01
3229#define ATOM_S5_DOS_REQ_LCD1b0 0x02
3230#define ATOM_S5_DOS_REQ_TV1b0 0x04
3231#define ATOM_S5_DOS_REQ_DFP1b0 0x08
3232#define ATOM_S5_DOS_REQ_CRT2b0 0x10
3233#define ATOM_S5_DOS_REQ_LCD2b0 0x20
3234#define ATOM_S5_DOS_REQ_TV2b0 0x40
3235#define ATOM_S5_DOS_REQ_DFP2b0 0x80
3236#define ATOM_S5_DOS_REQ_CVb1 0x01
3237#define ATOM_S5_DOS_REQ_DFP3b1 0x02
3238#define ATOM_S5_DOS_REQ_DFP4b1 0x04
3239#define ATOM_S5_DOS_REQ_DFP5b1 0x08
3240
3241#define ATOM_S5_DOS_REQ_DEVICEw0 0x03FF
3242
3243#define ATOM_S5_DOS_REQ_CRT1 0x0001
3244#define ATOM_S5_DOS_REQ_LCD1 0x0002
3245#define ATOM_S5_DOS_REQ_TV1 0x0004
3246#define ATOM_S5_DOS_REQ_DFP1 0x0008
3247#define ATOM_S5_DOS_REQ_CRT2 0x0010
3248#define ATOM_S5_DOS_REQ_LCD2 0x0020
3249#define ATOM_S5_DOS_REQ_TV2 0x0040
3250#define ATOM_S5_DOS_REQ_DFP2 0x0080
3251#define ATOM_S5_DOS_REQ_CV 0x0100
3252#define ATOM_S5_DOS_REQ_DFP3 0x0200
3253#define ATOM_S5_DOS_REQ_DFP4 0x0400
3254#define ATOM_S5_DOS_REQ_DFP5 0x0800
3255
3256#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0
3257#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0
3258#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0
3259#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1
3260#define ATOM_S5_DOS_FORCE_DEVICEw1 \
3261 (ATOM_S5_DOS_FORCE_CRT1b2 + ATOM_S5_DOS_FORCE_TV1b2 + \
3262 ATOM_S5_DOS_FORCE_CRT2b2 + (ATOM_S5_DOS_FORCE_CVb3 << 8))
3263
3264/* BIOS_6_SCRATCH Definition */
3265#define ATOM_S6_DEVICE_CHANGE 0x00000001L
3266#define ATOM_S6_SCALER_CHANGE 0x00000002L
3267#define ATOM_S6_LID_CHANGE 0x00000004L
3268#define ATOM_S6_DOCKING_CHANGE 0x00000008L
3269#define ATOM_S6_ACC_MODE 0x00000010L
3270#define ATOM_S6_EXT_DESKTOP_MODE 0x00000020L
3271#define ATOM_S6_LID_STATE 0x00000040L
3272#define ATOM_S6_DOCK_STATE 0x00000080L
3273#define ATOM_S6_CRITICAL_STATE 0x00000100L
3274#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L
3275#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L
3276#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L
3277#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L /* Normal expansion Request bit for LCD */
3278#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L /* Aspect ratio expansion Request bit for LCD */
3279
3280#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion */
3281#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L /* This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion */
3282
3283#define ATOM_S6_ACC_REQ_CRT1 0x00010000L
3284#define ATOM_S6_ACC_REQ_LCD1 0x00020000L
3285#define ATOM_S6_ACC_REQ_TV1 0x00040000L
3286#define ATOM_S6_ACC_REQ_DFP1 0x00080000L
3287#define ATOM_S6_ACC_REQ_CRT2 0x00100000L
3288#define ATOM_S6_ACC_REQ_LCD2 0x00200000L
3289#define ATOM_S6_ACC_REQ_TV2 0x00400000L
3290#define ATOM_S6_ACC_REQ_DFP2 0x00800000L
3291#define ATOM_S6_ACC_REQ_CV 0x01000000L
3292#define ATOM_S6_ACC_REQ_DFP3 0x02000000L
3293#define ATOM_S6_ACC_REQ_DFP4 0x04000000L
3294#define ATOM_S6_ACC_REQ_DFP5 0x08000000L
3295
3296#define ATOM_S6_ACC_REQ_MASK 0x0FFF0000L
3297#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE 0x10000000L
3298#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH 0x20000000L
3299#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
3300#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
3301
/* Byte aligned definition for BIOS usage */
3303#define ATOM_S6_DEVICE_CHANGEb0 0x01
3304#define ATOM_S6_SCALER_CHANGEb0 0x02
3305#define ATOM_S6_LID_CHANGEb0 0x04
3306#define ATOM_S6_DOCKING_CHANGEb0 0x08
3307#define ATOM_S6_ACC_MODEb0 0x10
3308#define ATOM_S6_EXT_DESKTOP_MODEb0 0x20
3309#define ATOM_S6_LID_STATEb0 0x40
3310#define ATOM_S6_DOCK_STATEb0 0x80
3311#define ATOM_S6_CRITICAL_STATEb1 0x01
3312#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
3313#define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04
3314#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
3315#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
3316#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
3317
3318#define ATOM_S6_ACC_REQ_CRT1b2 0x01
3319#define ATOM_S6_ACC_REQ_LCD1b2 0x02
3320#define ATOM_S6_ACC_REQ_TV1b2 0x04
3321#define ATOM_S6_ACC_REQ_DFP1b2 0x08
3322#define ATOM_S6_ACC_REQ_CRT2b2 0x10
3323#define ATOM_S6_ACC_REQ_LCD2b2 0x20
3324#define ATOM_S6_ACC_REQ_TV2b2 0x40
3325#define ATOM_S6_ACC_REQ_DFP2b2 0x80
3326#define ATOM_S6_ACC_REQ_CVb3 0x01
3327#define ATOM_S6_ACC_REQ_DFP3b3 0x02
3328#define ATOM_S6_ACC_REQ_DFP4b3 0x04
3329#define ATOM_S6_ACC_REQ_DFP5b3 0x08
3330
3331#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0
3332#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
3333#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCHb3 0x20
3334#define ATOM_S6_VRI_BRIGHTNESS_CHANGEb3 0x40
3335#define ATOM_S6_CONFIG_DISPLAY_CHANGEb3 0x80
3336
3337#define ATOM_S6_DEVICE_CHANGE_SHIFT 0
3338#define ATOM_S6_SCALER_CHANGE_SHIFT 1
3339#define ATOM_S6_LID_CHANGE_SHIFT 2
3340#define ATOM_S6_DOCKING_CHANGE_SHIFT 3
3341#define ATOM_S6_ACC_MODE_SHIFT 4
3342#define ATOM_S6_EXT_DESKTOP_MODE_SHIFT 5
3343#define ATOM_S6_LID_STATE_SHIFT 6
3344#define ATOM_S6_DOCK_STATE_SHIFT 7
3345#define ATOM_S6_CRITICAL_STATE_SHIFT 8
3346#define ATOM_S6_HW_I2C_BUSY_STATE_SHIFT 9
3347#define ATOM_S6_THERMAL_STATE_CHANGE_SHIFT 10
3348#define ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT 11
3349#define ATOM_S6_REQ_SCALER_SHIFT 12
3350#define ATOM_S6_REQ_SCALER_ARATIO_SHIFT 13
3351#define ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT 14
3352#define ATOM_S6_I2C_STATE_CHANGE_SHIFT 15
3353#define ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT 28
3354#define ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH_SHIFT 29
3355#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30
3356#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31
3357
3358/* BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! */
3359#define ATOM_S7_DOS_MODE_TYPEb0 0x03
3360#define ATOM_S7_DOS_MODE_VGAb0 0x00
3361#define ATOM_S7_DOS_MODE_VESAb0 0x01
3362#define ATOM_S7_DOS_MODE_EXTb0 0x02
3363#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C
3364#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0
3365#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01
3366#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF
3367
3368#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
3369
3370/* BIOS_8_SCRATCH Definition */
3371#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF
3372#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
3373
3374#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0
3375#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16
3376
3377/* BIOS_9_SCRATCH Definition */
3378#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
3379#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF
3380#endif
3381#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
3382#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000
3383#endif
3384#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
3385#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
3386#endif
3387#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
3388#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16
3389#endif
3390
3391#define ATOM_FLAG_SET 0x20
3392#define ATOM_FLAG_CLEAR 0
3393#define CLEAR_ATOM_S6_ACC_MODE \
3394 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3395 ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
3396#define SET_ATOM_S6_DEVICE_CHANGE \
3397 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3398 ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
3399#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE \
3400 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3401 ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
3402#define SET_ATOM_S6_SCALER_CHANGE \
3403 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3404 ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
3405#define SET_ATOM_S6_LID_CHANGE \
3406 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3407 ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
3408
3409#define SET_ATOM_S6_LID_STATE \
3410 ((ATOM_ACC_CHANGE_INFO_DEF << 8) |\
3411 ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
3412#define CLEAR_ATOM_S6_LID_STATE \
3413 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3414 ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
3415
3416#define SET_ATOM_S6_DOCK_CHANGE \
3417 ((ATOM_ACC_CHANGE_INFO_DEF << 8)| \
3418 ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
3419#define SET_ATOM_S6_DOCK_STATE \
3420 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3421 ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
3422#define CLEAR_ATOM_S6_DOCK_STATE \
3423 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3424 ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
3425
3426#define SET_ATOM_S6_THERMAL_STATE_CHANGE \
3427 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3428 ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
3429#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE \
3430 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3431 ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
3432#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS \
3433 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3434 ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
3435
3436#define SET_ATOM_S6_CRITICAL_STATE \
3437 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3438 ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
3439#define CLEAR_ATOM_S6_CRITICAL_STATE \
3440 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3441 ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
3442
3443#define SET_ATOM_S6_REQ_SCALER \
3444 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3445 ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
3446#define CLEAR_ATOM_S6_REQ_SCALER \
3447 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3448 ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
3449
3450#define SET_ATOM_S6_REQ_SCALER_ARATIO \
3451 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3452 ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
3453#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO \
3454 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3455 ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
3456
3457#define SET_ATOM_S6_I2C_STATE_CHANGE \
3458 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3459 ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
3460
3461#define SET_ATOM_S6_DISPLAY_STATE_CHANGE \
3462 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3463 ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
3464
3465#define SET_ATOM_S6_DEVICE_RECONFIG \
3466 ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
3467 ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
3468#define CLEAR_ATOM_S0_LCD1 \
3469 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 ) | \
3470 ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
3471#define SET_ATOM_S7_DOS_8BIT_DAC_EN \
3472 ((ATOM_DOS_MODE_INFO_DEF << 8) | \
3473 ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
3474#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN \
3475 ((ATOM_DOS_MODE_INFO_DEF << 8) | \
3476 ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
3477
3478/****************************************************************************/
/* Portion II: Definitions only used in Driver */
3480/****************************************************************************/
3481
3482/* Macros used by driver */
3483
3484#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char *)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES *)0)->FieldName)-(char *)0)/sizeof(USHORT))
3485
3486#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
3487#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
3488
3489#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
3490#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
3491
3492/****************************************************************************/
/* Portion III: Definitions only used in VBIOS */
3494/****************************************************************************/
3495#define ATOM_DAC_SRC 0x80
3496#define ATOM_SRC_DAC1 0
3497#define ATOM_SRC_DAC2 0x80
3498
3499#ifdef UEFI_BUILD
3500#define USHORT UTEMP
3501#endif
3502
3503typedef struct _MEMORY_PLLINIT_PARAMETERS {
3504 ULONG ulTargetMemoryClock; /* In 10Khz unit */
3505 UCHAR ucAction; /* not define yet */
3506 UCHAR ucFbDiv_Hi; /* Fbdiv Hi byte */
3507 UCHAR ucFbDiv; /* FB value */
3508 UCHAR ucPostDiv; /* Post div */
3509} MEMORY_PLLINIT_PARAMETERS;
3510
3511#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS
3512
3513#define GPIO_PIN_WRITE 0x01
3514#define GPIO_PIN_READ 0x00
3515
3516typedef struct _GPIO_PIN_CONTROL_PARAMETERS {
3517 UCHAR ucGPIO_ID; /* return value, read from GPIO pins */
3518 UCHAR ucGPIOBitShift; /* define which bit in uGPIOBitVal need to be update */
3519 UCHAR ucGPIOBitVal; /* Set/Reset corresponding bit defined in ucGPIOBitMask */
3520 UCHAR ucAction; /* =GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write */
3521} GPIO_PIN_CONTROL_PARAMETERS;
3522
3523typedef struct _ENABLE_SCALER_PARAMETERS {
3524 UCHAR ucScaler; /* ATOM_SCALER1, ATOM_SCALER2 */
3525 UCHAR ucEnable; /* ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION */
3526 UCHAR ucTVStandard; /* */
3527 UCHAR ucPadding[1];
3528} ENABLE_SCALER_PARAMETERS;
3529#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
3530
3531/* ucEnable: */
3532#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0
3533#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1
3534#define SCALER_ENABLE_2TAP_ALPHA_MODE 2
3535#define SCALER_ENABLE_MULTITAP_MODE 3
3536
3537typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS {
3538 ULONG usHWIconHorzVertPosn; /* Hardware Icon Vertical position */
3539 UCHAR ucHWIconVertOffset; /* Hardware Icon Vertical offset */
3540 UCHAR ucHWIconHorzOffset; /* Hardware Icon Horizontal offset */
3541 UCHAR ucSelection; /* ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 */
3542 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
3543} ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
3544
3545typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION {
3546 ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
3547 ENABLE_CRTC_PARAMETERS sReserved;
3548} ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
3549
3550typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS {
3551 USHORT usHight; /* Image Hight */
3552 USHORT usWidth; /* Image Width */
3553 UCHAR ucSurface; /* Surface 1 or 2 */
3554 UCHAR ucPadding[3];
3555} ENABLE_GRAPH_SURFACE_PARAMETERS;
3556
3557typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 {
3558 USHORT usHight; /* Image Hight */
3559 USHORT usWidth; /* Image Width */
3560 UCHAR ucSurface; /* Surface 1 or 2 */
3561 UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
3562 UCHAR ucPadding[2];
3563} ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
3564
3565typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION {
3566 ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
3567 ENABLE_YUV_PS_ALLOCATION sReserved; /* Don't set this one */
3568} ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
3569
3570typedef struct _MEMORY_CLEAN_UP_PARAMETERS {
3571 USHORT usMemoryStart; /* in 8Kb boundry, offset from memory base address */
3572 USHORT usMemorySize; /* 8Kb blocks aligned */
3573} MEMORY_CLEAN_UP_PARAMETERS;
3574#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
3575
3576typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS {
3577 USHORT usX_Size; /* When use as input parameter, usX_Size indicates which CRTC */
3578 USHORT usY_Size;
3579} GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
3580
3581typedef struct _INDIRECT_IO_ACCESS {
3582 ATOM_COMMON_TABLE_HEADER sHeader;
3583 UCHAR IOAccessSequence[256];
3584} INDIRECT_IO_ACCESS;
3585
3586#define INDIRECT_READ 0x00
3587#define INDIRECT_WRITE 0x80
3588
3589#define INDIRECT_IO_MM 0
3590#define INDIRECT_IO_PLL 1
3591#define INDIRECT_IO_MC 2
3592#define INDIRECT_IO_PCIE 3
3593#define INDIRECT_IO_PCIEP 4
3594#define INDIRECT_IO_NBMISC 5
3595
3596#define INDIRECT_IO_PLL_READ INDIRECT_IO_PLL | INDIRECT_READ
3597#define INDIRECT_IO_PLL_WRITE INDIRECT_IO_PLL | INDIRECT_WRITE
3598#define INDIRECT_IO_MC_READ INDIRECT_IO_MC | INDIRECT_READ
3599#define INDIRECT_IO_MC_WRITE INDIRECT_IO_MC | INDIRECT_WRITE
3600#define INDIRECT_IO_PCIE_READ INDIRECT_IO_PCIE | INDIRECT_READ
3601#define INDIRECT_IO_PCIE_WRITE INDIRECT_IO_PCIE | INDIRECT_WRITE
3602#define INDIRECT_IO_PCIEP_READ INDIRECT_IO_PCIEP | INDIRECT_READ
3603#define INDIRECT_IO_PCIEP_WRITE INDIRECT_IO_PCIEP | INDIRECT_WRITE
3604#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ
3605#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE
3606
3607typedef struct _ATOM_OEM_INFO {
3608 ATOM_COMMON_TABLE_HEADER sHeader;
3609 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
3610} ATOM_OEM_INFO;
3611
3612typedef struct _ATOM_TV_MODE {
3613 UCHAR ucVMode_Num; /* Video mode number */
3614 UCHAR ucTV_Mode_Num; /* Internal TV mode number */
3615} ATOM_TV_MODE;
3616
3617typedef struct _ATOM_BIOS_INT_TVSTD_MODE {
3618 ATOM_COMMON_TABLE_HEADER sHeader;
3619 USHORT usTV_Mode_LUT_Offset; /* Pointer to standard to internal number conversion table */
3620 USHORT usTV_FIFO_Offset; /* Pointer to FIFO entry table */
3621 USHORT usNTSC_Tbl_Offset; /* Pointer to SDTV_Mode_NTSC table */
3622 USHORT usPAL_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */
3623 USHORT usCV_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */
3624} ATOM_BIOS_INT_TVSTD_MODE;
3625
3626typedef struct _ATOM_TV_MODE_SCALER_PTR {
3627 USHORT ucFilter0_Offset; /* Pointer to filter format 0 coefficients */
3628 USHORT usFilter1_Offset; /* Pointer to filter format 0 coefficients */
3629 UCHAR ucTV_Mode_Num;
3630} ATOM_TV_MODE_SCALER_PTR;
3631
3632typedef struct _ATOM_STANDARD_VESA_TIMING {
3633 ATOM_COMMON_TABLE_HEADER sHeader;
3634 ATOM_DTD_FORMAT aModeTimings[16]; /* 16 is not the real array number, just for initial allocation */
3635} ATOM_STANDARD_VESA_TIMING;
3636
3637typedef struct _ATOM_STD_FORMAT {
3638 USHORT usSTD_HDisp;
3639 USHORT usSTD_VDisp;
3640 USHORT usSTD_RefreshRate;
3641 USHORT usReserved;
3642} ATOM_STD_FORMAT;
3643
3644typedef struct _ATOM_VESA_TO_EXTENDED_MODE {
3645 USHORT usVESA_ModeNumber;
3646 USHORT usExtendedModeNumber;
3647} ATOM_VESA_TO_EXTENDED_MODE;
3648
3649typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT {
3650 ATOM_COMMON_TABLE_HEADER sHeader;
3651 ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
3652} ATOM_VESA_TO_INTENAL_MODE_LUT;
3653
3654/*************** ATOM Memory Related Data Structure ***********************/
3655typedef struct _ATOM_MEMORY_VENDOR_BLOCK {
3656 UCHAR ucMemoryType;
3657 UCHAR ucMemoryVendor;
3658 UCHAR ucAdjMCId;
3659 UCHAR ucDynClkId;
3660 ULONG ulDllResetClkRange;
3661} ATOM_MEMORY_VENDOR_BLOCK;
3662
3663typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG {
3664#if ATOM_BIG_ENDIAN
3665 ULONG ucMemBlkId:8;
3666 ULONG ulMemClockRange:24;
3667#else
3668 ULONG ulMemClockRange:24;
3669 ULONG ucMemBlkId:8;
3670#endif
3671} ATOM_MEMORY_SETTING_ID_CONFIG;
3672
3673typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS {
3674 ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
3675 ULONG ulAccess;
3676} ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
3677
3678typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK {
3679 ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
3680 ULONG aulMemData[1];
3681} ATOM_MEMORY_SETTING_DATA_BLOCK;
3682
3683typedef struct _ATOM_INIT_REG_INDEX_FORMAT {
3684 USHORT usRegIndex; /* MC register index */
3685 UCHAR ucPreRegDataLength; /* offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf */
3686} ATOM_INIT_REG_INDEX_FORMAT;
3687
3688typedef struct _ATOM_INIT_REG_BLOCK {
3689 USHORT usRegIndexTblSize; /* size of asRegIndexBuf */
3690 USHORT usRegDataBlkSize; /* size of ATOM_MEMORY_SETTING_DATA_BLOCK */
3691 ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
3692 ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
3693} ATOM_INIT_REG_BLOCK;
3694
3695#define END_OF_REG_INDEX_BLOCK 0x0ffff
3696#define END_OF_REG_DATA_BLOCK 0x00000000
3697#define ATOM_INIT_REG_MASK_FLAG 0x80
3698#define CLOCK_RANGE_HIGHEST 0x00ffffff
3699
3700#define VALUE_DWORD SIZEOF ULONG
3701#define VALUE_SAME_AS_ABOVE 0
3702#define VALUE_MASK_DWORD 0x84
3703
3704#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1)
3705#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
3706#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
3707
3708typedef struct _ATOM_MC_INIT_PARAM_TABLE {
3709 ATOM_COMMON_TABLE_HEADER sHeader;
3710 USHORT usAdjustARB_SEQDataOffset;
3711 USHORT usMCInitMemTypeTblOffset;
3712 USHORT usMCInitCommonTblOffset;
3713 USHORT usMCInitPowerDownTblOffset;
3714 ULONG ulARB_SEQDataBuf[32];
3715 ATOM_INIT_REG_BLOCK asMCInitMemType;
3716 ATOM_INIT_REG_BLOCK asMCInitCommon;
3717} ATOM_MC_INIT_PARAM_TABLE;
3718
3719#define _4Mx16 0x2
3720#define _4Mx32 0x3
3721#define _8Mx16 0x12
3722#define _8Mx32 0x13
3723#define _16Mx16 0x22
3724#define _16Mx32 0x23
3725#define _32Mx16 0x32
3726#define _32Mx32 0x33
3727#define _64Mx8 0x41
3728#define _64Mx16 0x42
3729
3730#define SAMSUNG 0x1
3731#define INFINEON 0x2
3732#define ELPIDA 0x3
3733#define ETRON 0x4
3734#define NANYA 0x5
3735#define HYNIX 0x6
3736#define MOSEL 0x7
3737#define WINBOND 0x8
3738#define ESMT 0x9
3739#define MICRON 0xF
3740
3741#define QIMONDA INFINEON
3742#define PROMOS MOSEL
3743
3744/* ///////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// */
3745
3746#define UCODE_ROM_START_ADDRESS 0x1c000
3747#define UCODE_SIGNATURE 0x4375434d /* 'MCuC' - MC uCode */
3748
3749/* uCode block header for reference */
3750
3751typedef struct _MCuCodeHeader {
3752 ULONG ulSignature;
3753 UCHAR ucRevision;
3754 UCHAR ucChecksum;
3755 UCHAR ucReserved1;
3756 UCHAR ucReserved2;
3757 USHORT usParametersLength;
3758 USHORT usUCodeLength;
3759 USHORT usReserved1;
3760 USHORT usReserved2;
3761} MCuCodeHeader;
3762
3763/* //////////////////////////////////////////////////////////////////////////////// */
3764
3765#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16
3766
3767#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF
3768typedef struct _ATOM_VRAM_MODULE_V1 {
3769 ULONG ulReserved;
3770 USHORT usEMRSValue;
3771 USHORT usMRSValue;
3772 USHORT usReserved;
3773 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
3774 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; */
3775 UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender */
3776 UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
3777 UCHAR ucRow; /* Number of Row,in power of 2; */
3778 UCHAR ucColumn; /* Number of Column,in power of 2; */
3779 UCHAR ucBank; /* Nunber of Bank; */
3780 UCHAR ucRank; /* Number of Rank, in power of 2 */
3781 UCHAR ucChannelNum; /* Number of channel; */
3782 UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */
3783 UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
3784 UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
3785 UCHAR ucReserved[2];
3786} ATOM_VRAM_MODULE_V1;
3787
3788typedef struct _ATOM_VRAM_MODULE_V2 {
3789 ULONG ulReserved;
3790 ULONG ulFlags; /* To enable/disable functionalities based on memory type */
3791 ULONG ulEngineClock; /* Override of default engine clock for particular memory type */
3792 ULONG ulMemoryClock; /* Override of default memory clock for particular memory type */
3793 USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
3794 USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
3795 USHORT usEMRSValue;
3796 USHORT usMRSValue;
3797 USHORT usReserved;
3798 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
3799 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
3800 UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */
3801 UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
3802 UCHAR ucRow; /* Number of Row,in power of 2; */
3803 UCHAR ucColumn; /* Number of Column,in power of 2; */
3804 UCHAR ucBank; /* Nunber of Bank; */
3805 UCHAR ucRank; /* Number of Rank, in power of 2 */
3806 UCHAR ucChannelNum; /* Number of channel; */
3807 UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */
3808 UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
3809 UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
3810 UCHAR ucRefreshRateFactor;
3811 UCHAR ucReserved[3];
3812} ATOM_VRAM_MODULE_V2;
3813
3814typedef struct _ATOM_MEMORY_TIMING_FORMAT {
3815 ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
3816 union {
3817 USHORT usMRS; /* mode register */
3818 USHORT usDDR3_MR0;
3819 };
3820 union {
3821 USHORT usEMRS; /* extended mode register */
3822 USHORT usDDR3_MR1;
3823 };
3824 UCHAR ucCL; /* CAS latency */
3825 UCHAR ucWL; /* WRITE Latency */
3826 UCHAR uctRAS; /* tRAS */
3827 UCHAR uctRC; /* tRC */
3828 UCHAR uctRFC; /* tRFC */
3829 UCHAR uctRCDR; /* tRCDR */
3830 UCHAR uctRCDW; /* tRCDW */
3831 UCHAR uctRP; /* tRP */
3832 UCHAR uctRRD; /* tRRD */
3833 UCHAR uctWR; /* tWR */
3834 UCHAR uctWTR; /* tWTR */
3835 UCHAR uctPDIX; /* tPDIX */
3836 UCHAR uctFAW; /* tFAW */
3837 UCHAR uctAOND; /* tAOND */
3838 union {
3839 struct {
3840 UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */
3841 UCHAR ucReserved;
3842 };
3843 USHORT usDDR3_MR2;
3844 };
3845} ATOM_MEMORY_TIMING_FORMAT;
3846
3847typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 {
3848 ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
3849 USHORT usMRS; /* mode register */
3850 USHORT usEMRS; /* extended mode register */
3851 UCHAR ucCL; /* CAS latency */
3852 UCHAR ucWL; /* WRITE Latency */
3853 UCHAR uctRAS; /* tRAS */
3854 UCHAR uctRC; /* tRC */
3855 UCHAR uctRFC; /* tRFC */
3856 UCHAR uctRCDR; /* tRCDR */
3857 UCHAR uctRCDW; /* tRCDW */
3858 UCHAR uctRP; /* tRP */
3859 UCHAR uctRRD; /* tRRD */
3860 UCHAR uctWR; /* tWR */
3861 UCHAR uctWTR; /* tWTR */
3862 UCHAR uctPDIX; /* tPDIX */
3863 UCHAR uctFAW; /* tFAW */
3864 UCHAR uctAOND; /* tAOND */
3865 UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */
3866/* ///////////////////////GDDR parameters/////////////////////////////////// */
3867 UCHAR uctCCDL; /* */
3868 UCHAR uctCRCRL; /* */
3869 UCHAR uctCRCWL; /* */
3870 UCHAR uctCKE; /* */
3871 UCHAR uctCKRSE; /* */
3872 UCHAR uctCKRSX; /* */
3873 UCHAR uctFAW32; /* */
3874 UCHAR ucReserved1; /* */
3875 UCHAR ucReserved2; /* */
3876 UCHAR ucTerminator;
3877} ATOM_MEMORY_TIMING_FORMAT_V1;
3878
3879typedef struct _ATOM_MEMORY_FORMAT {
3880 ULONG ulDllDisClock; /* memory DLL will be disable when target memory clock is below this clock */
3881 union {
3882 USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
3883 USHORT usDDR3_Reserved; /* Not used for DDR3 memory */
3884 };
3885 union {
3886 USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
3887 USHORT usDDR3_MR3; /* Used for DDR3 memory */
3888 };
3889 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
3890 UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */
3891 UCHAR ucRow; /* Number of Row,in power of 2; */
3892 UCHAR ucColumn; /* Number of Column,in power of 2; */
3893 UCHAR ucBank; /* Nunber of Bank; */
3894 UCHAR ucRank; /* Number of Rank, in power of 2 */
3895 UCHAR ucBurstSize; /* burst size, 0= burst size=4 1= burst size=8 */
3896 UCHAR ucDllDisBit; /* position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) */
3897 UCHAR ucRefreshRateFactor; /* memory refresh rate in unit of ms */
3898 UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
3899 UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
3900 UCHAR ucMemAttrib; /* Memory Device Addribute, like RDBI/WDBI etc */
3901 ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
3902} ATOM_MEMORY_FORMAT;
3903
3904typedef struct _ATOM_VRAM_MODULE_V3 {
3905 ULONG ulChannelMapCfg; /* board dependent paramenter:Channel combination */
3906 USHORT usSize; /* size of ATOM_VRAM_MODULE_V3 */
3907 USHORT usDefaultMVDDQ; /* board dependent parameter:Default Memory Core Voltage */
3908 USHORT usDefaultMVDDC; /* board dependent parameter:Default Memory IO Voltage */
3909 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
3910 UCHAR ucChannelNum; /* board dependent parameter:Number of channel; */
3911 UCHAR ucChannelSize; /* board dependent parameter:32bit or 64bit */
3912 UCHAR ucVREFI; /* board dependnt parameter: EXT or INT +160mv to -140mv */
3913 UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */
3914 UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
3915 ATOM_MEMORY_FORMAT asMemory; /* describ all of video memory parameters from memory spec */
3916} ATOM_VRAM_MODULE_V3;
3917
3918/* ATOM_VRAM_MODULE_V3.ucNPL_RT */
3919#define NPL_RT_MASK 0x0f
3920#define BATTERY_ODT_MASK 0xc0
3921
3922#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3
3923
3924typedef struct _ATOM_VRAM_MODULE_V4 {
3925 ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
3926 USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */
3927 USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
3928 /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
3929 USHORT usReserved;
3930 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
3931 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
3932 UCHAR ucChannelNum; /* Number of channels present in this module config */
3933 UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */
3934 UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
3935 UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
3936 UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */
3937 UCHAR ucVREFI; /* board dependent parameter */
3938 UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */
3939 UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
3940 UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
3941 /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
3942 UCHAR ucReserved[3];
3943
3944/* compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level */
3945 union {
3946 USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
3947 USHORT usDDR3_Reserved;
3948 };
3949 union {
3950 USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
3951 USHORT usDDR3_MR3; /* Used for DDR3 memory */
3952 };
3953 UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */
3954 UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
3955 UCHAR ucReserved2[2];
3956 ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
3957} ATOM_VRAM_MODULE_V4;
3958
/* Bit definitions for the ucMisc field of ATOM_VRAM_MODULE_V4/V5:
 * bits[1:0] rank count, bit2 burst length, bit4 dual chip-select. */
#define VRAM_MODULE_V4_MISC_RANK_MASK 0x3
#define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1
#define VRAM_MODULE_V4_MISC_BL_MASK 0x4
#define VRAM_MODULE_V4_MISC_BL8 0x4
#define VRAM_MODULE_V4_MISC_DUAL_CS 0x10
3964
/* VRAM module description, revision 5 (adds FIFO depth and CDR bandwidth
 * over V4).  The byte layout is produced by the video BIOS; do not reorder
 * or resize fields. */
typedef struct _ATOM_VRAM_MODULE_V5 {
 ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
 USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */
 USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
 /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
 USHORT usReserved;
 UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
 UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
 UCHAR ucChannelNum; /* Number of channels present in this module config */
 UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */
 UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
 UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
 UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */
 UCHAR ucVREFI; /* board dependent parameter */
 UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */
 UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
 UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
 /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
 UCHAR ucReserved[3];

/* compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level */
 USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
 USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
 UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed.  NOTE(review): "Vender" misspelling is part of the ABI-facing name; do not rename. */
 UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
 UCHAR ucFIFODepth; /* FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth */
 UCHAR ucCDR_Bandwidth; /* [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth */
 ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
} ATOM_VRAM_MODULE_V5;
3994
/* VRAM info table, revision 2: header plus an array of VRAM module entries. */
typedef struct _ATOM_VRAM_INFO_V2 {
 ATOM_COMMON_TABLE_HEADER sHeader;
 UCHAR ucNumOfVRAMModule; /* number of valid entries in aVramInfo */
 ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
} ATOM_VRAM_INFO_V2;
4000
/* VRAM info table, revision 3: adds offsets to the MC adjust / clock-patch
 * register blocks and the VID strap decode table. */
typedef struct _ATOM_VRAM_INFO_V3 {
 ATOM_COMMON_TABLE_HEADER sHeader;
 USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
 USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
 USHORT usRerseved; /* NOTE(review): misspelling of "Reserved" is part of the ABI-facing name; do not rename */
 UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */
 UCHAR ucNumOfVRAMModule; /* number of valid entries in aVramInfo */
 ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
 ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */
 /* ATOM_INIT_REG_BLOCK aMemAdjust; */
} ATOM_VRAM_INFO_V3;

/* Alias for the most recent VRAM info revision defined above. */
#define ATOM_VRAM_INFO_LAST ATOM_VRAM_INFO_V3
4014
/* VRAM info table, revision 4: carries V4 module entries and DQ line
 * remap information instead of the VID strap table. */
typedef struct _ATOM_VRAM_INFO_V4 {
 ATOM_COMMON_TABLE_HEADER sHeader;
 USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
 USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
 USHORT usRerseved; /* NOTE(review): misspelling of "Reserved" is part of the ABI-facing name; do not rename */
 UCHAR ucMemDQ7_0ByteRemap; /* DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 */
 ULONG ulMemDQ7_0BitRemap; /* each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21] */
 UCHAR ucReservde[4]; /* NOTE(review): misspelling of "Reserved" is part of the ABI-facing name; do not rename */
 UCHAR ucNumOfVRAMModule; /* number of valid entries in aVramInfo */
 ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
 ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */
 /* ATOM_INIT_REG_BLOCK aMemAdjust; */
} ATOM_VRAM_INFO_V4;
4028
/* GPIO strap decode table used to identify the installed VRAM module. */
typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO {
 ATOM_COMMON_TABLE_HEADER sHeader;
 UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */
} ATOM_VRAM_GPIO_DETECTION_INFO;
4033
/* Memory training parameters: loop count plus the register block the
 * training sequence programs. */
typedef struct _ATOM_MEMORY_TRAINING_INFO {
 ATOM_COMMON_TABLE_HEADER sHeader;
 UCHAR ucTrainingLoop; /* number of training iterations */
 UCHAR ucReserved[3];
 ATOM_INIT_REG_BLOCK asMemTrainingSetting;
} ATOM_MEMORY_TRAINING_INFO;
4040
/* Parameter block for the software (bit-banged) I2C control command table.
 * ucControl takes the SW_I2C_CNTL_* codes defined below. */
typedef struct SW_I2C_CNTL_DATA_PARAMETERS {
 UCHAR ucControl; /* SW_I2C_CNTL_* operation code */
 UCHAR ucData; /* data byte written or returned */
 UCHAR ucSatus; /* completion status.  NOTE(review): misspelling of "Status" is part of the ABI-facing name; do not rename */
 UCHAR ucTemp;
} SW_I2C_CNTL_DATA_PARAMETERS;

#define SW_I2C_CNTL_DATA_PS_ALLOCATION SW_I2C_CNTL_DATA_PARAMETERS
4049
/* Parameter block for the software I2C raw pin I/O command table.
 * ucAct takes the SW_I2C_IO_* codes defined below. */
typedef struct _SW_I2C_IO_DATA_PARAMETERS {
 USHORT GPIO_Info; /* identifies the GPIO pad pair used for SCL/SDA */
 UCHAR ucAct; /* SW_I2C_IO_* action code */
 UCHAR ucData; /* pin data, combined with SW_I2C_IO_CLOCK/DATA/ZERO/ONE */
} SW_I2C_IO_DATA_PARAMETERS;

#define SW_I2C_IO_DATA_PS_ALLOCATION SW_I2C_IO_DATA_PARAMETERS
4057
/****************************SW I2C CNTL DEFINITIONS**********************/
/* ucAct values for SW_I2C_IO_DATA_PARAMETERS (pin-level operations). */
#define SW_I2C_IO_RESET 0
#define SW_I2C_IO_GET 1
#define SW_I2C_IO_DRIVE 2
#define SW_I2C_IO_SET 3
#define SW_I2C_IO_START 4

/* Pin-select flags OR'ed into ucData. */
#define SW_I2C_IO_CLOCK 0
#define SW_I2C_IO_DATA 0x80

/* Pin-level flags OR'ed into ucData. */
#define SW_I2C_IO_ZERO 0
#define SW_I2C_IO_ONE 0x100

/* ucControl values for SW_I2C_CNTL_DATA_PARAMETERS (byte-level operations). */
#define SW_I2C_CNTL_READ 0
#define SW_I2C_CNTL_WRITE 1
#define SW_I2C_CNTL_START 2
#define SW_I2C_CNTL_STOP 3
#define SW_I2C_CNTL_OPEN 4
#define SW_I2C_CNTL_CLOSE 5
#define SW_I2C_CNTL_WRITE1BIT 6
4078
/* ==============================VESA definition Portion=============================== */
/* OEM product revision string reported through VBE.  This must be a string
 * literal: the previous form '01.00' was a 5-character multi-character
 * character constant (an implementation-defined int value, a leftover of
 * the assembly source's "db '01.00'"), not the intended text. */
#define VESA_OEM_PRODUCT_REV "01.00"
#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB /* refer to VBE spec p.32, no TTY support */
#define VESA_MODE_WIN_ATTRIBUTE 7
#define VESA_WIN_SIZE 64 /* window size in KB */
4084
/* Real-mode far pointer: 16-bit offset followed by 16-bit segment. */
typedef struct _PTR_32_BIT_STRUCTURE {
 USHORT Offset16;
 USHORT Segment16;
} PTR_32_BIT_STRUCTURE;

/* View of the same 32 bits either as segment:offset or as a flat ULONG. */
typedef union _PTR_32_BIT_UNION {
 PTR_32_BIT_STRUCTURE SegmentOffset;
 ULONG Ptr32_Bit;
} PTR_32_BIT_UNION;
4094
/* Updatable portion of the VBE 1.2 VbeInfoBlock returned by INT 10h
 * function 4F00h (see the VBE specification). */
typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE {
 UCHAR VbeSignature[4]; /* "VESA" */
 USHORT VbeVersion; /* BCD, e.g. 0x0300 for VBE 3.0 */
 PTR_32_BIT_UNION OemStringPtr; /* real-mode pointer to OEM string */
 UCHAR Capabilities[4];
 PTR_32_BIT_UNION VideoModePtr; /* real-mode pointer to mode number list */
 USHORT TotalMemory; /* in 64KB units */
} VBE_1_2_INFO_BLOCK_UPDATABLE;
4103
/* VBE 2.0+ extension of the updatable VbeInfoBlock: adds OEM software
 * revision and OEM identification string pointers. */
typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE {
 VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock; /* VBE 1.2 fields come first */
 USHORT OemSoftRev;
 PTR_32_BIT_UNION OemVendorNamePtr;
 PTR_32_BIT_UNION OemProductNamePtr;
 PTR_32_BIT_UNION OemProductRevPtr;
} VBE_2_0_INFO_BLOCK_UPDATABLE;
4111
/* The updatable block interpreted per the VBE version in use. */
typedef union _VBE_VERSION_UNION {
 VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
 VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
} VBE_VERSION_UNION;

/* Full 512-byte VbeInfoBlock: updatable part, reserved pad, OEM scratch. */
typedef struct _VBE_INFO_BLOCK {
 VBE_VERSION_UNION UpdatableVBE_Info;
 UCHAR Reserved[222];
 UCHAR OemData[256];
} VBE_INFO_BLOCK;
4122
/* Flat-panel description reported through the VBE flat-panel interface. */
typedef struct _VBE_FP_INFO {
 USHORT HSize; /* horizontal size in pixels */
 USHORT VSize; /* vertical size in pixels */
 USHORT FPType; /* panel type */
 UCHAR RedBPP; /* bits per pixel per color component */
 UCHAR GreenBPP;
 UCHAR BlueBPP;
 UCHAR ReservedBPP;
 ULONG RsvdOffScrnMemSize; /* reserved off-screen memory size */
 ULONG RsvdOffScrnMEmPtr; /* reserved off-screen memory pointer */
 UCHAR Reserved[14];
} VBE_FP_INFO;
4135
/* VBE ModeInfoBlock returned by INT 10h function 4F01h; the "db/dw/dd"
 * comments mirror the assembly layout in the VBE specification. */
typedef struct _VESA_MODE_INFO_BLOCK {
/* Mandatory information for all VBE revisions */
 USHORT ModeAttributes; /* dw ? ; mode attributes */
 UCHAR WinAAttributes; /* db ? ; window A attributes */
 UCHAR WinBAttributes; /* db ? ; window B attributes */
 USHORT WinGranularity; /* dw ? ; window granularity */
 USHORT WinSize; /* dw ? ; window size */
 USHORT WinASegment; /* dw ? ; window A start segment */
 USHORT WinBSegment; /* dw ? ; window B start segment */
 ULONG WinFuncPtr; /* dd ? ; real mode pointer to window function */
 USHORT BytesPerScanLine; /* dw ? ; bytes per scan line */

/* ; Mandatory information for VBE 1.2 and above */
 USHORT XResolution; /* dw ? ; horizontal resolution in pixels or characters */
 USHORT YResolution; /* dw ? ; vertical resolution in pixels or characters */
 UCHAR XCharSize; /* db ? ; character cell width in pixels */
 UCHAR YCharSize; /* db ? ; character cell height in pixels */
 UCHAR NumberOfPlanes; /* db ? ; number of memory planes */
 UCHAR BitsPerPixel; /* db ? ; bits per pixel */
 UCHAR NumberOfBanks; /* db ? ; number of banks */
 UCHAR MemoryModel; /* db ? ; memory model type */
 UCHAR BankSize; /* db ? ; bank size in KB */
 UCHAR NumberOfImagePages; /* db ? ; number of images */
 UCHAR ReservedForPageFunction; /* db 1 ; reserved for page function */

/* ; Direct Color fields(required for direct/6 and YUV/7 memory models) */
 UCHAR RedMaskSize; /* db ? ; size of direct color red mask in bits */
 UCHAR RedFieldPosition; /* db ? ; bit position of lsb of red mask */
 UCHAR GreenMaskSize; /* db ? ; size of direct color green mask in bits */
 UCHAR GreenFieldPosition; /* db ? ; bit position of lsb of green mask */
 UCHAR BlueMaskSize; /* db ? ; size of direct color blue mask in bits */
 UCHAR BlueFieldPosition; /* db ? ; bit position of lsb of blue mask */
 UCHAR RsvdMaskSize; /* db ? ; size of direct color reserved mask in bits */
 UCHAR RsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask */
 UCHAR DirectColorModeInfo; /* db ? ; direct color mode attributes */

/* ; Mandatory information for VBE 2.0 and above */
 ULONG PhysBasePtr; /* dd ? ; physical address for flat memory frame buffer */
 ULONG Reserved_1; /* dd 0 ; reserved - always set to 0 */
 USHORT Reserved_2; /* dw 0 ; reserved - always set to 0 */

/* ; Mandatory information for VBE 3.0 and above */
 USHORT LinBytesPerScanLine; /* dw ? ; bytes per scan line for linear modes */
 UCHAR BnkNumberOfImagePages; /* db ? ; number of images for banked modes */
 UCHAR LinNumberOfImagPages; /* db ? ; number of images for linear modes */
 UCHAR LinRedMaskSize; /* db ? ; size of direct color red mask(linear modes) */
 UCHAR LinRedFieldPosition; /* db ? ; bit position of lsb of red mask(linear modes) */
 UCHAR LinGreenMaskSize; /* db ? ; size of direct color green mask(linear modes) */
 UCHAR LinGreenFieldPosition; /* db ? ; bit position of lsb of green mask(linear modes) */
 UCHAR LinBlueMaskSize; /* db ? ; size of direct color blue mask(linear modes) */
 UCHAR LinBlueFieldPosition; /* db ? ; bit position of lsb of blue mask(linear modes) */
 UCHAR LinRsvdMaskSize; /* db ? ; size of direct color reserved mask(linear modes) */
 UCHAR LinRsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask(linear modes) */
 ULONG MaxPixelClock; /* dd ? ; maximum pixel clock(in Hz) for graphics mode */
 UCHAR Reserved; /* db 190 dup (0) -- NOTE(review): the spec comment says 190
			* reserved bytes, but only ONE byte is declared here, so this
			* struct is smaller than the 256-byte VBE ModeInfoBlock.
			* Confirm against consumers before depending on sizeof(). */
} VESA_MODE_INFO_BLOCK;
4192
/* BIOS function CALLS */
/* INT 10h AL function codes (AH = ATOM_BIOS_EXTENDED_FUNCTION_CODE), plus
 * the BX sub-function codes used with some of them. */
#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 /* ATI Extended Function code */
#define ATOM_BIOS_FUNCTION_COP_MODE 0x00
#define ATOM_BIOS_FUNCTION_SHORT_QUERY1 0x04
#define ATOM_BIOS_FUNCTION_SHORT_QUERY2 0x05
#define ATOM_BIOS_FUNCTION_SHORT_QUERY3 0x06
#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B
#define ATOM_BIOS_FUNCTION_ASIC_DSTATE 0x0E
#define ATOM_BIOS_FUNCTION_DEBUG_PLAY 0x0F
#define ATOM_BIOS_FUNCTION_STV_STD 0x16
#define ATOM_BIOS_FUNCTION_DEVICE_DET 0x17
#define ATOM_BIOS_FUNCTION_DEVICE_SWITCH 0x18

#define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82
#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83
#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84
#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A
#define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B
#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 /* Sub function 80 */
#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 /* Sub function 81 */

#define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D
#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E
#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F
#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 /* Sub function 03 */
#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 /* Sub function 7 */
#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 /* Notify caller the current thermal state */
#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 /* Notify caller the current critical state */
#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 /* Sub function 85 */
#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900 /* Sub function 89 */
#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 /* Notify caller that ADC is supported */

/* VESA DPMS control (VBE function 4F10h). */
#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 /* Set DPMS */
#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 /* BL: Sub function 01 */
#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 /* BL: Sub function 02 */
#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 /* BH Parameter for DPMS ON. */
#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 /* BH Parameter for DPMS STANDBY */
#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 /* BH Parameter for DPMS SUSPEND */
#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 /* BH Parameter for DPMS OFF */
#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 /* BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) */

/* Masks for decoding the AX/register results of the BIOS calls above. */
#define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L
#define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L
#define ATOM_BIOS_REG_LOW_MASK 0x000000FFL
4237
/* structure used for VBIOS only */

/* DispOutInfoTable */
/* One transmitter entry of the DispOutInfo table: maps a transmitter
 * object to the devices it supports and the encoders that can feed it. */
typedef struct _ASIC_TRANSMITTER_INFO {
 USHORT usTransmitterObjId;
 USHORT usSupportDevice; /* bitmap of supported ATOM_DEVICE_* */
 UCHAR ucTransmitterCmdTblId; /* command table used to program this transmitter */
 UCHAR ucConfig;
 UCHAR ucEncoderID; /* available 1st encoder ( default ) */
 UCHAR ucOptionEncoderID; /* available 2nd encoder ( optional ) */
 UCHAR uc2ndEncoderID;
 UCHAR ucReserved;
} ASIC_TRANSMITTER_INFO;
4251
/* One encoder entry of the DispOutInfo table. */
typedef struct _ASIC_ENCODER_INFO {
 UCHAR ucEncoderID;
 UCHAR ucEncoderConfig;
 USHORT usEncoderCmdTblId; /* command table used to program this encoder */
} ASIC_ENCODER_INFO;
4257
/* DispOutInfo table: offsets to, and first entries of, the transmitter and
 * encoder lists.  The [1] arrays are presumably placeholders for
 * variable-length data located via ptrTransmitterInfo/ptrEncoderInfo. */
typedef struct _ATOM_DISP_OUT_INFO {
 ATOM_COMMON_TABLE_HEADER sHeader;
 USHORT ptrTransmitterInfo; /* offset to the ASIC_TRANSMITTER_INFO list */
 USHORT ptrEncoderInfo; /* offset to the ASIC_ENCODER_INFO list */
 ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
 ASIC_ENCODER_INFO asEncoderInfo[1];
} ATOM_DISP_OUT_INFO;
4265
/* DispDevicePriorityInfo */
/* Ordered list of display device priorities. */
typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO {
 ATOM_COMMON_TABLE_HEADER sHeader;
 USHORT asDevicePriority[16];
} ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
4271
/* ProcessAuxChannelTransactionTable */
/* Parameter block for a DisplayPort AUX channel transaction.  The union
 * overlays the input delay with the reply status returned on completion. */
typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS {
 USHORT lpAuxRequest; /* offset of the AUX request buffer */
 USHORT lpDataOut; /* offset of the output data buffer */
 UCHAR ucChannelID;
 union {
  UCHAR ucReplyStatus; /* out: AUX reply status */
  UCHAR ucDelay; /* in: delay before the transaction */
 };
 UCHAR ucDataOutLen; /* out: number of bytes returned */
 UCHAR ucReserved;
} PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;

#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
4286
/* GetSinkType */

/* Parameter block for the DP encoder service command table (sink detection
 * and link training); ucAction takes the ATOM_DP_ACTION_* codes below. */
typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
 USHORT ucLinkClock; /* NOTE(review): USHORT despite the "uc" prefix; the name is ABI-facing, do not rename */
 union {
  UCHAR ucConfig; /* for DP training command */
  UCHAR ucI2cId; /* use for GET_SINK_TYPE command */
 };
 UCHAR ucAction; /* ATOM_DP_ACTION_* code */
 UCHAR ucStatus; /* out: result of the requested action */
 UCHAR ucLaneNum;
 UCHAR ucReserved[2];
} DP_ENCODER_SERVICE_PARAMETERS;
4300
/* ucAction */
/* Action codes for DP_ENCODER_SERVICE_PARAMETERS.ucAction. */
#define ATOM_DP_ACTION_GET_SINK_TYPE 0x01
#define ATOM_DP_ACTION_TRAINING_START 0x02
#define ATOM_DP_ACTION_TRAINING_COMPLETE 0x03
#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL 0x04
#define ATOM_DP_ACTION_SET_VSWING_PREEMP 0x05
#define ATOM_DP_ACTION_GET_VSWING_PREEMP 0x06
#define ATOM_DP_ACTION_BLANKING 0x07

/* ucConfig */
/* Encoder/link selection bits for DP_ENCODER_SERVICE_PARAMETERS.ucConfig. */
#define ATOM_DP_CONFIG_ENCODER_SEL_MASK 0x03
#define ATOM_DP_CONFIG_DIG1_ENCODER 0x00
#define ATOM_DP_CONFIG_DIG2_ENCODER 0x01
#define ATOM_DP_CONFIG_EXTERNAL_ENCODER 0x02
#define ATOM_DP_CONFIG_LINK_SEL_MASK 0x04
#define ATOM_DP_CONFIG_LINK_A 0x00
#define ATOM_DP_CONFIG_LINK_B 0x04

#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
4320
/* DP_TRAINING_TABLE */
/* Fixed offsets (relative to ATOM_DP_TRAINING_TBL_ADDR) of the DPCD
 * register sub-tables used during DP link training and AUX-based DDC. */
#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16)
#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24)
#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32)
#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40)
#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48)
#define DP_I2C_AUX_DDC_WRITE_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 60)
#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64)
#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72)
#define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76)
#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80)
4334
/* Parameter block for a hardware I2C channel transaction.  The union
 * overlays the input register index with the returned status. */
typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS {
 UCHAR ucI2CSpeed;
 union {
  UCHAR ucRegIndex; /* in: starting register index */
  UCHAR ucStatus; /* out: transaction status */
 };
 USHORT lpI2CDataOut; /* offset of the data buffer */
 UCHAR ucFlag; /* HW_I2C_WRITE or HW_I2C_READ */
 UCHAR ucTransBytes; /* number of bytes to transfer */
 UCHAR ucSlaveAddr;
 UCHAR ucLineNumber; /* I2C line (engine) to use */
} PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;

#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS

/* ucFlag */
#define HW_I2C_WRITE 1
#define HW_I2C_READ 0
4353
4354/****************************************************************************/
4355/* Portion VI: Definitinos being oboselete */
4356/****************************************************************************/
4357
4358/* ========================================================================================== */
4359/* Remove the definitions below when driver is ready! */
4360typedef struct _ATOM_DAC_INFO {
4361 ATOM_COMMON_TABLE_HEADER sHeader;
4362 USHORT usMaxFrequency; /* in 10kHz unit */
4363 USHORT usReserved;
4364} ATOM_DAC_INFO;
4365
/* Legacy DAC adjustment table: bandgap/DAC trim values and the MUX register
 * info for each output path (CRT2, NTSC/PAL TV, component video). */
typedef struct _COMPASSIONATE_DATA {
 ATOM_COMMON_TABLE_HEADER sHeader;

 /* ============================== DAC1 portion */
 UCHAR ucDAC1_BG_Adjustment;
 UCHAR ucDAC1_DAC_Adjustment;
 USHORT usDAC1_FORCE_Data;
 /* ============================== DAC2 portion */
 UCHAR ucDAC2_CRT2_BG_Adjustment;
 UCHAR ucDAC2_CRT2_DAC_Adjustment;
 USHORT usDAC2_CRT2_FORCE_Data;
 USHORT usDAC2_CRT2_MUX_RegisterIndex;
 UCHAR ucDAC2_CRT2_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
 UCHAR ucDAC2_NTSC_BG_Adjustment;
 UCHAR ucDAC2_NTSC_DAC_Adjustment;
 USHORT usDAC2_TV1_FORCE_Data;
 USHORT usDAC2_TV1_MUX_RegisterIndex;
 UCHAR ucDAC2_TV1_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
 UCHAR ucDAC2_CV_BG_Adjustment;
 UCHAR ucDAC2_CV_DAC_Adjustment;
 USHORT usDAC2_CV_FORCE_Data;
 USHORT usDAC2_CV_MUX_RegisterIndex;
 UCHAR ucDAC2_CV_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
 UCHAR ucDAC2_PAL_BG_Adjustment;
 UCHAR ucDAC2_PAL_DAC_Adjustment;
 USHORT usDAC2_TV2_FORCE_Data;
} COMPASSIONATE_DATA;
4393
/****************************Supported Device Info Table Definitions**********************/
/* ucConnectInfo: */
/* [7:4] - connector type */
/* = 1 - VGA connector */
/* = 2 - DVI-I */
/* = 3 - DVI-D */
/* = 4 - DVI-A */
/* = 5 - SVIDEO */
/* = 6 - COMPOSITE */
/* = 7 - LVDS */
/* = 8 - DIGITAL LINK */
/* = 9 - SCART */
/* = 0xA - HDMI_type A */
/* = 0xB - HDMI_type B */
/* = 0xE - Special case1 (DVI+DIN) */
/* Others=TBD */
/* [3:0] - DAC Associated */
/* = 0 - no DAC */
/* = 1 - DACA */
/* = 2 - DACB */
/* = 3 - External DAC */
/* Others=TBD */
/* */

/* Bitfield view of ucConnectInfo; the #if keeps the nibble order correct
 * on big-endian builds. */
typedef struct _ATOM_CONNECTOR_INFO {
#if ATOM_BIG_ENDIAN
 UCHAR bfConnectorType:4;
 UCHAR bfAssociatedDAC:4;
#else
 UCHAR bfAssociatedDAC:4;
 UCHAR bfConnectorType:4;
#endif
} ATOM_CONNECTOR_INFO;
4427
/* Access ucConnectInfo either as bitfields or as the raw byte. */
typedef union _ATOM_CONNECTOR_INFO_ACCESS {
 ATOM_CONNECTOR_INFO sbfAccess;
 UCHAR ucAccess;
} ATOM_CONNECTOR_INFO_ACCESS;

/* Connector description plus the I2C/DDC line assigned to it. */
typedef struct _ATOM_CONNECTOR_INFO_I2C {
 ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
} ATOM_CONNECTOR_INFO_I2C;
4437
/* Supported-devices table, revision 1: device-support bitmap plus one
 * connector/I2C entry per device. */
typedef struct _ATOM_SUPPORTED_DEVICES_INFO {
 ATOM_COMMON_TABLE_HEADER sHeader;
 USHORT usDeviceSupport; /* bitmap of supported ATOM_DEVICE_* */
 ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
} ATOM_SUPPORTED_DEVICES_INFO;

/* Sentinel for an interrupt-source bitmap entry with no mapping. */
#define NO_INT_SRC_MAPPED 0xFF

/* Per-connector interrupt-source bitmap. */
typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP {
 UCHAR ucIntSrcBitmap;
} ATOM_CONNECTOR_INC_SRC_BITMAP;
4449
/* Supported-devices table, revision 2: adds per-device interrupt-source
 * bitmaps. */
typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 {
 ATOM_COMMON_TABLE_HEADER sHeader;
 USHORT usDeviceSupport; /* bitmap of supported ATOM_DEVICE_* */
 ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
 ATOM_CONNECTOR_INC_SRC_BITMAP
     asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
} ATOM_SUPPORTED_DEVICES_INFO_2;

/* Revision 2.1: same layout but sized by ATOM_MAX_SUPPORTED_DEVICE. */
typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 {
 ATOM_COMMON_TABLE_HEADER sHeader;
 USHORT usDeviceSupport; /* bitmap of supported ATOM_DEVICE_* */
 ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
 ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
} ATOM_SUPPORTED_DEVICES_INFO_2d1;

#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
4466
/* Per-frequency PLL tuning entry used by the TMDS table below. */
typedef struct _ATOM_MISC_CONTROL_INFO {
 USHORT usFrequency; /* pixel clock this entry applies up to */
 UCHAR ucPLL_ChargePump; /* PLL charge-pump gain control */
 UCHAR ucPLL_DutyCycle; /* PLL duty cycle control */
 UCHAR ucPLL_VCO_Gain; /* PLL VCO gain control */
 UCHAR ucPLL_VoltageSwing; /* PLL driver voltage swing control */
} ATOM_MISC_CONTROL_INFO;

#define ATOM_MAX_MISC_INFO 4

/* Legacy TMDS capability table: max frequency plus PLL tuning entries. */
typedef struct _ATOM_TMDS_INFO {
 ATOM_COMMON_TABLE_HEADER sHeader;
 USHORT usMaxFrequency; /* in 10Khz */
 ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
} ATOM_TMDS_INFO;
4482
/* Attribute byte for an analog encoder (TV standard selection). */
typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE {
 UCHAR ucTVStandard; /* Same as TV standards defined above, */
 UCHAR ucPadding[1];
} ATOM_ENCODER_ANALOG_ATTRIBUTE;

/* Attribute byte for a digital encoder. */
typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE {
 UCHAR ucAttribute; /* Same as other digital encoder attributes defined above */
 UCHAR ucPadding[1];
} ATOM_ENCODER_DIGITAL_ATTRIBUTE;

/* Analog or digital attribute, selected by the device type in use. */
typedef union _ATOM_ENCODER_ATTRIBUTE {
 ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
 ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
} ATOM_ENCODER_ATTRIBUTE;
4497
/* Parameter block for the (legacy) DVO encoder control command table. */
typedef struct _DVO_ENCODER_CONTROL_PARAMETERS {
 USHORT usPixelClock;
 USHORT usEncoderID;
 UCHAR ucDeviceType; /* Use ATOM_DEVICE_xxx1_Index to indicate device type only. */
 UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
 ATOM_ENCODER_ATTRIBUTE usDevAttr; /* analog or digital attribute per ucDeviceType */
} DVO_ENCODER_CONTROL_PARAMETERS;

/* Workspace allocation for the table above; sReserved is scratch for the
 * nested HW I2C call. */
typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
 DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
 WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
} DVO_ENCODER_CONTROL_PS_ALLOCATION;
4510
/* External TMDS transmitter ASIC IDs and capability flags. */
#define ATOM_XTMDS_ASIC_SI164_ID 1
#define ATOM_XTMDS_ASIC_SI178_ID 2
#define ATOM_XTMDS_ASIC_TFP513_ID 3
#define ATOM_XTMDS_SUPPORTED_SINGLELINK 0x00000001
#define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002
#define ATOM_XTMDS_MVPU_FPGA 0x00000004

/* External TMDS transmitter description (chip ID, I2C access, link caps). */
typedef struct _ATOM_XTMDS_INFO {
 ATOM_COMMON_TABLE_HEADER sHeader;
 USHORT usSingleLinkMaxFrequency;
 ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* Point the ID on which I2C is used to control external chip */
 UCHAR ucXtransimitterID; /* ATOM_XTMDS_ASIC_* chip ID.  NOTE(review): "Xtransimitter" misspelling is part of the ABI-facing name; do not rename */
 UCHAR ucSupportedLink; /* Bit field, bit0=1, single link supported;bit1=1,dual link supported */
 UCHAR ucSequnceAlterID; /* Even with the same external TMDS asic, it's possible that the program sequence alters */
 /* due to design. This ID is used to alert driver that the sequence is not "standard"! */
 UCHAR ucMasterAddress; /* Address to control Master xTMDS Chip */
 UCHAR ucSlaveAddress; /* Address to control Slave xTMDS Chip */
} ATOM_XTMDS_INFO;
4529
/* Parameter block for signalling a DFP DPMS state change. */
typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
 UCHAR ucEnable; /* ATOM_ENABLE=On or ATOM_DISABLE=Off */
 UCHAR ucDevice; /* ATOM_DEVICE_DFP1_INDEX.... */
 UCHAR ucPadding[2];
} DFP_DPMS_STATUS_CHANGE_PARAMETERS;
4535
/****************************Legacy Power Play Table Definitions **********************/

/* Definitions for ulPowerPlayMiscInfo */
#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L
#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L
#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L

#define ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT 0x00000004L
#define ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH 0x00000008L

#define ATOM_PM_MISCINFO_LOAD_PERFORMANCE_EN 0x00000010L

#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L
#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L
#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L /* When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program */

#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L
#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L
#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L
#define ATOM_PM_MISCINFO_LOAD_BALANCE_EN 0x00000800L
#define ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE 0x00001000L
#define ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE 0x00002000L
#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L

#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L
#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L
#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L
#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L
#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L

#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L /* 0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved */
#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20

#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L
#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L
#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L
#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L /* When set, Dynamic */
#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L /* When set, Dynamic */
#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L /* When set, This mode is for accelerated 3D mode */

#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L /* 1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) */
#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28
#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L

/* Definitions for ulMiscInfo2 */
#define ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE 0x00000001L
#define ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT 0x00000002L
#define ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN 0x00000004L
#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L
#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L
#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L
#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L /* If this bit is set in multi-pp mode, then driver will pick the one with the minor power consumption. */
  /* If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback */
#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L
#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L
#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L
4591
/* ucTableFormatRevision=1 */
/* ucTableContentRevision=1 */
/* One legacy power level (format 1.1); clocks are 16-bit in 10kHz units. */
typedef struct _ATOM_POWERMODE_INFO {
 ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
 ULONG ulReserved1; /* must set to 0 */
 ULONG ulReserved2; /* must set to 0 */
 USHORT usEngineClock;
 USHORT usMemoryClock;
 UCHAR ucVoltageDropIndex; /* index to GPIO table */
 UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
 UCHAR ucMinTemperature;
 UCHAR ucMaxTemperature;
 UCHAR ucNumPciELanes; /* number of PCIE lanes */
} ATOM_POWERMODE_INFO;
4606
/* ucTableFormatRevision=2 */
/* ucTableContentRevision=1 */
/* Format 2.1 power level: widens the clocks to 32 bits and replaces the
 * reserved words with a second misc-info bitmap. */
typedef struct _ATOM_POWERMODE_INFO_V2 {
 ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
 ULONG ulMiscInfo2;
 ULONG ulEngineClock;
 ULONG ulMemoryClock;
 UCHAR ucVoltageDropIndex; /* index to GPIO table */
 UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
 UCHAR ucMinTemperature;
 UCHAR ucMaxTemperature;
 UCHAR ucNumPciELanes; /* number of PCIE lanes */
} ATOM_POWERMODE_INFO_V2;
4620
/* ucTableFormatRevision=2 */
/* ucTableContentRevision=2 */
/* Format 2.2 power level: adds a separate VDDCI voltage index. */
typedef struct _ATOM_POWERMODE_INFO_V3 {
 ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
 ULONG ulMiscInfo2;
 ULONG ulEngineClock;
 ULONG ulMemoryClock;
 UCHAR ucVoltageDropIndex; /* index to Core (VDDC) voltage table */
 UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
 UCHAR ucMinTemperature;
 UCHAR ucMaxTemperature;
 UCHAR ucNumPciELanes; /* number of PCIE lanes */
 UCHAR ucVDDCI_VoltageDropIndex; /* index to VDDCI voltage table */
} ATOM_POWERMODE_INFO_V3;
4635
/* Maximum number of power levels in the legacy tables below. */
#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8

/* Bits for ucOverdriveIntBitmap. */
#define ATOM_PP_OVERDRIVE_INTBITMAP_AUXWIN 0x01
#define ATOM_PP_OVERDRIVE_INTBITMAP_OVERDRIVE 0x02

/* Thermal controller chip IDs for ucOverdriveThermalController. */
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM63 0x01
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1032 0x02
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ADM1030 0x03
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 /* Andigilog */
4648
/* Legacy PowerPlay table (format 1): thermal controller setup plus an
 * array of format-1 power levels. */
typedef struct _ATOM_POWERPLAY_INFO {
 ATOM_COMMON_TABLE_HEADER sHeader;
 UCHAR ucOverdriveThermalController; /* ATOM_PP_OVERDRIVE_THERMALCONTROLLER_* */
 UCHAR ucOverdriveI2cLine;
 UCHAR ucOverdriveIntBitmap; /* ATOM_PP_OVERDRIVE_INTBITMAP_* */
 UCHAR ucOverdriveControllerAddress;
 UCHAR ucSizeOfPowerModeEntry;
 UCHAR ucNumOfPowerModeEntries; /* number of valid entries in asPowerPlayInfo */
 ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
} ATOM_POWERPLAY_INFO;
4659
/* Legacy PowerPlay table carrying format-2.1 power levels. */
typedef struct _ATOM_POWERPLAY_INFO_V2 {
 ATOM_COMMON_TABLE_HEADER sHeader;
 UCHAR ucOverdriveThermalController; /* ATOM_PP_OVERDRIVE_THERMALCONTROLLER_* */
 UCHAR ucOverdriveI2cLine;
 UCHAR ucOverdriveIntBitmap; /* ATOM_PP_OVERDRIVE_INTBITMAP_* */
 UCHAR ucOverdriveControllerAddress;
 UCHAR ucSizeOfPowerModeEntry;
 UCHAR ucNumOfPowerModeEntries; /* number of valid entries in asPowerPlayInfo */
 ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
} ATOM_POWERPLAY_INFO_V2;
4670
/* Legacy PowerPlay table carrying format-2.2 power levels. */
typedef struct _ATOM_POWERPLAY_INFO_V3 {
 ATOM_COMMON_TABLE_HEADER sHeader;
 UCHAR ucOverdriveThermalController; /* ATOM_PP_OVERDRIVE_THERMALCONTROLLER_* */
 UCHAR ucOverdriveI2cLine;
 UCHAR ucOverdriveIntBitmap; /* ATOM_PP_OVERDRIVE_INTBITMAP_* */
 UCHAR ucOverdriveControllerAddress;
 UCHAR ucSizeOfPowerModeEntry;
 UCHAR ucNumOfPowerModeEntries; /* number of valid entries in asPowerPlayInfo */
 ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
} ATOM_POWERPLAY_INFO_V3;
4681
4682/**************************************************************************/
4683
4684/* Following definitions are for compatibility issues in different SW components. */
4685#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
4686#define Object_Info Object_Header
4687#define AdjustARB_SEQ MC_InitParameter
4688#define VRAM_GPIO_DetectionInfo VoltageObjectInfo
4689#define ASIC_VDDCI_Info ASIC_ProfilingInfo
4690#define ASIC_MVDDQ_Info MemoryTrainingInfo
4691#define SS_Info PPLL_SS_Info
4692#define ASIC_MVDDC_Info ASIC_InternalSS_Info
4693#define DispDevicePriorityInfo SaveRestoreInfo
4694#define DispOutInfo TV_VideoMode
4695
4696#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE
4697#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE
4698
4699/* New device naming, remove them when both DAL/VBIOS is ready */
4700#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
4701#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
4702
4703#define DFP1X_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
4704#define DFP1X_OUTPUT_CONTROL_PS_ALLOCATION DFP1X_OUTPUT_CONTROL_PARAMETERS
4705
4706#define DFP1I_OUTPUT_CONTROL_PARAMETERS DFP1_OUTPUT_CONTROL_PARAMETERS
4707#define DFP1I_OUTPUT_CONTROL_PS_ALLOCATION DFP1_OUTPUT_CONTROL_PS_ALLOCATION
4708
4709#define ATOM_DEVICE_DFP1I_SUPPORT ATOM_DEVICE_DFP1_SUPPORT
4710#define ATOM_DEVICE_DFP1X_SUPPORT ATOM_DEVICE_DFP2_SUPPORT
4711
4712#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX
4713#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX
4714
4715#define ATOM_DEVICE_DFP2I_INDEX 0x00000009
4716#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX)
4717
4718#define ATOM_S0_DFP1I ATOM_S0_DFP1
4719#define ATOM_S0_DFP1X ATOM_S0_DFP2
4720
4721#define ATOM_S0_DFP2I 0x00200000L
4722#define ATOM_S0_DFP2Ib2 0x20
4723
4724#define ATOM_S2_DFP1I_DPMS_STATE ATOM_S2_DFP1_DPMS_STATE
4725#define ATOM_S2_DFP1X_DPMS_STATE ATOM_S2_DFP2_DPMS_STATE
4726
4727#define ATOM_S2_DFP2I_DPMS_STATE 0x02000000L
4728#define ATOM_S2_DFP2I_DPMS_STATEb3 0x02
4729
4730#define ATOM_S3_DFP2I_ACTIVEb1 0x02
4731
4732#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE
4733#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE
4734
4735#define ATOM_S3_DFP2I_ACTIVE 0x00000200L
4736
4737#define ATOM_S3_DFP1I_CRTC_ACTIVE ATOM_S3_DFP1_CRTC_ACTIVE
4738#define ATOM_S3_DFP1X_CRTC_ACTIVE ATOM_S3_DFP2_CRTC_ACTIVE
4739#define ATOM_S3_DFP2I_CRTC_ACTIVE 0x02000000L
4740
4741#define ATOM_S3_DFP2I_CRTC_ACTIVEb3 0x02
4742#define ATOM_S5_DOS_REQ_DFP2Ib1 0x02
4743
4744#define ATOM_S5_DOS_REQ_DFP2I 0x0200
4745#define ATOM_S6_ACC_REQ_DFP1I ATOM_S6_ACC_REQ_DFP1
4746#define ATOM_S6_ACC_REQ_DFP1X ATOM_S6_ACC_REQ_DFP2
4747
4748#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02
4749#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L
4750
4751#define TMDS1XEncoderControl DVOEncoderControl
4752#define DFP1XOutputControl DVOOutputControl
4753
4754#define ExternalDFPOutputControl DFP1XOutputControl
4755#define EnableExternalTMDS_Encoder TMDS1XEncoderControl
4756
4757#define DFP1IOutputControl TMDSAOutputControl
4758#define DFP2IOutputControl LVTMAOutputControl
4759
4760#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
4761#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
4762
4763#define DAC2_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
4764#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
4765
4766#define ucDac1Standard ucDacStandard
4767#define ucDac2Standard ucDacStandard
4768
4769#define TMDS1EncoderControl TMDSAEncoderControl
4770#define TMDS2EncoderControl LVTMAEncoderControl
4771
4772#define DFP1OutputControl TMDSAOutputControl
4773#define DFP2OutputControl LVTMAOutputControl
4774#define CRT1OutputControl DAC1OutputControl
4775#define CRT2OutputControl DAC2OutputControl
4776
4777/* These two lines will be removed for sure in a few days, will follow up with Michael V. */
4778#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL
4779#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
4780
4781/*********************************************************************************/
4782
4783#pragma pack() /* BIOS data must use byte aligment */
4784
4785#endif /* _ATOMBIOS_H */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
new file mode 100644
index 000000000000..c0080cc9bf8d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -0,0 +1,695 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h>
28#include <drm/radeon_drm.h>
29#include "radeon_fixed.h"
30#include "radeon.h"
31#include "atom.h"
32#include "atom-bits.h"
33
34static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
35{
36 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
37 struct drm_device *dev = crtc->dev;
38 struct radeon_device *rdev = dev->dev_private;
39 int index =
40 GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
41 ENABLE_CRTC_PS_ALLOCATION args;
42
43 memset(&args, 0, sizeof(args));
44
45 args.ucCRTC = radeon_crtc->crtc_id;
46 args.ucEnable = lock;
47
48 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
49}
50
51static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
52{
53 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
54 struct drm_device *dev = crtc->dev;
55 struct radeon_device *rdev = dev->dev_private;
56 int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
57 ENABLE_CRTC_PS_ALLOCATION args;
58
59 memset(&args, 0, sizeof(args));
60
61 args.ucCRTC = radeon_crtc->crtc_id;
62 args.ucEnable = state;
63
64 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
65}
66
67static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
68{
69 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
70 struct drm_device *dev = crtc->dev;
71 struct radeon_device *rdev = dev->dev_private;
72 int index = GetIndexIntoMasterTable(COMMAND, EnableCRTCMemReq);
73 ENABLE_CRTC_PS_ALLOCATION args;
74
75 memset(&args, 0, sizeof(args));
76
77 args.ucCRTC = radeon_crtc->crtc_id;
78 args.ucEnable = state;
79
80 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
81}
82
83static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
84{
85 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
86 struct drm_device *dev = crtc->dev;
87 struct radeon_device *rdev = dev->dev_private;
88 int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
89 BLANK_CRTC_PS_ALLOCATION args;
90
91 memset(&args, 0, sizeof(args));
92
93 args.ucCRTC = radeon_crtc->crtc_id;
94 args.ucBlanking = state;
95
96 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
97}
98
99void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
100{
101 struct drm_device *dev = crtc->dev;
102 struct radeon_device *rdev = dev->dev_private;
103
104 switch (mode) {
105 case DRM_MODE_DPMS_ON:
106 if (ASIC_IS_DCE3(rdev))
107 atombios_enable_crtc_memreq(crtc, 1);
108 atombios_enable_crtc(crtc, 1);
109 atombios_blank_crtc(crtc, 0);
110 break;
111 case DRM_MODE_DPMS_STANDBY:
112 case DRM_MODE_DPMS_SUSPEND:
113 case DRM_MODE_DPMS_OFF:
114 atombios_blank_crtc(crtc, 1);
115 atombios_enable_crtc(crtc, 0);
116 if (ASIC_IS_DCE3(rdev))
117 atombios_enable_crtc_memreq(crtc, 0);
118 break;
119 }
120
121 if (mode != DRM_MODE_DPMS_OFF) {
122 radeon_crtc_load_lut(crtc);
123 }
124}
125
126static void
127atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
128 SET_CRTC_USING_DTD_TIMING_PARAMETERS * crtc_param)
129{
130 struct drm_device *dev = crtc->dev;
131 struct radeon_device *rdev = dev->dev_private;
132 SET_CRTC_USING_DTD_TIMING_PARAMETERS conv_param;
133 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
134
135 conv_param.usH_Size = cpu_to_le16(crtc_param->usH_Size);
136 conv_param.usH_Blanking_Time =
137 cpu_to_le16(crtc_param->usH_Blanking_Time);
138 conv_param.usV_Size = cpu_to_le16(crtc_param->usV_Size);
139 conv_param.usV_Blanking_Time =
140 cpu_to_le16(crtc_param->usV_Blanking_Time);
141 conv_param.usH_SyncOffset = cpu_to_le16(crtc_param->usH_SyncOffset);
142 conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
143 conv_param.usV_SyncOffset = cpu_to_le16(crtc_param->usV_SyncOffset);
144 conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
145 conv_param.susModeMiscInfo.usAccess =
146 cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
147 conv_param.ucCRTC = crtc_param->ucCRTC;
148
149 printk("executing set crtc dtd timing\n");
150 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
151}
152
153void atombios_crtc_set_timing(struct drm_crtc *crtc,
154 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *
155 crtc_param)
156{
157 struct drm_device *dev = crtc->dev;
158 struct radeon_device *rdev = dev->dev_private;
159 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION conv_param;
160 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_Timing);
161
162 conv_param.usH_Total = cpu_to_le16(crtc_param->usH_Total);
163 conv_param.usH_Disp = cpu_to_le16(crtc_param->usH_Disp);
164 conv_param.usH_SyncStart = cpu_to_le16(crtc_param->usH_SyncStart);
165 conv_param.usH_SyncWidth = cpu_to_le16(crtc_param->usH_SyncWidth);
166 conv_param.usV_Total = cpu_to_le16(crtc_param->usV_Total);
167 conv_param.usV_Disp = cpu_to_le16(crtc_param->usV_Disp);
168 conv_param.usV_SyncStart = cpu_to_le16(crtc_param->usV_SyncStart);
169 conv_param.usV_SyncWidth = cpu_to_le16(crtc_param->usV_SyncWidth);
170 conv_param.susModeMiscInfo.usAccess =
171 cpu_to_le16(crtc_param->susModeMiscInfo.usAccess);
172 conv_param.ucCRTC = crtc_param->ucCRTC;
173 conv_param.ucOverscanRight = crtc_param->ucOverscanRight;
174 conv_param.ucOverscanLeft = crtc_param->ucOverscanLeft;
175 conv_param.ucOverscanBottom = crtc_param->ucOverscanBottom;
176 conv_param.ucOverscanTop = crtc_param->ucOverscanTop;
177 conv_param.ucReserved = crtc_param->ucReserved;
178
179 printk("executing set crtc timing\n");
180 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&conv_param);
181}
182
183void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
184{
185 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
186 struct drm_device *dev = crtc->dev;
187 struct radeon_device *rdev = dev->dev_private;
188 struct drm_encoder *encoder = NULL;
189 struct radeon_encoder *radeon_encoder = NULL;
190 uint8_t frev, crev;
191 int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
192 SET_PIXEL_CLOCK_PS_ALLOCATION args;
193 PIXEL_CLOCK_PARAMETERS *spc1_ptr;
194 PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
195 PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
196 uint32_t sclock = mode->clock;
197 uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
198 struct radeon_pll *pll;
199 int pll_flags = 0;
200
201 memset(&args, 0, sizeof(args));
202
203 if (ASIC_IS_AVIVO(rdev)) {
204 uint32_t ss_cntl;
205
206 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
207 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
208 else
209 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
210
211 /* disable spread spectrum clocking for now -- thanks Hedy Lamarr */
212 if (radeon_crtc->crtc_id == 0) {
213 ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
214 WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl & ~1);
215 } else {
216 ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
217 WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl & ~1);
218 }
219 } else {
220 pll_flags |= RADEON_PLL_LEGACY;
221
222 if (mode->clock > 200000) /* range limits??? */
223 pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
224 else
225 pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
226
227 }
228
229 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
230 if (encoder->crtc == crtc) {
231 if (!ASIC_IS_AVIVO(rdev)) {
232 if (encoder->encoder_type !=
233 DRM_MODE_ENCODER_DAC)
234 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
235 if (!ASIC_IS_AVIVO(rdev)
236 && (encoder->encoder_type ==
237 DRM_MODE_ENCODER_LVDS))
238 pll_flags |= RADEON_PLL_USE_REF_DIV;
239 }
240 radeon_encoder = to_radeon_encoder(encoder);
241 }
242 }
243
244 if (radeon_crtc->crtc_id == 0)
245 pll = &rdev->clock.p1pll;
246 else
247 pll = &rdev->clock.p2pll;
248
249 radeon_compute_pll(pll, mode->clock, &sclock, &fb_div, &frac_fb_div,
250 &ref_div, &post_div, pll_flags);
251
252 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
253 &crev);
254
255 switch (frev) {
256 case 1:
257 switch (crev) {
258 case 1:
259 spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
260 spc1_ptr->usPixelClock = cpu_to_le16(sclock);
261 spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
262 spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
263 spc1_ptr->ucFracFbDiv = frac_fb_div;
264 spc1_ptr->ucPostDiv = post_div;
265 spc1_ptr->ucPpll =
266 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
267 spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
268 spc1_ptr->ucRefDivSrc = 1;
269 break;
270 case 2:
271 spc2_ptr =
272 (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
273 spc2_ptr->usPixelClock = cpu_to_le16(sclock);
274 spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
275 spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
276 spc2_ptr->ucFracFbDiv = frac_fb_div;
277 spc2_ptr->ucPostDiv = post_div;
278 spc2_ptr->ucPpll =
279 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
280 spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
281 spc2_ptr->ucRefDivSrc = 1;
282 break;
283 case 3:
284 if (!encoder)
285 return;
286 spc3_ptr =
287 (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
288 spc3_ptr->usPixelClock = cpu_to_le16(sclock);
289 spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
290 spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
291 spc3_ptr->ucFracFbDiv = frac_fb_div;
292 spc3_ptr->ucPostDiv = post_div;
293 spc3_ptr->ucPpll =
294 radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
295 spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
296 spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
297 spc3_ptr->ucEncoderMode =
298 atombios_get_encoder_mode(encoder);
299 break;
300 default:
301 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
302 return;
303 }
304 break;
305 default:
306 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
307 return;
308 }
309
310 printk("executing set pll\n");
311 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
312}
313
314int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
315 struct drm_framebuffer *old_fb)
316{
317 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
318 struct drm_device *dev = crtc->dev;
319 struct radeon_device *rdev = dev->dev_private;
320 struct radeon_framebuffer *radeon_fb;
321 struct drm_gem_object *obj;
322 struct drm_radeon_gem_object *obj_priv;
323 uint64_t fb_location;
324 uint32_t fb_format, fb_pitch_pixels;
325
326 if (!crtc->fb)
327 return -EINVAL;
328
329 radeon_fb = to_radeon_framebuffer(crtc->fb);
330
331 obj = radeon_fb->obj;
332 obj_priv = obj->driver_private;
333
334 if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
335 return -EINVAL;
336 }
337
338 switch (crtc->fb->bits_per_pixel) {
339 case 15:
340 fb_format =
341 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
342 AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
343 break;
344 case 16:
345 fb_format =
346 AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
347 AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
348 break;
349 case 24:
350 case 32:
351 fb_format =
352 AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
353 AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
354 break;
355 default:
356 DRM_ERROR("Unsupported screen depth %d\n",
357 crtc->fb->bits_per_pixel);
358 return -EINVAL;
359 }
360
361 /* TODO tiling */
362 if (radeon_crtc->crtc_id == 0)
363 WREG32(AVIVO_D1VGA_CONTROL, 0);
364 else
365 WREG32(AVIVO_D2VGA_CONTROL, 0);
366 WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
367 (u32) fb_location);
368 WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
369 radeon_crtc->crtc_offset, (u32) fb_location);
370 WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
371
372 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
373 WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
374 WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
375 WREG32(AVIVO_D1GRPH_Y_START + radeon_crtc->crtc_offset, 0);
376 WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
377 WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
378
379 fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
380 WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
381 WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
382
383 WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
384 crtc->mode.vdisplay);
385 x &= ~3;
386 y &= ~1;
387 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
388 (x << 16) | y);
389 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
390 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
391
392 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
393 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
394 AVIVO_D1MODE_INTERLEAVE_EN);
395 else
396 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
397
398 if (old_fb && old_fb != crtc->fb) {
399 radeon_fb = to_radeon_framebuffer(old_fb);
400 radeon_gem_object_unpin(radeon_fb->obj);
401 }
402 return 0;
403}
404
405int atombios_crtc_mode_set(struct drm_crtc *crtc,
406 struct drm_display_mode *mode,
407 struct drm_display_mode *adjusted_mode,
408 int x, int y, struct drm_framebuffer *old_fb)
409{
410 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
411 struct drm_device *dev = crtc->dev;
412 struct radeon_device *rdev = dev->dev_private;
413 struct drm_encoder *encoder;
414 SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing;
415
416 /* TODO color tiling */
417 memset(&crtc_timing, 0, sizeof(crtc_timing));
418
419 /* TODO tv */
420 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
421
422 }
423
424 crtc_timing.ucCRTC = radeon_crtc->crtc_id;
425 crtc_timing.usH_Total = adjusted_mode->crtc_htotal;
426 crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay;
427 crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start;
428 crtc_timing.usH_SyncWidth =
429 adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
430
431 crtc_timing.usV_Total = adjusted_mode->crtc_vtotal;
432 crtc_timing.usV_Disp = adjusted_mode->crtc_vdisplay;
433 crtc_timing.usV_SyncStart = adjusted_mode->crtc_vsync_start;
434 crtc_timing.usV_SyncWidth =
435 adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
436
437 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
438 crtc_timing.susModeMiscInfo.usAccess |= ATOM_VSYNC_POLARITY;
439
440 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
441 crtc_timing.susModeMiscInfo.usAccess |= ATOM_HSYNC_POLARITY;
442
443 if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
444 crtc_timing.susModeMiscInfo.usAccess |= ATOM_COMPOSITESYNC;
445
446 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
447 crtc_timing.susModeMiscInfo.usAccess |= ATOM_INTERLACE;
448
449 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
450 crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE;
451
452 atombios_crtc_set_pll(crtc, adjusted_mode);
453 atombios_crtc_set_timing(crtc, &crtc_timing);
454
455 if (ASIC_IS_AVIVO(rdev))
456 atombios_crtc_set_base(crtc, x, y, old_fb);
457 else {
458 if (radeon_crtc->crtc_id == 0) {
459 SET_CRTC_USING_DTD_TIMING_PARAMETERS crtc_dtd_timing;
460 memset(&crtc_dtd_timing, 0, sizeof(crtc_dtd_timing));
461
462 /* setup FP shadow regs on R4xx */
463 crtc_dtd_timing.ucCRTC = radeon_crtc->crtc_id;
464 crtc_dtd_timing.usH_Size = adjusted_mode->crtc_hdisplay;
465 crtc_dtd_timing.usV_Size = adjusted_mode->crtc_vdisplay;
466 crtc_dtd_timing.usH_Blanking_Time =
467 adjusted_mode->crtc_hblank_end -
468 adjusted_mode->crtc_hdisplay;
469 crtc_dtd_timing.usV_Blanking_Time =
470 adjusted_mode->crtc_vblank_end -
471 adjusted_mode->crtc_vdisplay;
472 crtc_dtd_timing.usH_SyncOffset =
473 adjusted_mode->crtc_hsync_start -
474 adjusted_mode->crtc_hdisplay;
475 crtc_dtd_timing.usV_SyncOffset =
476 adjusted_mode->crtc_vsync_start -
477 adjusted_mode->crtc_vdisplay;
478 crtc_dtd_timing.usH_SyncWidth =
479 adjusted_mode->crtc_hsync_end -
480 adjusted_mode->crtc_hsync_start;
481 crtc_dtd_timing.usV_SyncWidth =
482 adjusted_mode->crtc_vsync_end -
483 adjusted_mode->crtc_vsync_start;
484 /* crtc_dtd_timing.ucH_Border = adjusted_mode->crtc_hborder; */
485 /* crtc_dtd_timing.ucV_Border = adjusted_mode->crtc_vborder; */
486
487 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
488 crtc_dtd_timing.susModeMiscInfo.usAccess |=
489 ATOM_VSYNC_POLARITY;
490
491 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
492 crtc_dtd_timing.susModeMiscInfo.usAccess |=
493 ATOM_HSYNC_POLARITY;
494
495 if (adjusted_mode->flags & DRM_MODE_FLAG_CSYNC)
496 crtc_dtd_timing.susModeMiscInfo.usAccess |=
497 ATOM_COMPOSITESYNC;
498
499 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
500 crtc_dtd_timing.susModeMiscInfo.usAccess |=
501 ATOM_INTERLACE;
502
503 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
504 crtc_dtd_timing.susModeMiscInfo.usAccess |=
505 ATOM_DOUBLE_CLOCK_MODE;
506
507 atombios_set_crtc_dtd_timing(crtc, &crtc_dtd_timing);
508 }
509 radeon_crtc_set_base(crtc, x, y, old_fb);
510 radeon_legacy_atom_set_surface(crtc);
511 }
512 return 0;
513}
514
/* mode_fixup helper: accept every requested mode unchanged. */
static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
				     struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	return true;
}
521
/* prepare helper: power the CRTC off and lock its double-buffered
 * registers before the helper framework programs the new mode. */
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	atombios_lock_crtc(crtc, 1);
}
527
/* commit helper: power the CRTC back on and unlock its double-buffered
 * registers once the new mode has been programmed. */
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	atombios_lock_crtc(crtc, 0);
}
533
/* CRTC helper vtable wiring the AtomBIOS implementations into the DRM
 * mode-setting helper framework. */
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
	.dpms = atombios_crtc_dpms,
	.mode_fixup = atombios_crtc_mode_fixup,
	.mode_set = atombios_crtc_mode_set,
	.mode_set_base = atombios_crtc_set_base,
	.prepare = atombios_crtc_prepare,
	.commit = atombios_crtc_commit,
};
542
/* Attach the AtomBIOS CRTC helpers to this crtc.  The second controller's
 * registers live at a fixed delta from the first's, recorded here as
 * crtc_offset and added to every D1* register address. */
void radeon_atombios_init_crtc(struct drm_device *dev,
			       struct radeon_crtc *radeon_crtc)
{
	if (radeon_crtc->crtc_id == 1)
		radeon_crtc->crtc_offset =
		    AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
	drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
551
552void radeon_init_disp_bw_avivo(struct drm_device *dev,
553 struct drm_display_mode *mode1,
554 uint32_t pixel_bytes1,
555 struct drm_display_mode *mode2,
556 uint32_t pixel_bytes2)
557{
558 struct radeon_device *rdev = dev->dev_private;
559 fixed20_12 min_mem_eff;
560 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
561 fixed20_12 sclk_ff, mclk_ff;
562 uint32_t dc_lb_memory_split, temp;
563
564 min_mem_eff.full = rfixed_const_8(0);
565 if (rdev->disp_priority == 2) {
566 uint32_t mc_init_misc_lat_timer = 0;
567 if (rdev->family == CHIP_RV515)
568 mc_init_misc_lat_timer =
569 RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER);
570 else if (rdev->family == CHIP_RS690)
571 mc_init_misc_lat_timer =
572 RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER);
573
574 mc_init_misc_lat_timer &=
575 ~(R300_MC_DISP1R_INIT_LAT_MASK <<
576 R300_MC_DISP1R_INIT_LAT_SHIFT);
577 mc_init_misc_lat_timer &=
578 ~(R300_MC_DISP0R_INIT_LAT_MASK <<
579 R300_MC_DISP0R_INIT_LAT_SHIFT);
580
581 if (mode2)
582 mc_init_misc_lat_timer |=
583 (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
584 if (mode1)
585 mc_init_misc_lat_timer |=
586 (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
587
588 if (rdev->family == CHIP_RV515)
589 WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER,
590 mc_init_misc_lat_timer);
591 else if (rdev->family == CHIP_RS690)
592 WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER,
593 mc_init_misc_lat_timer);
594 }
595
596 /*
597 * determine is there is enough bw for current mode
598 */
599 temp_ff.full = rfixed_const(100);
600 mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
601 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
602 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
603 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
604
605 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
606 temp_ff.full = rfixed_const(temp);
607 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
608 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
609
610 pix_clk.full = 0;
611 pix_clk2.full = 0;
612 peak_disp_bw.full = 0;
613 if (mode1) {
614 temp_ff.full = rfixed_const(1000);
615 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
616 pix_clk.full = rfixed_div(pix_clk, temp_ff);
617 temp_ff.full = rfixed_const(pixel_bytes1);
618 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
619 }
620 if (mode2) {
621 temp_ff.full = rfixed_const(1000);
622 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
623 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
624 temp_ff.full = rfixed_const(pixel_bytes2);
625 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
626 }
627
628 if (peak_disp_bw.full >= mem_bw.full) {
629 DRM_ERROR
630 ("You may not have enough display bandwidth for current mode\n"
631 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
632 printk("peak disp bw %d, mem_bw %d\n",
633 rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw));
634 }
635
636 /*
637 * Line Buffer Setup
638 * There is a single line buffer shared by both display controllers.
639 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display
640 * controllers. The paritioning can either be done manually or via one of four
641 * preset allocations specified in bits 1:0:
642 * 0 - line buffer is divided in half and shared between each display controller
643 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
644 * 2 - D1 gets the whole buffer
645 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
646 * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual allocation mode.
647 * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits
648 * 14:4; D2 allocation follows D1.
649 */
650
651 /* is auto or manual better ? */
652 dc_lb_memory_split =
653 RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK;
654 dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
655#if 1
656 /* auto */
657 if (mode1 && mode2) {
658 if (mode1->hdisplay > mode2->hdisplay) {
659 if (mode1->hdisplay > 2560)
660 dc_lb_memory_split |=
661 AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
662 else
663 dc_lb_memory_split |=
664 AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
665 } else if (mode2->hdisplay > mode1->hdisplay) {
666 if (mode2->hdisplay > 2560)
667 dc_lb_memory_split |=
668 AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
669 else
670 dc_lb_memory_split |=
671 AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
672 } else
673 dc_lb_memory_split |=
674 AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
675 } else if (mode1) {
676 dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY;
677 } else if (mode2) {
678 dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
679 }
680#else
681 /* manual */
682 dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
683 dc_lb_memory_split &=
684 ~(AVIVO_DC_LB_DISP1_END_ADR_MASK <<
685 AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
686 if (mode1) {
687 dc_lb_memory_split |=
688 ((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK)
689 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
690 } else if (mode2) {
691 dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
692 }
693#endif
694 WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split);
695}
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
new file mode 100644
index 000000000000..5225f5be7ea7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -0,0 +1,1524 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include "drmP.h"
30#include "drm.h"
31#include "radeon_drm.h"
32#include "radeon_microcode.h"
33#include "radeon_reg.h"
34#include "radeon.h"
35
/* This file gathers functions specific to:
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 *
 * Some of these functions might be used by newer ASICs.
 */
41void r100_hdp_reset(struct radeon_device *rdev);
42void r100_gpu_init(struct radeon_device *rdev);
43int r100_gui_wait_for_idle(struct radeon_device *rdev);
44int r100_mc_wait_for_idle(struct radeon_device *rdev);
45void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
46void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
47int r100_debugfs_mc_info_init(struct radeon_device *rdev);
48
49
50/*
51 * PCI GART
52 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* Flush the PCI GART translate cache.  Intentionally a no-op for
	 * now: there is no known register sequence to invalidate it on
	 * these ASICs. */
	/* TODO: can we do somethings here ? */
	/* It seems hw only cache one entry so we should discard this
	 * entry otherwise if first GPU GART read hit this entry it
	 * could end up in wrong address. */
}
60
int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	/* Bring up the on-chip PCI GART: allocate the page table in system
	 * RAM (if not already allocated), program the aperture range and
	 * table base, then enable translation.  Returns 0 or a negative
	 * error from the gart helpers. */
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	if (rdev->gart.table.ram.ptr == NULL) {
		/* one 32-bit entry per GPU page */
		rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
		r = radeon_gart_table_ram_alloc(rdev);
		if (r) {
			return r;
		}
	}
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32(RADEON_AIC_HI_ADDR, tmp);
	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
96
void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* Turn PCI GART translation off and clear the aperture range so no
	 * stale translations remain reachable. */
	/* discard memory request outside of configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}
107
108int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
109{
110 if (i < 0 || i > rdev->gart.num_gpu_pages) {
111 return -EINVAL;
112 }
113 rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr);
114 return 0;
115}
116
117int r100_gart_enable(struct radeon_device *rdev)
118{
119 if (rdev->flags & RADEON_IS_AGP) {
120 r100_pci_gart_disable(rdev);
121 return 0;
122 }
123 return r100_pci_gart_enable(rdev);
124}
125
126
127/*
128 * MC
129 */
void r100_mc_disable_clients(struct radeon_device *rdev)
{
	uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;

	/* Quiesce the memory-controller clients (overlay scaler and both
	 * CRTCs) so the MC apertures can be reprogrammed safely.  Display
	 * request generation is only cut after waiting for vsync so the
	 * change happens between frames. */
	/* FIXME: is this function correct for rs100,rs200,rs300 ? */
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	/* stop display and memory access */
	ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
	WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);

	r100_gpu_wait_for_vsync(rdev);

	WREG32(RADEON_CRTC_GEN_CNTL,
	       (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
	       RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);

	/* second CRTC exists only on dual-CRTC ASICs */
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);

		r100_gpu_wait_for_vsync2(rdev);
		WREG32(RADEON_CRTC2_GEN_CNTL,
		       (crtc2_gen_cntl &
			~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
		       RADEON_CRTC2_DISP_REQ_EN_B);
	}

	/* give in-flight memory requests time to drain */
	udelay(500);
}
165
void r100_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	/* Program the memory-controller apertures: the VRAM (FB) window
	 * and, on AGP boards, the AGP window; finish with an HDP soft
	 * reset so the host data path sees the new layout. */
	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
	}
	/* Write VRAM size in case we are limiting it */
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	/* FB_TOP/FB_START fields take 64KB units, hence the >> 16 */
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32(RADEON_MC_FB_LOCATION, tmp);

	/* Enable bus mastering */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);

	if (rdev->flags & RADEON_IS_AGP) {
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
		tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
		tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
	} else {
		/* park the unused AGP aperture out of the way */
		WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(RADEON_AGP_BASE, 0);
	}

	/* HDP soft reset + read-buffer invalidate (same sequence as
	 * r100_hdp_reset); the reads flush the posted writes */
	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}
204
int r100_mc_init(struct radeon_device *rdev)
{
	int r;

	/* One-time MC bring-up: init the GPU, disable the GART, lay out
	 * the GPU address space (VRAM + GTT/AGP), quiesce the MC clients,
	 * then program the apertures.  Returns 0 or a negative error from
	 * radeon_mc_setup(). */
	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	r100_gpu_init(rdev);
	/* Disable gart which also disable out of gart access */
	r100_pci_gart_disable(rdev);

	/* Setup GPU memory space */
	/* 0xFFFFFFFF acts as an "unset" sentinel for radeon_mc_setup() */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			/* AGP bring-up failed: fall back to PCI GART */
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	r100_mc_disable_clients(rdev);
	if (r100_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	r100_mc_setup(rdev);
	return 0;
}
244
void r100_mc_fini(struct radeon_device *rdev)
{
	/* Tear down MC state.  Order matters: disable GART translation
	 * before freeing the page table it points at, then release the
	 * gart bookkeeping. */
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
	radeon_gart_fini(rdev);
}
251
252
253/*
254 * Fence emission
255 */
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Emit the ring commands that signal @fence: wait for idle &
	 * clean, write the fence sequence into the fence scratch register,
	 * then fire the SW interrupt so waiters are woken. */
	/* Who ever call radeon_fence_emit should call ring_lock and ask
	 * for enough space (today caller are ib schedule and buffer move) */
	/* Wait until IDLE & CLEAN */
	/* 0x1720 is a magic register offset — confirm against register spec */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
270
271
272/*
273 * Writeback
274 */
int r100_wb_init(struct radeon_device *rdev)
{
	int r;

	/* Set up the writeback buffer: a 4KB GTT object, pinned and
	 * CPU-mapped, that the CP writes status data into.  Idempotent —
	 * an already-created object is reused.  Returns 0 or a negative
	 * error from the object helpers. */
	if (rdev->wb.wb_obj == NULL) {
		r = radeon_object_create(rdev, NULL, 4096,
					 true,
					 RADEON_GEM_DOMAIN_GTT,
					 false, &rdev->wb.wb_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
			return r;
		}
		/* NOTE(review): on pin/kmap failure the object is left
		 * allocated; presumably r100_wb_fini() cleans it up —
		 * confirm callers always pair init with fini on error. */
		r = radeon_object_pin(rdev->wb.wb_obj,
				      RADEON_GEM_DOMAIN_GTT,
				      &rdev->wb.gpu_addr);
		if (r) {
			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		if (r) {
			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
			return r;
		}
	}
	/* 0x774/0x70C/0x770 are magic register offsets — confirm their
	 * names against the register spec */
	WREG32(0x774, rdev->wb.gpu_addr);
	WREG32(0x70C, rdev->wb.gpu_addr + 1024);
	WREG32(0x770, 0xff);
	return 0;
}
306
void r100_wb_fini(struct radeon_device *rdev)
{
	/* Release the writeback buffer: unmap, unpin, then drop the
	 * reference — the mapping must go before the pin. */
	if (rdev->wb.wb_obj) {
		radeon_object_kunmap(rdev->wb.wb_obj);
		radeon_object_unpin(rdev->wb.wb_obj);
		radeon_object_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
317
int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence)
{
	uint32_t cur_pages;
	uint32_t stride_bytes = PAGE_SIZE;
	uint32_t pitch;
	uint32_t stride_pixels;
	unsigned ndw;
	int num_loops;
	int r = 0;

	/* Copy @num_pages pages from @src_offset to @dst_offset using the
	 * 2D blitter (one page per row, up to 8191 rows per blit), flush
	 * the 2D caches, and optionally emit @fence.  Returns 0 or a
	 * negative error from ring locking / fence emission. */
	/* radeon limited to 16k stride */
	stride_bytes &= 0x3fff;
	/* radeon pitch is /64 */
	pitch = stride_bytes / 64;
	stride_pixels = stride_bytes / 4;	/* ARGB8888: 4 bytes per pixel */
	/* at most 8191 rows per blit, so split into that many batches */
	num_loops = DIV_ROUND_UP(num_pages, 8191);

	/* Ask for enough room for blit + flush + fence */
	ndw = 64 + (10 * num_loops);	/* 10 dwords emitted per loop below */
	r = radeon_ring_lock(rdev, ndw);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
		return -EINVAL;
	}
	while (num_pages > 0) {
		cur_pages = num_pages;
		if (cur_pages > 8191) {
			cur_pages = 8191;
		}
		num_pages -= cur_pages;

		/* pages are in Y direction - height
		   page width in X direction - width */
		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
		radeon_ring_write(rdev,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
				  RADEON_GMC_DST_CLIPPING |
				  RADEON_GMC_BRUSH_NONE |
				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
				  RADEON_GMC_SRC_DATATYPE_COLOR |
				  RADEON_ROP3_S |
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
		/* pitch in bits 31:22, surface offset in 1KB units */
		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		radeon_ring_write(rdev, 0);
		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
		/* NOTE(review): these two dwords use the already-decremented
		 * num_pages rather than cur_pages — confirm the intended
		 * fields against the BITBLT_MULTI packet spec */
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, num_pages);
		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
	}
	/* flush the 2D destination cache and wait for engines to idle */
	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}
390
391
392/*
393 * CP
394 */
void r100_ring_start(struct radeon_device *rdev)
{
	int r;

	/* Kick the ring with an initial ISYNC_CNTL write so the 2D and 3D
	 * engines wait for each other and for GUI idle before starting. */
	r = radeon_ring_lock(rdev, 2);
	if (r) {
		/* nothing sensible to do if the ring cannot be locked */
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_unlock_commit(rdev);
}
411
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
	int i;

	/* Upload the Command Processor microcode matching the ASIC family.
	 * Every variant is 256 entries of (high, low) dword pairs written
	 * through CP_ME_RAM_DATAH/DATAL after resetting CP_ME_RAM_ADDR. */
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	WREG32(RADEON_CP_ME_RAM_ADDR, 0);
	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		DRM_INFO("Loading R100 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R100_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R100_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R200) ||
		   (rdev->family == CHIP_RV250) ||
		   (rdev->family == CHIP_RV280) ||
		   (rdev->family == CHIP_RS300)) {
		DRM_INFO("Loading R200 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R200_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R200_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R300) ||
		   (rdev->family == CHIP_R350) ||
		   (rdev->family == CHIP_RV350) ||
		   (rdev->family == CHIP_RV380) ||
		   (rdev->family == CHIP_RS400) ||
		   (rdev->family == CHIP_RS480)) {
		DRM_INFO("Loading R300 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R300_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R300_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_R420) ||
		   (rdev->family == CHIP_R423) ||
		   (rdev->family == CHIP_RV410)) {
		DRM_INFO("Loading R400 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R420_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R420_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_RS690) ||
		   (rdev->family == CHIP_RS740)) {
		DRM_INFO("Loading RS690/RS740 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, RS690_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, RS690_cp_microcode[i][0]);
		}
	} else if (rdev->family == CHIP_RS600) {
		DRM_INFO("Loading RS600 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, RS600_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, RS600_cp_microcode[i][0]);
		}
	} else if ((rdev->family == CHIP_RV515) ||
		   (rdev->family == CHIP_R520) ||
		   (rdev->family == CHIP_RV530) ||
		   (rdev->family == CHIP_R580) ||
		   (rdev->family == CHIP_RV560) ||
		   (rdev->family == CHIP_RV570)) {
		DRM_INFO("Loading R500 Microcode\n");
		for (i = 0; i < 256; i++) {
			WREG32(RADEON_CP_ME_RAM_DATAH, R520_cp_microcode[i][1]);
			WREG32(RADEON_CP_ME_RAM_DATAL, R520_cp_microcode[i][0]);
		}
	}
	/* NOTE(review): unknown families silently load nothing here */
}
484
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
	unsigned rb_bufsz;
	unsigned rb_blksz;
	unsigned max_fetch;
	unsigned pre_write_timer;
	unsigned pre_write_limit;
	unsigned indirect2_start;
	unsigned indirect1_start;
	uint32_t tmp;
	int r;

	/* Bring up the Command Processor: reset it if busy, load microcode,
	 * allocate and program the ring buffer, configure the CSQ cache
	 * split, then run a ring test.  Returns 0 on success or a negative
	 * error from ring init / ring test. */
	if (r100_debugfs_cp_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for CP !\n");
	}
	/* Reset CP */
	tmp = RREG32(RADEON_CP_CSQ_STAT);
	if ((tmp & (1 << 31))) {	/* bit 31: CSQ busy */
		DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
		mdelay(2);
		tmp = RREG32(RADEON_CP_CSQ_STAT);
		if ((tmp & (1 << 31))) {
			DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
		}
	} else {
		DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
	}
	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	r100_cp_load_microcode(rdev);
	r = radeon_ring_init(rdev, ring_size);
	if (r) {
		return r;
	}
	/* Each time the cp read 1024 bytes (16 dword/quadword) update
	 * the rptr copy in system ram */
	rb_blksz = 9;
	/* cp will read 128bytes at a time (4 dwords) */
	max_fetch = 1;
	rdev->cp.align_mask = 16 - 1;
	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
	pre_write_timer = 64;
	/* Force CP_RB_WPTR write if written more than one time before the
	 * delay expire
	 */
	pre_write_limit = 0;
	/* Setup the cp cache like this (cache size is 96 dwords) :
	 *	RING		0  to 15
	 *	INDIRECT1	16 to 79
	 *	INDIRECT2	80 to 95
	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
	 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
	 * Idea being that most of the gpu cmd will be through indirect1 buffer
	 * so it gets the bigger cache.
	 */
	indirect2_start = 80;
	indirect1_start = 16;
	/* cp setup */
	/* 0x718/0x744 are magic register offsets — confirm names against
	 * the register spec */
	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
	WREG32(RADEON_CP_RB_CNTL,
	       REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
	       RADEON_RB_NO_UPDATE);
	/* Set ring address */
	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
	/* Force read & write ptr to 0 */
	tmp = RREG32(RADEON_CP_RB_CNTL);
	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
	WREG32(RADEON_CP_RB_RPTR_WR, 0);
	WREG32(RADEON_CP_RB_WPTR, 0);
	WREG32(RADEON_CP_RB_CNTL, tmp);
	udelay(10);
	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
	/* Set cp mode to bus mastering & enable cp*/
	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
	WREG32(0x718, 0);
	WREG32(0x744, 0x00004D4D);
	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
	radeon_ring_start(rdev);
	r = radeon_ring_test(rdev);
	if (r) {
		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
		return r;
	}
	rdev->cp.ready = true;
	return 0;
}
586
void r100_cp_fini(struct radeon_device *rdev)
{
	/* Stop the CP queue and free the ring buffer. */
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	radeon_ring_fini(rdev);
	DRM_INFO("radeon: cp finalized\n");
}
595
void r100_cp_disable(struct radeon_device *rdev)
{
	/* Stop the CP (queue mode and control cleared) without freeing the
	 * ring, then wait for the GUI to drain. */
	/* Disable ring */
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}
607
int r100_cp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	/* Soft-reset the Command Processor and, if it was running before,
	 * reinitialize it.  Returns 0 (or r100_cp_init's result) on
	 * success, -1 if RBBM never reports the CP quiescent. */
	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	WREG32(RADEON_CP_CSQ_MODE, 0);
	WREG32(RADEON_CP_CSQ_CNTL, 0);
	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
	(void)RREG32(RADEON_RBBM_SOFT_RESET);	/* flush posted write */
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & (1 << 16))) {	/* bit 16: CP busy */
			DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}
640
641
642/*
643 * CS functions
644 */
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  unsigned *auth, unsigned n,
			  radeon_packet0_check_t check)
{
	unsigned reg;
	unsigned i, j, m;
	unsigned idx;
	int r;

	/* Walk a PACKET0 register write: for every register touched, if
	 * its bit is set in the @auth bitmap, run @check on that dword.
	 * With one_reg_wr all dwords go to the same register; otherwise
	 * the register advances by 4 per dword.  Returns 0, -EINVAL for an
	 * out-of-range register, or @check's error. */
	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* NOTE(review): the bound tests use '>' — confirm whether @n is
	 * the bitmap word count (then '>=' would be needed) or the last
	 * valid word index */
	if (pkt->one_reg_wr) {
		if ((reg >> 7) > n) {
			return -EINVAL;
		}
	} else {
		if (((reg + (pkt->count << 2)) >> 7) > n) {
			return -EINVAL;
		}
	}
	for (i = 0; i <= pkt->count; i++, idx++) {
		/* bitmap word index and bit mask for this register */
		j = (reg >> 7);
		m = 1 << ((reg >> 2) & 31);
		if (auth[j] & m) {
			r = check(p, pkt, idx, reg);
			if (r) {
				return r;
			}
		}
		if (pkt->one_reg_wr) {
			/* same register every dword: once it is known
			 * unauthorized there is nothing more to check */
			if (!(auth[j] & m)) {
				break;
			}
		} else {
			reg += 4;
		}
	}
	return 0;
}
685
686int r100_cs_parse_packet3(struct radeon_cs_parser *p,
687 struct radeon_cs_packet *pkt,
688 unsigned *auth, unsigned n,
689 radeon_packet3_check_t check)
690{
691 unsigned i, m;
692
693 if ((pkt->opcode >> 5) > n) {
694 return -EINVAL;
695 }
696 i = pkt->opcode >> 5;
697 m = 1 << (pkt->opcode & 31);
698 if (auth[i] & m) {
699 return check(p, pkt);
700 }
701 return 0;
702}
703
704void r100_cs_dump_packet(struct radeon_cs_parser *p,
705 struct radeon_cs_packet *pkt)
706{
707 struct radeon_cs_chunk *ib_chunk;
708 volatile uint32_t *ib;
709 unsigned i;
710 unsigned idx;
711
712 ib = p->ib->ptr;
713 ib_chunk = &p->chunks[p->chunk_ib_idx];
714 idx = pkt->idx;
715 for (i = 0; i <= (pkt->count + 1); i++, idx++) {
716 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
717 }
718}
719
720/**
721 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
722 * @parser: parser structure holding parsing context.
723 * @pkt: where to store packet informations
724 *
725 * Assume that chunk_ib_index is properly set. Will return -EINVAL
726 * if packet is bigger than remaining ib size. or if packets is unknown.
727 **/
728int r100_cs_packet_parse(struct radeon_cs_parser *p,
729 struct radeon_cs_packet *pkt,
730 unsigned idx)
731{
732 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
733 uint32_t header = ib_chunk->kdata[idx];
734
735 if (idx >= ib_chunk->length_dw) {
736 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
737 idx, ib_chunk->length_dw);
738 return -EINVAL;
739 }
740 pkt->idx = idx;
741 pkt->type = CP_PACKET_GET_TYPE(header);
742 pkt->count = CP_PACKET_GET_COUNT(header);
743 switch (pkt->type) {
744 case PACKET_TYPE0:
745 pkt->reg = CP_PACKET0_GET_REG(header);
746 pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
747 break;
748 case PACKET_TYPE3:
749 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
750 break;
751 case PACKET_TYPE2:
752 pkt->count = -1;
753 break;
754 default:
755 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
756 return -EINVAL;
757 }
758 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
759 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
760 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
761 return -EINVAL;
762 }
763 return 0;
764}
765
/**
 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store the resolved relocation entry
 *
 * Check next packet is relocation packet3, do bo validation and compute
 * GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
			      struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	/* consume the reloc packet (header + payload) from the stream */
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* first payload dword is the index into the relocation chunk */
	idx = ib_chunk->kdata[p3reloc.idx + 1];
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		r100_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
815
static int r100_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	volatile uint32_t *ib;
	uint32_t tmp;
	unsigned reg;
	unsigned i;
	unsigned idx;
	bool onereg;
	int r;

	/* Validate a PACKET0 from userspace: any register that carries a
	 * GPU address must be followed by a relocation packet, and the
	 * address is patched in the IB with the bo's real GPU offset. */
	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	reg = pkt->reg;
	onereg = false;
	if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) {
		onereg = true;
	}
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		switch (reg) {
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
		case RADEON_DST_PITCH_OFFSET:
		case RADEON_SRC_PITCH_OFFSET:
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			/* offset lives in bits 21:0 (1KB units); keep the
			 * pitch bits above untouched */
			tmp = ib_chunk->kdata[idx] & 0x003fffff;
			tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
			ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
			break;
		case RADEON_RB3D_DEPTHOFFSET:
		case RADEON_RB3D_COLOROFFSET:
		case R300_RB3D_COLOROFFSET0:
		case R300_ZB_DEPTHOFFSET:
		case R200_PP_TXOFFSET_0:
		case R200_PP_TXOFFSET_1:
		case R200_PP_TXOFFSET_2:
		case R200_PP_TXOFFSET_3:
		case R200_PP_TXOFFSET_4:
		case R200_PP_TXOFFSET_5:
		case RADEON_PP_TXOFFSET_0:
		case RADEON_PP_TXOFFSET_1:
		case RADEON_PP_TXOFFSET_2:
		case R300_TX_OFFSET_0:
		case R300_TX_OFFSET_0+4:
		case R300_TX_OFFSET_0+8:
		case R300_TX_OFFSET_0+12:
		case R300_TX_OFFSET_0+16:
		case R300_TX_OFFSET_0+20:
		case R300_TX_OFFSET_0+24:
		case R300_TX_OFFSET_0+28:
		case R300_TX_OFFSET_0+32:
		case R300_TX_OFFSET_0+36:
		case R300_TX_OFFSET_0+40:
		case R300_TX_OFFSET_0+44:
		case R300_TX_OFFSET_0+48:
		case R300_TX_OFFSET_0+52:
		case R300_TX_OFFSET_0+56:
		case R300_TX_OFFSET_0+60:
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					  idx, reg);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			/* these registers take a plain byte address */
			ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
			break;
		default:
			/* FIXME: we don't want to allow anyothers packet */
			break;
		}
		if (onereg) {
			/* FIXME: forbid onereg write to register on relocate */
			break;
		}
	}
	return 0;
}
903
static int r100_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	unsigned idx;
	unsigned i, c;
	volatile uint32_t *ib;
	int r;

	/* Validate a PACKET3 from userspace: patch bo addresses for the
	 * vertex/index buffer opcodes, allow the draw opcodes unchanged,
	 * and reject everything else. */
	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		/* first dword is the vertex-array count; arrays come in
		 * pairs of (size/stride dword, addr, addr) — each address
		 * needs its own relocation */
		c = ib_chunk->kdata[idx++];
		for (i = 0; i < (c - 1); i += 2, idx += 3) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
		}
		if (c & 1) {
			/* odd count: one trailing array with a single address */
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		}
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		break;
	case 0x23:
		/* FIXME: cleanup */
		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case PACKET3_3D_DRAW_IMMD:
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_IMMD_2:
		/* triggers drawing using in-packet vertex data */
	case PACKET3_3D_DRAW_VBUF_2:
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX_2:
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_3D_DRAW_VBUF:
		/* triggers drawing of vertex buffers setup elsewhere */
	case PACKET3_3D_DRAW_INDX:
		/* triggers drawing using indices to vertex buffer */
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
989
int r100_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r;

	/* Top-level command-stream validator: iterate packets in the IB
	 * chunk, dispatching PACKET0/PACKET3 to their checkers (PACKET2 is
	 * a filler and skipped).  Returns 0 or the first error found. */
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		/* advance past this packet (header + payload) */
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r100_packet0_check(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r100_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n",
				  pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}
1021
1022
1023/*
1024 * Global GPU functions
1025 */
1026void r100_errata(struct radeon_device *rdev)
1027{
1028 rdev->pll_errata = 0;
1029
1030 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
1031 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
1032 }
1033
1034 if (rdev->family == CHIP_RV100 ||
1035 rdev->family == CHIP_RS100 ||
1036 rdev->family == CHIP_RS200) {
1037 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
1038 }
1039}
1040
/* Wait for vertical sync on primary CRTC */
void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
{
	uint32_t crtc_gen_cntl, tmp;
	int i;

	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
	/* nothing to wait for if the CRTC is off or not generating
	 * display requests */
	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
		return;
	}
	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
	/* the bit latches again on the next vblank; poll for it with a
	 * usec timeout so a stuck CRTC cannot hang us */
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC_STATUS);
		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}
1062
/* Wait for vertical sync on secondary CRTC */
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
{
	uint32_t crtc2_gen_cntl, tmp;
	int i;

	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
	/* nothing to wait for if CRTC2 is off or not generating
	 * display requests */
	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
		return;

	/* Clear the CRTC_VBLANK_SAVE bit */
	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
	/* bit latches on the next vblank; poll with a usec timeout */
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_CRTC2_STATUS);
		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
			return;
		}
		DRM_UDELAY(1);
	}
}
1084
1085int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
1086{
1087 unsigned i;
1088 uint32_t tmp;
1089
1090 for (i = 0; i < rdev->usec_timeout; i++) {
1091 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
1092 if (tmp >= n) {
1093 return 0;
1094 }
1095 DRM_UDELAY(1);
1096 }
1097 return -1;
1098}
1099
1100int r100_gui_wait_for_idle(struct radeon_device *rdev)
1101{
1102 unsigned i;
1103 uint32_t tmp;
1104
1105 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
1106 printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
1107 " Bad things might happen.\n");
1108 }
1109 for (i = 0; i < rdev->usec_timeout; i++) {
1110 tmp = RREG32(RADEON_RBBM_STATUS);
1111 if (!(tmp & (1 << 31))) {
1112 return 0;
1113 }
1114 DRM_UDELAY(1);
1115 }
1116 return -1;
1117}
1118
/* Wait for the memory controller to become idle.
 * Returns 0 when idle, -1 on timeout. */
int r100_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(0x0150);
		/* NOTE(review): bit 2 appears to be the MC idle flag on
		 * r100-class hardware (r300 uses bit 4) — confirm against
		 * the register spec. */
		if (tmp & (1 << 2)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}
1134
/* Minimal GPU init for r100-class chips: just reset the host data path. */
void r100_gpu_init(struct radeon_device *rdev)
{
	/* TODO: anythings to do here ? pipes ? */
	r100_hdp_reset(rdev);
}
1140
/* Soft-reset the Host Data Path.
 * The write/read-back/delay sequence is order-sensitive: the dummy reads
 * flush posted writes before the fixed 200us settle time. */
void r100_hdp_reset(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* Preserve only the aperture control bit; force bits 28-30 on
	 * (NOTE(review): exact meaning of the 7 << 28 field is not
	 * documented here — verify against the HOST_PATH_CNTL spec). */
	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
	tmp |= (7 << 28);
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Deassert the reset and flush again. */
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
}
1154
/* Soft-reset the 2D render backend (E2 block).
 * Returns 0 once RBBM_STATUS reports the block idle, -1 on timeout. */
int r100_rb2d_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
	/* Read back to flush the posted write before the settle delay. */
	(void)RREG32(RADEON_RBBM_SOFT_RESET);
	udelay(200);
	WREG32(RADEON_RBBM_SOFT_RESET, 0);
	/* Wait to prevent race in RBBM_STATUS */
	mdelay(1);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		/* Bit 26 is the E2 busy indication. */
		if (!(tmp & (1 << 26))) {
			DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
				 tmp);
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
	return -1;
}
1179
/* Full GPU reset for r100-class chips: HDP, then RB2D, then CP, each
 * gated on the corresponding busy bits of RBBM_STATUS.
 * Returns 0 when the GPU ends up idle, -1 otherwise. */
int r100_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matter */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* TODO: reset 3D engine */
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	/* Bit 31 = GPU still active after all resets. */
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}
1207
1208
1209/*
1210 * VRAM info
1211 */
1212static void r100_vram_get_type(struct radeon_device *rdev)
1213{
1214 uint32_t tmp;
1215
1216 rdev->mc.vram_is_ddr = false;
1217 if (rdev->flags & RADEON_IS_IGP)
1218 rdev->mc.vram_is_ddr = true;
1219 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
1220 rdev->mc.vram_is_ddr = true;
1221 if ((rdev->family == CHIP_RV100) ||
1222 (rdev->family == CHIP_RS100) ||
1223 (rdev->family == CHIP_RS200)) {
1224 tmp = RREG32(RADEON_MEM_CNTL);
1225 if (tmp & RV100_HALF_MODE) {
1226 rdev->mc.vram_width = 32;
1227 } else {
1228 rdev->mc.vram_width = 64;
1229 }
1230 if (rdev->flags & RADEON_SINGLE_CRTC) {
1231 rdev->mc.vram_width /= 4;
1232 rdev->mc.vram_is_ddr = true;
1233 }
1234 } else if (rdev->family <= CHIP_RV280) {
1235 tmp = RREG32(RADEON_MEM_CNTL);
1236 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
1237 rdev->mc.vram_width = 128;
1238 } else {
1239 rdev->mc.vram_width = 64;
1240 }
1241 } else {
1242 /* newer IGPs */
1243 rdev->mc.vram_width = 128;
1244 }
1245}
1246
/* Determine VRAM type, size and aperture for r100-class chips and
 * record them in rdev->mc. */
void r100_vram_info(struct radeon_device *rdev)
{
	r100_vram_get_type(rdev);

	if (rdev->flags & RADEON_IS_IGP) {
		uint32_t tom;
		/* read NB_TOM to get the amount of ram stolen for the GPU */
		tom = RREG32(RADEON_NB_TOM);
		/* NB_TOM encodes the stolen range as 64KB-granular start
		 * (low 16 bits) and end (high 16 bits). */
		rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	} else {
		rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
		/* Some production boards of m6 will report 0
		 * if it's 8 MB
		 */
		if (rdev->mc.vram_size == 0) {
			rdev->mc.vram_size = 8192 * 1024;
			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
		}
	}

	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	if (rdev->mc.aper_size > rdev->mc.vram_size) {
		/* Why does some hw doesn't have CONFIG_MEMSIZE properly
		 * setup ? */
		rdev->mc.vram_size = rdev->mc.aper_size;
		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
	}
}
1277
1278
1279/*
1280 * Indirect registers accessor
1281 */
1282void r100_pll_errata_after_index(struct radeon_device *rdev)
1283{
1284 if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
1285 return;
1286 }
1287 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
1288 (void)RREG32(RADEON_CRTC_GEN_CNTL);
1289}
1290
/* Apply the per-chip PLL errata that must follow a CLOCK_CNTL_DATA
 * access; which branches run depends on rdev->pll_errata flags. */
static void r100_pll_errata_after_data(struct radeon_device *rdev)
{
	/* This workarounds is necessary on RV100, RS100 and RS200 chips
	 * or the chip could hang on a subsequent access
	 */
	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
		/* NOTE(review): 5000us busy-wait inside udelay(); kernel
		 * convention would be mdelay(5) for delays this long. */
		udelay(5000);
	}

	/* This function is required to workaround a hardware bug in some (all?)
	 * revisions of the R300. This workaround should be called after every
	 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
	 * may not be correct.
	 */
	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
		uint32_t save, tmp;

		/* Re-write the index with the PLL address and write-enable
		 * bits masked off, do a dummy data read, then restore. */
		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
	}
}
1315
/* Read a PLL register through the CLOCK_CNTL index/data pair.
 * The errata hooks must run exactly where they are: after the index
 * write and after the data read. */
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t data;

	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
	r100_pll_errata_after_index(rdev);
	data = RREG32(RADEON_CLOCK_CNTL_DATA);
	r100_pll_errata_after_data(rdev);
	return data;
}
1326
/* Write a PLL register through the CLOCK_CNTL index/data pair.
 * RADEON_PLL_WR_EN in the index selects write mode; errata hooks must
 * stay in this exact order. */
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
	r100_pll_errata_after_index(rdev);
	WREG32(RADEON_CLOCK_CNTL_DATA, v);
	r100_pll_errata_after_data(rdev);
}
1334
1335uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
1336{
1337 if (reg < 0x10000)
1338 return readl(((void __iomem *)rdev->rmmio) + reg);
1339 else {
1340 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
1341 return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
1342 }
1343}
1344
1345void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1346{
1347 if (reg < 0x10000)
1348 writel(v, ((void __iomem *)rdev->rmmio) + reg);
1349 else {
1350 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
1351 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
1352 }
1353}
1354
1355/*
1356 * Debugfs info
1357 */
1358#if defined(CONFIG_DEBUG_FS)
/* debugfs: dump RBBM status registers and the 64-entry command FIFO
 * (each entry read twice: once with bit 8 set for the address word,
 * once without for the data word). */
static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t reg, value;
	unsigned i;

	seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
	seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	for (i = 0; i < 64; i++) {
		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
		reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
		value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
		seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
	}
	return 0;
}
1379
/* debugfs: dump CP ring pointers and the ring contents between the
 * read and write pointers. */
static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t rdp, wdp;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	rdp = RREG32(RADEON_CP_RB_RPTR);
	wdp = RREG32(RADEON_CP_RB_WPTR);
	/* NOTE(review): (rdp - wdp) mod size looks like the FREE dword
	 * count; used dwords would be (wdp - rdp) mod size.  The dump
	 * below walks forward from rdp with this count — verify which
	 * region is intended. */
	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	for (j = 0; j <= count; j++) {
		i = (rdp + j) & rdev->cp.ptr_mask;
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
	}
	return 0;
}
1403
1404
1405static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
1406{
1407 struct drm_info_node *node = (struct drm_info_node *) m->private;
1408 struct drm_device *dev = node->minor->dev;
1409 struct radeon_device *rdev = dev->dev_private;
1410 uint32_t csq_stat, csq2_stat, tmp;
1411 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
1412 unsigned i;
1413
1414 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
1415 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
1416 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
1417 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
1418 r_rptr = (csq_stat >> 0) & 0x3ff;
1419 r_wptr = (csq_stat >> 10) & 0x3ff;
1420 ib1_rptr = (csq_stat >> 20) & 0x3ff;
1421 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
1422 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
1423 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
1424 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
1425 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
1426 seq_printf(m, "Ring rptr %u\n", r_rptr);
1427 seq_printf(m, "Ring wptr %u\n", r_wptr);
1428 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
1429 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
1430 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
1431 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
1432 /* FIXME: 0, 128, 640 depends on fifo setup see cp_init_kms
1433 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
1434 seq_printf(m, "Ring fifo:\n");
1435 for (i = 0; i < 256; i++) {
1436 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
1437 tmp = RREG32(RADEON_CP_CSQ_DATA);
1438 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
1439 }
1440 seq_printf(m, "Indirect1 fifo:\n");
1441 for (i = 256; i <= 512; i++) {
1442 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
1443 tmp = RREG32(RADEON_CP_CSQ_DATA);
1444 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
1445 }
1446 seq_printf(m, "Indirect2 fifo:\n");
1447 for (i = 640; i < ib1_wptr; i++) {
1448 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
1449 tmp = RREG32(RADEON_CP_CSQ_DATA);
1450 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
1451 }
1452 return 0;
1453}
1454
/* debugfs: dump the memory-controller configuration registers. */
static int r100_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(RADEON_CONFIG_MEMSIZE);
	seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
	tmp = RREG32(RADEON_MC_FB_LOCATION);
	seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
	tmp = RREG32(RADEON_BUS_CNTL);
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_MC_AGP_LOCATION);
	seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AGP_BASE);
	seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
	/* 0x01D0/0x01E4 are raw AIC register offsets without named macros. */
	tmp = RREG32(0x01D0);
	seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AIC_LO_ADDR);
	seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AIC_HI_ADDR);
	seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
	tmp = RREG32(0x01E4);
	seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
	return 0;
}
1484
/* debugfs file tables; registered by the r100_debugfs_*_init() helpers
 * below with explicit entry counts. */
static struct drm_info_list r100_debugfs_rbbm_list[] = {
	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
};

static struct drm_info_list r100_debugfs_cp_list[] = {
	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
};

static struct drm_info_list r100_debugfs_mc_info_list[] = {
	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
};
#endif
1498
/* Register the RBBM debugfs file; no-op (returns 0) without CONFIG_DEBUG_FS. */
int r100_debugfs_rbbm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
#else
	return 0;
#endif
}
1507
/* Register the two CP debugfs files; no-op without CONFIG_DEBUG_FS. */
int r100_debugfs_cp_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
#else
	return 0;
#endif
}
1516
/* Register the MC debugfs file; no-op without CONFIG_DEBUG_FS. */
int r100_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
#else
	return 0;
#endif
}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
new file mode 100644
index 000000000000..f5870a099d4f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -0,0 +1,1116 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include "drmP.h"
30#include "drm.h"
31#include "radeon_reg.h"
32#include "radeon.h"
33
34/* r300,r350,rv350,rv370,rv380 depends on : */
35void r100_hdp_reset(struct radeon_device *rdev);
36int r100_cp_reset(struct radeon_device *rdev);
37int r100_rb2d_reset(struct radeon_device *rdev);
38int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
39int r100_pci_gart_enable(struct radeon_device *rdev);
40void r100_pci_gart_disable(struct radeon_device *rdev);
41void r100_mc_setup(struct radeon_device *rdev);
42void r100_mc_disable_clients(struct radeon_device *rdev);
43int r100_gui_wait_for_idle(struct radeon_device *rdev);
44int r100_cs_packet_parse(struct radeon_cs_parser *p,
45 struct radeon_cs_packet *pkt,
46 unsigned idx);
47int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
48 struct radeon_cs_reloc **cs_reloc);
49int r100_cs_parse_packet0(struct radeon_cs_parser *p,
50 struct radeon_cs_packet *pkt,
51 unsigned *auth, unsigned n,
52 radeon_packet0_check_t check);
53int r100_cs_parse_packet3(struct radeon_cs_parser *p,
54 struct radeon_cs_packet *pkt,
55 unsigned *auth, unsigned n,
56 radeon_packet3_check_t check);
57void r100_cs_dump_packet(struct radeon_cs_parser *p,
58 struct radeon_cs_packet *pkt);
59
60/* This files gather functions specifics to:
61 * r300,r350,rv350,rv370,rv380
62 *
63 * Some of these functions might be used by newer ASICs.
64 */
65void r300_gpu_init(struct radeon_device *rdev);
66int r300_mc_wait_for_idle(struct radeon_device *rdev);
67int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
68
69
70/*
71 * rv370,rv380 PCIE GART
72 */
/* Invalidate the PCIE GART TLB.  The pulse-and-restore of the
 * INVALIDATE_TLB bit is done twice to work around a hardware bug;
 * the dummy read flushes the posted write between the two stores. */
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround HW bug do flush 2 times */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
		mb();
	}
}
87
/* Set up and enable the rv370/rv380 PCIE GART: allocate the page table
 * in VRAM, program its address and the GTT window, then turn the
 * translation on and flush the TLB.  Returns 0 on success or a negative
 * error from the allocation helpers. */
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	/* debugfs registration failure is non-fatal. */
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	}
	/* One 32-bit entry per GPU page. */
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	r = radeon_gart_table_vram_alloc(rdev);
	if (r) {
		return r;
	}
	/* discard memory request outside of configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
	/* GTT window end is inclusive of the last page. */
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(0x18, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 rdev->mc.gtt_size >> 20, table_addr);
	rdev->gart.ready = true;
	return 0;
}
133
/* Disable PCIE GART translation (keeping unmapped-access discard set)
 * and release the VRAM-resident page table mapping if present. */
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	if (rdev->gart.table.vram.robj) {
		radeon_object_kunmap(rdev->gart.table.vram.robj);
		radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}
146
147int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
148{
149 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
150
151 if (i < 0 || i > rdev->gart.num_gpu_pages) {
152 return -EINVAL;
153 }
154 addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC;
155 writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
156 return 0;
157}
158
/* Enable the appropriate GART for an r300-class chip: none when AGP is
 * in use (any internal GART is disabled), PCIE GART on PCIE boards
 * (installing the rv370 asic hooks), PCI GART otherwise. */
int r300_gart_enable(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		/* AGP aperture handles translation; make sure the
		 * on-chip GART is off. */
		if (rdev->family > CHIP_RV350) {
			rv370_pcie_gart_disable(rdev);
		} else {
			r100_pci_gart_disable(rdev);
		}
		return 0;
	}
#endif
	if (rdev->flags & RADEON_IS_PCIE) {
		/* Route the generic asic callbacks to the PCIE variants. */
		rdev->asic->gart_disable = &rv370_pcie_gart_disable;
		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
		return rv370_pcie_gart_enable(rdev);
	}
	return r100_pci_gart_enable(rdev);
}
179
180
181/*
182 * MC
183 */
/* Initialise the memory controller for r300-class chips: reset the GPU
 * blocks, lay out VRAM/GTT in the GPU address space (preferring the AGP
 * aperture when available) and program the MC.  Returns 0 on success or
 * a negative error from radeon_mc_setup(). */
int r300_mc_init(struct radeon_device *rdev)
{
	int r;

	/* debugfs failure is non-fatal. */
	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	r300_gpu_init(rdev);
	r100_pci_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE) {
		rv370_pcie_gart_disable(rdev);
	}

	/* Setup GPU memory space */
	/* 0xFFFFFFFF means "let radeon_mc_setup() pick the location". */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			/* Fall back to an internal GART of the default size. */
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	r100_mc_disable_clients(rdev);
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	r100_mc_setup(rdev);
	return 0;
}
225
226void r300_mc_fini(struct radeon_device *rdev)
227{
228 if (rdev->flags & RADEON_IS_PCIE) {
229 rv370_pcie_gart_disable(rdev);
230 radeon_gart_table_vram_free(rdev);
231 } else {
232 r100_pci_gart_disable(rdev);
233 radeon_gart_table_ram_free(rdev);
234 }
235 radeon_gart_fini(rdev);
236}
237
238
239/*
240 * Fence emission
241 */
/* Emit a fence into the CP ring: quiesce SC/US, flush the 3D caches,
 * wait for idle-and-clean, then write the fence sequence number to the
 * scratch register and fire the software interrupt.  The packet order
 * is what guarantees the fence only signals after prior rendering. */
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Who ever call radeon_fence_emit should call ring_lock and ask
	 * for enough space (today caller are ib schedule and buffer move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(rdev, PACKET0(0x43E0, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(0x43E4, 0));
	radeon_ring_write(rdev, 0);
	/* Flush 3D cache */
	radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
	radeon_ring_write(rdev, (2 << 0));
	radeon_ring_write(rdev, PACKET0(0x4F18, 0));
	radeon_ring_write(rdev, (1 << 0));
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
266
267
268/*
269 * Global GPU functions
270 */
/* Copy @num_pages pages from @src_offset to @dst_offset using the DMA
 * engine, split into chunks of at most 0x1FFFFF bytes (the packet size
 * limit).  Optionally emits @fence after the copy.  Returns 0 or a
 * negative error from ring lock / fence emit. */
int r300_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence)
{
	uint32_t size;
	uint32_t cur_size;
	int i, num_loops;
	int r = 0;

	/* radeon pitch is /64 */
	size = num_pages << PAGE_SHIFT;
	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
	/* 4 dwords per copy packet plus headroom for the waits and fence. */
	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}
	/* Must wait for 2D idle & clean before DMA or hangs might happen */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, (1 << 16));
	for (i = 0; i < num_loops; i++) {
		cur_size = size;
		if (cur_size > 0x1FFFFF) {
			cur_size = 0x1FFFFF;
		}
		size -= cur_size;
		radeon_ring_write(rdev, PACKET0(0x720, 2));
		radeon_ring_write(rdev, src_offset);
		radeon_ring_write(rdev, dst_offset);
		/* Bits 31:30 flag source/destination as memory. */
		radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
		src_offset += cur_size;
		dst_offset += cur_size;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}
314
/* Emit the initial CP ring state for r300-class chips: pipe/tiling
 * configuration, cache flushes, multisample positions and rasteriser
 * defaults.  Silently returns if the ring cannot be locked. */
void r300_ring_start(struct radeon_device *rdev)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	/* Serialise 2D/3D and gate on GUI idle. */
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(rdev, gb_tile_config);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(0x170C, 0));
	radeon_ring_write(rdev, 1 << 31);
	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	/* Flush destination and Z caches before reconfiguring. */
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	/* Centre (6/12 subpixel) multisample positions. */
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(rdev,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(rdev,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev);
}
404
405void r300_errata(struct radeon_device *rdev)
406{
407 rdev->pll_errata = 0;
408
409 if (rdev->family == CHIP_R300 &&
410 (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
411 rdev->pll_errata |= CHIP_ERRATA_R300_CG;
412 }
413}
414
415int r300_mc_wait_for_idle(struct radeon_device *rdev)
416{
417 unsigned i;
418 uint32_t tmp;
419
420 for (i = 0; i < rdev->usec_timeout; i++) {
421 /* read MC_STATUS */
422 tmp = RREG32(0x0150);
423 if (tmp & (1 << 4)) {
424 return 0;
425 }
426 DRM_UDELAY(1);
427 }
428 return -1;
429}
430
/* Initialise r300-class GPU state: reset HDP, determine the pipe count
 * per family, program the tile configuration and wait for the engines
 * to settle. */
void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	r100_hdp_reset(rdev);
	/* FIXME: rv380 one pipes ? */
	if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380 */
		rdev->num_gb_pipes = 1;
	}
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	/* NOTE(review): 0x170C is written with bit 31 set here and in
	 * r300_ring_start; register name not visible from this file. */
	tmp = RREG32(0x170C);
	WREG32(0x170C, tmp | (1 << 31));

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
}
484
/* Soft-reset the geometry assembly (GA) / VAP / CP blocks, retrying
 * until RBBM_STATUS shows both bit 20 (VAP) and bit 26 (CP) clear.
 * The CP is marked not-ready for the duration and re-initialised
 * afterwards if it was running.  Returns 0 on success, the r100_cp_init
 * result when the CP is restarted, or -1 on timeout. */
int r300_ga_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* Stop the CP queues, pulse the soft reset and let the
		 * status register settle. */
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
		(void)RREG32(RADEON_RBBM_SOFT_RESET);
		udelay(200);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (tmp & ((1 << 20) | (1 << 26))) {
			DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
			/* GA still busy soft reset it */
			WREG32(0x429C, 0x200);
			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
			WREG32(0x43E0, 0);
			WREG32(0x43E4, 0);
			WREG32(0x24AC, 0);
		}
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			break;
		}
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
	return -1;
}
535
/* Full GPU reset for r300-class chips: HDP, then RB2D, GA and CP, each
 * gated on the corresponding RBBM_STATUS busy bits.  Returns 0 when the
 * GPU ends up idle, -1 otherwise. */
int r300_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matter */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* reset GA */
	if (status & ((1 << 20) | (1 << 26))) {
		r300_ga_reset(rdev);
	}
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}
566
567
568/*
569 * r300,r350,rv350,rv380 VRAM info
570 */
571void r300_vram_info(struct radeon_device *rdev)
572{
573 uint32_t tmp;
574
575 /* DDR for all card after R300 & IGP */
576 rdev->mc.vram_is_ddr = true;
577 tmp = RREG32(RADEON_MEM_CNTL);
578 if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
579 rdev->mc.vram_width = 128;
580 } else {
581 rdev->mc.vram_width = 64;
582 }
583 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
584
585 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
586 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
587}
588
589
590/*
591 * Indirect registers accessor
592 */
593uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
594{
595 uint32_t r;
596
597 WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
598 (void)RREG32(RADEON_PCIE_INDEX);
599 r = RREG32(RADEON_PCIE_DATA);
600 return r;
601}
602
603void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
604{
605 WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
606 (void)RREG32(RADEON_PCIE_INDEX);
607 WREG32(RADEON_PCIE_DATA, (v));
608 (void)RREG32(RADEON_PCIE_DATA);
609}
610
611/*
612 * PCIE Lanes
613 */
614
/*
 * Reprogram the PCIE link width to @lanes (valid: 0/1/2/4/8/12/16;
 * anything else is treated as 16).  No-op on IGP or non-PCIE parts,
 * or when the link is already at the requested width.
 */
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		/* unknown lane counts fall back to the maximum, x16 */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	/* Already at the requested width: nothing to do. */
	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	/* Second write with RECONFIG_NOW kicks off the actual retrain. */
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	/* NOTE(review): unbounded busy-wait — assumes the register only
	 * reads 0xffffffff while the link is retraining; confirm no
	 * timeout is needed here. */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

}
673
674
675/*
676 * Debugfs info
677 */
#if defined(CONFIG_DEBUG_FS)
/*
 * debugfs dump of the PCIE GART configuration registers, one
 * "NAME 0xVALUE" line per register.
 */
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	static const struct {
		const char *name;
		unsigned reg;
	} pcie_gart_regs[] = {
		{ "PCIE_TX_GART_CNTL",     RADEON_PCIE_TX_GART_CNTL },
		{ "PCIE_TX_GART_BASE",     RADEON_PCIE_TX_GART_BASE },
		{ "PCIE_TX_GART_START_LO", RADEON_PCIE_TX_GART_START_LO },
		{ "PCIE_TX_GART_START_HI", RADEON_PCIE_TX_GART_START_HI },
		{ "PCIE_TX_GART_END_LO",   RADEON_PCIE_TX_GART_END_LO },
		{ "PCIE_TX_GART_END_HI",   RADEON_PCIE_TX_GART_END_HI },
		{ "PCIE_TX_GART_ERROR",    RADEON_PCIE_TX_GART_ERROR },
	};
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(pcie_gart_regs); i++)
		seq_printf(m, "%s 0x%08x\n", pcie_gart_regs[i].name,
			   RREG32_PCIE(pcie_gart_regs[i].reg));
	return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif
707
/*
 * Register the PCIE GART debugfs file.  Returns 0 when debugfs is
 * disabled or whatever radeon_debugfs_add_files() returns.
 */
int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	/* Derive the count from the table instead of hard-coding 1 so the
	 * two cannot drift apart if entries are ever added. */
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list,
					ARRAY_SIZE(rv370_pcie_gart_info_list));
#else
	return 0;
#endif
}
716
717
718/*
719 * CS functions
720 */
/*
 * State tracked for one render target (a color buffer or the depth
 * buffer) programmed by the command stream; r300_cs_track_check() uses
 * it to bound accesses against the backing object's size.
 */
struct r300_cs_track_cb {
	struct radeon_object *robj;	/* backing buffer object from the reloc */
	unsigned pitch;			/* pitch as parsed from RB3D_COLORPITCHn / ZB_DEPTHPITCH */
	unsigned cpp;			/* bytes per pixel derived from the buffer format */
	unsigned offset;		/* offset written to the offset register */
};
727
/*
 * Per-parse tracking of the render state a command stream has set up,
 * validated against buffer sizes before each draw packet.
 */
struct r300_cs_track {
	unsigned num_cb;		/* active color buffers (from RB3D_CCTL) */
	unsigned maxy;			/* max y extent (from SC_SCISSOR1) */
	struct r300_cs_track_cb cb[4];	/* the four possible color buffers */
	struct r300_cs_track_cb zb;	/* the depth buffer */
	bool z_enabled;			/* depth writes enabled (from ZB_CNTL) */
};
735
736int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
737{
738 unsigned i;
739 unsigned long size;
740
741 for (i = 0; i < track->num_cb; i++) {
742 if (track->cb[i].robj == NULL) {
743 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
744 return -EINVAL;
745 }
746 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
747 size += track->cb[i].offset;
748 if (size > radeon_object_size(track->cb[i].robj)) {
749 DRM_ERROR("[drm] Buffer too small for color buffer %d "
750 "(need %lu have %lu) !\n", i, size,
751 radeon_object_size(track->cb[i].robj));
752 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
753 i, track->cb[i].pitch, track->cb[i].cpp,
754 track->cb[i].offset, track->maxy);
755 return -EINVAL;
756 }
757 }
758 if (track->z_enabled) {
759 if (track->zb.robj == NULL) {
760 DRM_ERROR("[drm] No buffer for z buffer !\n");
761 return -EINVAL;
762 }
763 size = track->zb.pitch * track->zb.cpp * track->maxy;
764 size += track->zb.offset;
765 if (size > radeon_object_size(track->zb.robj)) {
766 DRM_ERROR("[drm] Buffer too small for z buffer "
767 "(need %lu have %lu) !\n", size,
768 radeon_object_size(track->zb.robj));
769 return -EINVAL;
770 }
771 }
772 return 0;
773}
774
775static inline void r300_cs_track_clear(struct r300_cs_track *track)
776{
777 unsigned i;
778
779 track->num_cb = 4;
780 track->maxy = 4096;
781 for (i = 0; i < track->num_cb; i++) {
782 track->cb[i].robj = NULL;
783 track->cb[i].pitch = 8192;
784 track->cb[i].cpp = 16;
785 track->cb[i].offset = 0;
786 }
787 track->z_enabled = true;
788 track->zb.robj = NULL;
789 track->zb.pitch = 8192;
790 track->zb.cpp = 4;
791 track->zb.offset = 0;
792}
793
/*
 * Register write-permission bitmap for command streams submitted by
 * userspace, consumed by r100_cs_parse_packet0() together with
 * r300_packet0_check().  Each 32-bit word presumably covers a group of
 * 32 register dwords, with a set bit marking a register that userspace
 * may program directly — confirm the exact bit-to-register mapping
 * against r100_cs_parse_packet0().
 */
static unsigned r300_auth_reg[] = {
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFCFCC, 0xF00E9FFF, 0x007C0000,
	0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFC, 0xFFFFFFFF,
	0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
	0x00000000, 0x00000000, 0xFFFF0000, 0x00000000,
	0x00000000, 0x0000C100, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
};
836
/*
 * Validate and patch a single type-0 register write from the command
 * stream.  Registers carrying GPU addresses get their relocation applied
 * (the patched value is written back into the indirect buffer); state
 * registers that affect buffer-size validation update @track; any other
 * register reaching this function is rejected.
 *
 * @p:	 parser state for the command stream being checked
 * @pkt: packet the write belongs to (used for error dumps)
 * @idx: index of the value dword inside the IB chunk
 * @reg: register offset being written
 *
 * Returns 0 on success or a negative error code.
 */
static int r300_packet0_check(struct radeon_cs_parser *p,
		struct radeon_cs_packet *pkt,
		unsigned idx, unsigned reg)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r300_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	unsigned i;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	track = (struct r300_cs_track *)p->track;
	switch (reg) {
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		/* Low 22 bits carry the offset in 1KB units: add the
		 * relocated GPU offset (>> 10) and keep the pitch bits. */
		tmp = ib_chunk->kdata[idx] & 0x003fffff;
		tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
		ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		/* Offset registers are 4 bytes apart -> color buffer index. */
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		/* Remember the backing object so draws can be validated
		 * against its size in r300_cs_track_check(). */
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		/* 16 texture offset registers, 4 bytes apart: only the
		 * relocation is applied, no size tracking here. */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
					idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	/* Tracked registers */
	case 0x43E4:
		/* SC_SCISSOR1 */

		/* bits 13..25 hold the bottom scissor coordinate */
		track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			/* pre-RV515 chips presumably bias scissor
			 * coordinates by 1440 — confirm against the
			 * register reference */
			track->maxy -= 1440;
		}
		break;
	case 0x4E00:
		/* RB3D_CCTL — bits 5..6 encode (number of color buffers - 1) */
		track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
		/* bits 21..24 encode the color buffer format; map each
		 * format code to its bytes-per-pixel */
		switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((ib_chunk->kdata[idx] >> 21) & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F00:
		/* ZB_CNTL — bit 1 enables depth */
		if (ib_chunk->kdata[idx] & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		break;
	case 0x4F10:
		/* ZB_FORMAT — low nibble selects the depth format */
		switch ((ib_chunk->kdata[idx] & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d) !\n",
				  (ib_chunk->kdata[idx] & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}
1003
1004static int r300_packet3_check(struct radeon_cs_parser *p,
1005 struct radeon_cs_packet *pkt)
1006{
1007 struct radeon_cs_chunk *ib_chunk;
1008 struct radeon_cs_reloc *reloc;
1009 struct r300_cs_track *track;
1010 volatile uint32_t *ib;
1011 unsigned idx;
1012 unsigned i, c;
1013 int r;
1014
1015 ib = p->ib->ptr;
1016 ib_chunk = &p->chunks[p->chunk_ib_idx];
1017 idx = pkt->idx + 1;
1018 track = (struct r300_cs_track *)p->track;
1019 switch (pkt->opcode) {
1020 case PACKET3_3D_LOAD_VBPNTR:
1021 c = ib_chunk->kdata[idx++];
1022 for (i = 0; i < (c - 1); i += 2, idx += 3) {
1023 r = r100_cs_packet_next_reloc(p, &reloc);
1024 if (r) {
1025 DRM_ERROR("No reloc for packet3 %d\n",
1026 pkt->opcode);
1027 r100_cs_dump_packet(p, pkt);
1028 return r;
1029 }
1030 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1031 r = r100_cs_packet_next_reloc(p, &reloc);
1032 if (r) {
1033 DRM_ERROR("No reloc for packet3 %d\n",
1034 pkt->opcode);
1035 r100_cs_dump_packet(p, pkt);
1036 return r;
1037 }
1038 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1039 }
1040 if (c & 1) {
1041 r = r100_cs_packet_next_reloc(p, &reloc);
1042 if (r) {
1043 DRM_ERROR("No reloc for packet3 %d\n",
1044 pkt->opcode);
1045 r100_cs_dump_packet(p, pkt);
1046 return r;
1047 }
1048 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1049 }
1050 break;
1051 case PACKET3_INDX_BUFFER:
1052 r = r100_cs_packet_next_reloc(p, &reloc);
1053 if (r) {
1054 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1055 r100_cs_dump_packet(p, pkt);
1056 return r;
1057 }
1058 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1059 break;
1060 /* Draw packet */
1061 case PACKET3_3D_DRAW_VBUF:
1062 case PACKET3_3D_DRAW_IMMD:
1063 case PACKET3_3D_DRAW_INDX:
1064 case PACKET3_3D_DRAW_VBUF_2:
1065 case PACKET3_3D_DRAW_IMMD_2:
1066 case PACKET3_3D_DRAW_INDX_2:
1067 r = r300_cs_track_check(p->rdev, track);
1068 if (r) {
1069 return r;
1070 }
1071 break;
1072 case PACKET3_NOP:
1073 break;
1074 default:
1075 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1076 return -EINVAL;
1077 }
1078 return 0;
1079}
1080
1081int r300_cs_parse(struct radeon_cs_parser *p)
1082{
1083 struct radeon_cs_packet pkt;
1084 struct r300_cs_track track;
1085 int r;
1086
1087 r300_cs_track_clear(&track);
1088 p->track = &track;
1089 do {
1090 r = r100_cs_packet_parse(p, &pkt, p->idx);
1091 if (r) {
1092 return r;
1093 }
1094 p->idx += pkt.count + 2;
1095 switch (pkt.type) {
1096 case PACKET_TYPE0:
1097 r = r100_cs_parse_packet0(p, &pkt,
1098 r300_auth_reg,
1099 ARRAY_SIZE(r300_auth_reg),
1100 &r300_packet0_check);
1101 break;
1102 case PACKET_TYPE2:
1103 break;
1104 case PACKET_TYPE3:
1105 r = r300_packet3_check(p, &pkt);
1106 break;
1107 default:
1108 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
1109 return -EINVAL;
1110 }
1111 if (r) {
1112 return r;
1113 }
1114 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1115 return 0;
1116}
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index bdbc95fa6721..70f48609515e 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -1,30 +1,34 @@
1/************************************************************************** 1/*
2 2 * Copyright 2005 Nicolai Haehnle et al.
3Copyright (C) 2004-2005 Nicolai Haehnle et al. 3 * Copyright 2008 Advanced Micro Devices, Inc.
4 4 * Copyright 2009 Jerome Glisse.
5Permission is hereby granted, free of charge, to any person obtaining a 5 *
6copy of this software and associated documentation files (the "Software"), 6 * Permission is hereby granted, free of charge, to any person obtaining a
7to deal in the Software without restriction, including without limitation 7 * copy of this software and associated documentation files (the "Software"),
8on the rights to use, copy, modify, merge, publish, distribute, sub 8 * to deal in the Software without restriction, including without limitation
9license, and/or sell copies of the Software, and to permit persons to whom 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10the Software is furnished to do so, subject to the following conditions: 10 * and/or sell copies of the Software, and to permit persons to whom the
11 11 * Software is furnished to do so, subject to the following conditions:
12The above copyright notice and this permission notice (including the next 12 *
13paragraph) shall be included in all copies or substantial portions of the 13 * The above copyright notice and this permission notice shall be included in
14Software. 14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Nicolai Haehnle
25 * Jerome Glisse
26 */
27#ifndef _R300_REG_H_
28#define _R300_REG_H_
15 29
16THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22USE OR OTHER DEALINGS IN THE SOFTWARE.
23 30
24**************************************************************************/
25 31
26#ifndef _R300_REG_H
27#define _R300_REG_H
28 32
29#define R300_MC_INIT_MISC_LAT_TIMER 0x180 33#define R300_MC_INIT_MISC_LAT_TIMER 0x180
30# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0 34# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
new file mode 100644
index 000000000000..dea497a979f2
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -0,0 +1,223 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include "drmP.h"
30#include "radeon_reg.h"
31#include "radeon.h"
32
33/* r420,r423,rv410 depends on : */
34void r100_pci_gart_disable(struct radeon_device *rdev);
35void r100_hdp_reset(struct radeon_device *rdev);
36void r100_mc_setup(struct radeon_device *rdev);
37int r100_gui_wait_for_idle(struct radeon_device *rdev);
38void r100_mc_disable_clients(struct radeon_device *rdev);
39void r300_vram_info(struct radeon_device *rdev);
40int r300_mc_wait_for_idle(struct radeon_device *rdev);
41int rv370_pcie_gart_enable(struct radeon_device *rdev);
42void rv370_pcie_gart_disable(struct radeon_device *rdev);
43
44/* This files gather functions specifics to :
45 * r420,r423,rv410
46 *
47 * Some of these functions might be used by newer ASICs.
48 */
49void r420_gpu_init(struct radeon_device *rdev);
50int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
51
52
53/*
54 * MC
55 */
/*
 * Bring up the memory controller for r420/r423/rv410: register debugfs
 * files, reset/initialize the GPU pipes, disable any active GART, pick
 * VRAM/GTT placement (using the AGP aperture when available), then
 * program the MC with its clients stopped.
 *
 * Returns 0 on success or the error from radeon_mc_setup().
 */
int r420_mc_init(struct radeon_device *rdev)
{
	int r;

	/* debugfs registration failure is non-fatal: just log it */
	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}
	if (r420_debugfs_pipes_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for pipes !\n");
	}

	r420_gpu_init(rdev);
	r100_pci_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE) {
		rv370_pcie_gart_disable(rdev);
	}

	/* Setup GPU memory space — 0xFFFFFFFF presumably acts as an
	 * "auto-place" sentinel for radeon_mc_setup(); confirm there. */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			/* AGP bring-up failed: fall back to a GART of the
			 * module-configured size */
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	r100_mc_disable_clients(rdev);
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	r100_mc_setup(rdev);
	return 0;
}
100
/*
 * Tear down GART state: disable the PCIE GART before freeing its table
 * in VRAM, then release the GART bookkeeping (disabling first
 * presumably prevents the hardware walking freed memory — confirm).
 */
void r420_mc_fini(struct radeon_device *rdev)
{
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
107
108
109/*
110 * Global GPU functions
111 */
/*
 * Chip errata setup for r420-family parts: no PLL errata apply, so the
 * flags are simply cleared.
 */
void r420_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}
116
/*
 * Detect and program the GB pipe configuration for r420-family parts,
 * then flush/configure the 2D destination cache.  Register offsets are
 * raw (0x4274 = GA_ENHANCE, 0x402C = GB_PIPE_SELECT, 0x4018 =
 * GB_TILE_CONFIG — presumably; confirm against the register reference).
 */
void r420_pipes_init(struct radeon_device *rdev)
{
	unsigned tmp;
	unsigned gb_pipe_select;
	unsigned num_pipes;

	/* GA_ENHANCE workaround TCL deadlock issue */
	WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
	/* get max number of pipes */
	gb_pipe_select = RREG32(0x402C);
	num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
	rdev->num_gb_pipes = num_pipes;
	tmp = 0;
	switch (num_pipes) {
	default:
		/* force to 1 pipe */
		num_pipes = 1;
		/* fall through */
	case 1:
		tmp = (0 << 1);
		break;
	case 2:
		tmp = (3 << 1);
		break;
	case 3:
		tmp = (6 << 1);
		break;
	case 4:
		tmp = (7 << 1);
		break;
	}
	/* one enable bit per active pipe */
	WREG32(0x42C8, (1 << num_pipes) - 1);
	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	tmp |= (1 << 4) | (1 << 0);
	WREG32(0x4018, tmp);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(0x170C);
	WREG32(0x170C, tmp | (1 << 31));

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       RREG32(R300_RB2D_DSTCACHE_MODE) |
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
}
170
/*
 * GPU init for r420-family parts: reset the HDP, program the pipe
 * configuration, then wait for the memory controller to go idle.
 */
void r420_gpu_init(struct radeon_device *rdev)
{
	r100_hdp_reset(rdev);
	r420_pipes_init(rdev);
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}
180
181
182/*
183 * r420,r423,rv410 VRAM info
184 */
/* r420/r423/rv410 report VRAM exactly like r300 parts: delegate. */
void r420_vram_info(struct radeon_device *rdev)
{
	r300_vram_info(rdev);
}
189
190
191/*
192 * Debugfs info
193 */
#if defined(CONFIG_DEBUG_FS)
/*
 * debugfs dump of the pipe configuration registers, one "NAME 0xVALUE"
 * line per register.
 */
static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
{
	static const struct {
		const char *name;
		unsigned reg;
	} pipe_regs[] = {
		{ "GB_PIPE_SELECT",  R400_GB_PIPE_SELECT },
		{ "GB_TILE_CONFIG",  R300_GB_TILE_CONFIG },
		{ "DST_PIPE_CONFIG", R300_DST_PIPE_CONFIG },
	};
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(pipe_regs); i++)
		seq_printf(m, "%s 0x%08x\n", pipe_regs[i].name,
			   RREG32(pipe_regs[i].reg));
	return 0;
}

static struct drm_info_list r420_pipes_info_list[] = {
	{"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
};
#endif
215
/*
 * Register the pipes debugfs file.  Returns 0 when debugfs is disabled
 * or whatever radeon_debugfs_add_files() returns.
 */
int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	/* Derive the count from the table instead of hard-coding 1 so the
	 * two cannot drift apart if entries are ever added. */
	return radeon_debugfs_add_files(rdev, r420_pipes_info_list,
					ARRAY_SIZE(r420_pipes_info_list));
#else
	return 0;
#endif
}
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
new file mode 100644
index 000000000000..9070a1c2ce23
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -0,0 +1,749 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __R500_REG_H__
29#define __R500_REG_H__
30
31/* pipe config regs */
32#define R300_GA_POLY_MODE 0x4288
33# define R300_FRONT_PTYPE_POINT (0 << 4)
34# define R300_FRONT_PTYPE_LINE (1 << 4)
35# define R300_FRONT_PTYPE_TRIANGE (2 << 4)
36# define R300_BACK_PTYPE_POINT (0 << 7)
37# define R300_BACK_PTYPE_LINE (1 << 7)
38# define R300_BACK_PTYPE_TRIANGE (2 << 7)
39#define R300_GA_ROUND_MODE 0x428c
40# define R300_GEOMETRY_ROUND_TRUNC (0 << 0)
41# define R300_GEOMETRY_ROUND_NEAREST (1 << 0)
42# define R300_COLOR_ROUND_TRUNC (0 << 2)
43# define R300_COLOR_ROUND_NEAREST (1 << 2)
44#define R300_GB_MSPOS0 0x4010
45# define R300_MS_X0_SHIFT 0
46# define R300_MS_Y0_SHIFT 4
47# define R300_MS_X1_SHIFT 8
48# define R300_MS_Y1_SHIFT 12
49# define R300_MS_X2_SHIFT 16
50# define R300_MS_Y2_SHIFT 20
51# define R300_MSBD0_Y_SHIFT 24
52# define R300_MSBD0_X_SHIFT 28
53#define R300_GB_MSPOS1 0x4014
54# define R300_MS_X3_SHIFT 0
55# define R300_MS_Y3_SHIFT 4
56# define R300_MS_X4_SHIFT 8
57# define R300_MS_Y4_SHIFT 12
58# define R300_MS_X5_SHIFT 16
59# define R300_MS_Y5_SHIFT 20
60# define R300_MSBD1_SHIFT 24
61
62#define R300_GA_ENHANCE 0x4274
63# define R300_GA_DEADLOCK_CNTL (1 << 0)
64# define R300_GA_FASTSYNC_CNTL (1 << 1)
65#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
66# define R300_RB3D_DC_FLUSH (2 << 0)
67# define R300_RB3D_DC_FREE (2 << 2)
68# define R300_RB3D_DC_FINISH (1 << 4)
69#define R300_RB3D_ZCACHE_CTLSTAT 0x4f18
70# define R300_ZC_FLUSH (1 << 0)
71# define R300_ZC_FREE (1 << 1)
72# define R300_ZC_FLUSH_ALL 0x3
73#define R400_GB_PIPE_SELECT 0x402c
74#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */
75#define R500_SU_REG_DEST 0x42c8
76#define R300_GB_TILE_CONFIG 0x4018
77# define R300_ENABLE_TILING (1 << 0)
78# define R300_PIPE_COUNT_RV350 (0 << 1)
79# define R300_PIPE_COUNT_R300 (3 << 1)
80# define R300_PIPE_COUNT_R420_3P (6 << 1)
81# define R300_PIPE_COUNT_R420 (7 << 1)
82# define R300_TILE_SIZE_8 (0 << 4)
83# define R300_TILE_SIZE_16 (1 << 4)
84# define R300_TILE_SIZE_32 (2 << 4)
85# define R300_SUBPIXEL_1_12 (0 << 16)
86# define R300_SUBPIXEL_1_16 (1 << 16)
87#define R300_DST_PIPE_CONFIG 0x170c
88# define R300_PIPE_AUTO_CONFIG (1 << 31)
89#define R300_RB2D_DSTCACHE_MODE 0x3428
90# define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
91# define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
92
93#define RADEON_CP_STAT 0x7C0
94#define RADEON_RBBM_CMDFIFO_ADDR 0xE70
95#define RADEON_RBBM_CMDFIFO_DATA 0xE74
96#define RADEON_ISYNC_CNTL 0x1724
97# define RADEON_ISYNC_ANY2D_IDLE3D (1 << 0)
98# define RADEON_ISYNC_ANY3D_IDLE2D (1 << 1)
99# define RADEON_ISYNC_TRIG2D_IDLE3D (1 << 2)
100# define RADEON_ISYNC_TRIG3D_IDLE2D (1 << 3)
101# define RADEON_ISYNC_WAIT_IDLEGUI (1 << 4)
102# define RADEON_ISYNC_CPSCRATCH_IDLEGUI (1 << 5)
103
104#define RS480_NB_MC_INDEX 0x168
105# define RS480_NB_MC_IND_WR_EN (1 << 8)
106#define RS480_NB_MC_DATA 0x16c
107
108/*
109 * RS690
110 */
111#define RS690_MCCFG_FB_LOCATION 0x100
112#define RS690_MC_FB_START_MASK 0x0000FFFF
113#define RS690_MC_FB_START_SHIFT 0
114#define RS690_MC_FB_TOP_MASK 0xFFFF0000
115#define RS690_MC_FB_TOP_SHIFT 16
116#define RS690_MCCFG_AGP_LOCATION 0x101
117#define RS690_MC_AGP_START_MASK 0x0000FFFF
118#define RS690_MC_AGP_START_SHIFT 0
119#define RS690_MC_AGP_TOP_MASK 0xFFFF0000
120#define RS690_MC_AGP_TOP_SHIFT 16
121#define RS690_MCCFG_AGP_BASE 0x102
122#define RS690_MCCFG_AGP_BASE_2 0x103
123#define RS690_MC_INIT_MISC_LAT_TIMER 0x104
124#define RS690_HDP_FB_LOCATION 0x0134
125#define RS690_MC_INDEX 0x78
126# define RS690_MC_INDEX_MASK 0x1ff
127# define RS690_MC_INDEX_WR_EN (1 << 9)
128# define RS690_MC_INDEX_WR_ACK 0x7f
129#define RS690_MC_DATA 0x7c
130#define RS690_MC_STATUS 0x90
131#define RS690_MC_STATUS_IDLE (1 << 0)
132#define RS480_AGP_BASE_2 0x0164
133#define RS480_MC_MISC_CNTL 0x18
134# define RS480_DISABLE_GTW (1 << 1)
135# define RS480_GART_INDEX_REG_EN (1 << 12)
136# define RS690_BLOCK_GFX_D3_EN (1 << 14)
137#define RS480_GART_FEATURE_ID 0x2b
138# define RS480_HANG_EN (1 << 11)
139# define RS480_TLB_ENABLE (1 << 18)
140# define RS480_P2P_ENABLE (1 << 19)
141# define RS480_GTW_LAC_EN (1 << 25)
142# define RS480_2LEVEL_GART (0 << 30)
143# define RS480_1LEVEL_GART (1 << 30)
144# define RS480_PDC_EN (1 << 31)
145#define RS480_GART_BASE 0x2c
146#define RS480_GART_CACHE_CNTRL 0x2e
147# define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
148#define RS480_AGP_ADDRESS_SPACE_SIZE 0x38
149# define RS480_GART_EN (1 << 0)
150# define RS480_VA_SIZE_32MB (0 << 1)
151# define RS480_VA_SIZE_64MB (1 << 1)
152# define RS480_VA_SIZE_128MB (2 << 1)
153# define RS480_VA_SIZE_256MB (3 << 1)
154# define RS480_VA_SIZE_512MB (4 << 1)
155# define RS480_VA_SIZE_1GB (5 << 1)
156# define RS480_VA_SIZE_2GB (6 << 1)
157#define RS480_AGP_MODE_CNTL 0x39
158# define RS480_POST_GART_Q_SIZE (1 << 18)
159# define RS480_NONGART_SNOOP (1 << 19)
160# define RS480_AGP_RD_BUF_SIZE (1 << 20)
161# define RS480_REQ_TYPE_SNOOP_SHIFT 22
162# define RS480_REQ_TYPE_SNOOP_MASK 0x3
163# define RS480_REQ_TYPE_SNOOP_DIS (1 << 24)
164
165#define RS690_AIC_CTRL_SCRATCH 0x3A
166# define RS690_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
167
168/*
169 * RS600
170 */
171#define RS600_MC_STATUS 0x0
172#define RS600_MC_STATUS_IDLE (1 << 0)
173#define RS600_MC_INDEX 0x70
174# define RS600_MC_ADDR_MASK 0xffff
175# define RS600_MC_IND_SEQ_RBS_0 (1 << 16)
176# define RS600_MC_IND_SEQ_RBS_1 (1 << 17)
177# define RS600_MC_IND_SEQ_RBS_2 (1 << 18)
178# define RS600_MC_IND_SEQ_RBS_3 (1 << 19)
179# define RS600_MC_IND_AIC_RBS (1 << 20)
180# define RS600_MC_IND_CITF_ARB0 (1 << 21)
181# define RS600_MC_IND_CITF_ARB1 (1 << 22)
182# define RS600_MC_IND_WR_EN (1 << 23)
#define RS600_MC_DATA 0x74
/* NOTE(review): RS600_MC_STATUS is also defined earlier in this header
 * (same offset 0x0) with an idle bit RS600_MC_STATUS_IDLE = (1 << 0),
 * while here the idle bit RS600_MC_IDLE is (1 << 1). The duplicate
 * register define is harmless (identical redefinition), but confirm
 * which idle bit is correct before relying on either. */
#define RS600_MC_STATUS 0x0
# define RS600_MC_IDLE (1 << 1)
186#define RS600_MC_FB_LOCATION 0x4
187#define RS600_MC_FB_START_MASK 0x0000FFFF
188#define RS600_MC_FB_START_SHIFT 0
189#define RS600_MC_FB_TOP_MASK 0xFFFF0000
190#define RS600_MC_FB_TOP_SHIFT 16
191#define RS600_MC_AGP_LOCATION 0x5
192#define RS600_MC_AGP_START_MASK 0x0000FFFF
193#define RS600_MC_AGP_START_SHIFT 0
194#define RS600_MC_AGP_TOP_MASK 0xFFFF0000
195#define RS600_MC_AGP_TOP_SHIFT 16
196#define RS600_MC_AGP_BASE 0x6
197#define RS600_MC_AGP_BASE_2 0x7
198#define RS600_MC_CNTL1 0x9
199# define RS600_ENABLE_PAGE_TABLES (1 << 26)
200#define RS600_MC_PT0_CNTL 0x100
201# define RS600_ENABLE_PT (1 << 0)
202# define RS600_EFFECTIVE_L2_CACHE_SIZE(x) ((x) << 15)
203# define RS600_EFFECTIVE_L2_QUEUE_SIZE(x) ((x) << 21)
204# define RS600_INVALIDATE_ALL_L1_TLBS (1 << 28)
205# define RS600_INVALIDATE_L2_CACHE (1 << 29)
206#define RS600_MC_PT0_CONTEXT0_CNTL 0x102
207# define RS600_ENABLE_PAGE_TABLE (1 << 0)
208# define RS600_PAGE_TABLE_TYPE_FLAT (0 << 1)
209#define RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x112
210#define RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x114
211#define RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x11c
212#define RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x12c
213#define RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x13c
214#define RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x14c
215#define RS600_MC_PT0_CLIENT0_CNTL 0x16c
216# define RS600_ENABLE_TRANSLATION_MODE_OVERRIDE (1 << 0)
217# define RS600_TRANSLATION_MODE_OVERRIDE (1 << 1)
218# define RS600_SYSTEM_ACCESS_MODE_MASK (3 << 8)
219# define RS600_SYSTEM_ACCESS_MODE_PA_ONLY (0 << 8)
220# define RS600_SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 8)
221# define RS600_SYSTEM_ACCESS_MODE_IN_SYS (2 << 8)
222# define RS600_SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 8)
223# define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH (0 << 10)
224# define RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE (1 << 10)
225# define RS600_EFFECTIVE_L1_CACHE_SIZE(x) ((x) << 11)
226# define RS600_ENABLE_FRAGMENT_PROCESSING (1 << 14)
227# define RS600_EFFECTIVE_L1_QUEUE_SIZE(x) ((x) << 15)
228# define RS600_INVALIDATE_L1_TLB (1 << 20)
229/* rs600/rs690/rs740 */
230# define RS600_BUS_MASTER_DIS (1 << 14)
231# define RS600_MSI_REARM (1 << 20)
232/* see RS400_MSI_REARM in AIC_CNTL for rs480 */
233
234
235
236#define RV515_MC_FB_LOCATION 0x01
237#define RV515_MC_FB_START_MASK 0x0000FFFF
238#define RV515_MC_FB_START_SHIFT 0
239#define RV515_MC_FB_TOP_MASK 0xFFFF0000
240#define RV515_MC_FB_TOP_SHIFT 16
241#define RV515_MC_AGP_LOCATION 0x02
242#define RV515_MC_AGP_START_MASK 0x0000FFFF
243#define RV515_MC_AGP_START_SHIFT 0
244#define RV515_MC_AGP_TOP_MASK 0xFFFF0000
245#define RV515_MC_AGP_TOP_SHIFT 16
246#define RV515_MC_AGP_BASE 0x03
247#define RV515_MC_AGP_BASE_2 0x04
248
249#define R520_MC_FB_LOCATION 0x04
250#define R520_MC_FB_START_MASK 0x0000FFFF
251#define R520_MC_FB_START_SHIFT 0
252#define R520_MC_FB_TOP_MASK 0xFFFF0000
253#define R520_MC_FB_TOP_SHIFT 16
254#define R520_MC_AGP_LOCATION 0x05
255#define R520_MC_AGP_START_MASK 0x0000FFFF
256#define R520_MC_AGP_START_SHIFT 0
257#define R520_MC_AGP_TOP_MASK 0xFFFF0000
258#define R520_MC_AGP_TOP_SHIFT 16
259#define R520_MC_AGP_BASE 0x06
260#define R520_MC_AGP_BASE_2 0x07
261
262
263#define AVIVO_MC_INDEX 0x0070
264#define R520_MC_STATUS 0x00
265#define R520_MC_STATUS_IDLE (1<<1)
266#define RV515_MC_STATUS 0x08
267#define RV515_MC_STATUS_IDLE (1<<4)
268#define RV515_MC_INIT_MISC_LAT_TIMER 0x09
269#define AVIVO_MC_DATA 0x0074
270
271#define R520_MC_IND_INDEX 0x70
272#define R520_MC_IND_WR_EN (1 << 24)
273#define R520_MC_IND_DATA 0x74
274
275#define RV515_MC_CNTL 0x5
276# define RV515_MEM_NUM_CHANNELS_MASK 0x3
277#define R520_MC_CNTL0 0x8
278# define R520_MEM_NUM_CHANNELS_MASK (0x3 << 24)
279# define R520_MEM_NUM_CHANNELS_SHIFT 24
280# define R520_MC_CHANNEL_SIZE (1 << 23)
281
282#define AVIVO_CP_DYN_CNTL 0x000f /* PLL */
283# define AVIVO_CP_FORCEON (1 << 0)
284#define AVIVO_E2_DYN_CNTL 0x0011 /* PLL */
285# define AVIVO_E2_FORCEON (1 << 0)
286#define AVIVO_IDCT_DYN_CNTL 0x0013 /* PLL */
287# define AVIVO_IDCT_FORCEON (1 << 0)
288
289#define AVIVO_HDP_FB_LOCATION 0x134
290
291#define AVIVO_VGA_RENDER_CONTROL 0x0300
292# define AVIVO_VGA_VSTATUS_CNTL_MASK (3 << 16)
293#define AVIVO_D1VGA_CONTROL 0x0330
294# define AVIVO_DVGA_CONTROL_MODE_ENABLE (1<<0)
295# define AVIVO_DVGA_CONTROL_TIMING_SELECT (1<<8)
296# define AVIVO_DVGA_CONTROL_SYNC_POLARITY_SELECT (1<<9)
297# define AVIVO_DVGA_CONTROL_OVERSCAN_TIMING_SELECT (1<<10)
298# define AVIVO_DVGA_CONTROL_OVERSCAN_COLOR_EN (1<<16)
299# define AVIVO_DVGA_CONTROL_ROTATE (1<<24)
300#define AVIVO_D2VGA_CONTROL 0x0338
301
302#define AVIVO_EXT1_PPLL_REF_DIV_SRC 0x400
303#define AVIVO_EXT1_PPLL_REF_DIV 0x404
304#define AVIVO_EXT1_PPLL_UPDATE_LOCK 0x408
305#define AVIVO_EXT1_PPLL_UPDATE_CNTL 0x40c
306
307#define AVIVO_EXT2_PPLL_REF_DIV_SRC 0x410
308#define AVIVO_EXT2_PPLL_REF_DIV 0x414
309#define AVIVO_EXT2_PPLL_UPDATE_LOCK 0x418
310#define AVIVO_EXT2_PPLL_UPDATE_CNTL 0x41c
311
312#define AVIVO_EXT1_PPLL_FB_DIV 0x430
313#define AVIVO_EXT2_PPLL_FB_DIV 0x434
314
315#define AVIVO_EXT1_PPLL_POST_DIV_SRC 0x438
316#define AVIVO_EXT1_PPLL_POST_DIV 0x43c
317
318#define AVIVO_EXT2_PPLL_POST_DIV_SRC 0x440
319#define AVIVO_EXT2_PPLL_POST_DIV 0x444
320
321#define AVIVO_EXT1_PPLL_CNTL 0x448
322#define AVIVO_EXT2_PPLL_CNTL 0x44c
323
324#define AVIVO_P1PLL_CNTL 0x450
325#define AVIVO_P2PLL_CNTL 0x454
326#define AVIVO_P1PLL_INT_SS_CNTL 0x458
327#define AVIVO_P2PLL_INT_SS_CNTL 0x45c
328#define AVIVO_P1PLL_TMDSA_CNTL 0x460
329#define AVIVO_P2PLL_LVTMA_CNTL 0x464
330
331#define AVIVO_PCLK_CRTC1_CNTL 0x480
332#define AVIVO_PCLK_CRTC2_CNTL 0x484
333
334#define AVIVO_D1CRTC_H_TOTAL 0x6000
335#define AVIVO_D1CRTC_H_BLANK_START_END 0x6004
336#define AVIVO_D1CRTC_H_SYNC_A 0x6008
337#define AVIVO_D1CRTC_H_SYNC_A_CNTL 0x600c
338#define AVIVO_D1CRTC_H_SYNC_B 0x6010
339#define AVIVO_D1CRTC_H_SYNC_B_CNTL 0x6014
340
341#define AVIVO_D1CRTC_V_TOTAL 0x6020
342#define AVIVO_D1CRTC_V_BLANK_START_END 0x6024
343#define AVIVO_D1CRTC_V_SYNC_A 0x6028
344#define AVIVO_D1CRTC_V_SYNC_A_CNTL 0x602c
345#define AVIVO_D1CRTC_V_SYNC_B 0x6030
346#define AVIVO_D1CRTC_V_SYNC_B_CNTL 0x6034
347
348#define AVIVO_D1CRTC_CONTROL 0x6080
349# define AVIVO_CRTC_EN (1 << 0)
350#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
351#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
352#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
353#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
354
355/* master controls */
356#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
357#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc
358
359#define AVIVO_D1GRPH_ENABLE 0x6100
360#define AVIVO_D1GRPH_CONTROL 0x6104
361# define AVIVO_D1GRPH_CONTROL_DEPTH_8BPP (0 << 0)
362# define AVIVO_D1GRPH_CONTROL_DEPTH_16BPP (1 << 0)
363# define AVIVO_D1GRPH_CONTROL_DEPTH_32BPP (2 << 0)
364# define AVIVO_D1GRPH_CONTROL_DEPTH_64BPP (3 << 0)
365
366# define AVIVO_D1GRPH_CONTROL_8BPP_INDEXED (0 << 8)
367
368# define AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555 (0 << 8)
369# define AVIVO_D1GRPH_CONTROL_16BPP_RGB565 (1 << 8)
370# define AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444 (2 << 8)
371# define AVIVO_D1GRPH_CONTROL_16BPP_AI88 (3 << 8)
372# define AVIVO_D1GRPH_CONTROL_16BPP_MONO16 (4 << 8)
373
374# define AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888 (0 << 8)
375# define AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010 (1 << 8)
376# define AVIVO_D1GRPH_CONTROL_32BPP_DIGITAL (2 << 8)
377# define AVIVO_D1GRPH_CONTROL_32BPP_8B_ARGB2101010 (3 << 8)
378
379
380# define AVIVO_D1GRPH_CONTROL_64BPP_ARGB16161616 (0 << 8)
381
382# define AVIVO_D1GRPH_SWAP_RB (1 << 16)
383# define AVIVO_D1GRPH_TILED (1 << 20)
384# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21)
385
386#define AVIVO_D1GRPH_LUT_SEL 0x6108
387#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
388#define AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
389#define AVIVO_D1GRPH_PITCH 0x6120
390#define AVIVO_D1GRPH_SURFACE_OFFSET_X 0x6124
391#define AVIVO_D1GRPH_SURFACE_OFFSET_Y 0x6128
392#define AVIVO_D1GRPH_X_START 0x612c
393#define AVIVO_D1GRPH_Y_START 0x6130
394#define AVIVO_D1GRPH_X_END 0x6134
395#define AVIVO_D1GRPH_Y_END 0x6138
396#define AVIVO_D1GRPH_UPDATE 0x6144
397# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
398#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148
399
400#define AVIVO_D1CUR_CONTROL 0x6400
401# define AVIVO_D1CURSOR_EN (1 << 0)
402# define AVIVO_D1CURSOR_MODE_SHIFT 8
403# define AVIVO_D1CURSOR_MODE_MASK (3 << 8)
404# define AVIVO_D1CURSOR_MODE_24BPP 2
405#define AVIVO_D1CUR_SURFACE_ADDRESS 0x6408
406#define AVIVO_D1CUR_SIZE 0x6410
407#define AVIVO_D1CUR_POSITION 0x6414
408#define AVIVO_D1CUR_HOT_SPOT 0x6418
409#define AVIVO_D1CUR_UPDATE 0x6424
410# define AVIVO_D1CURSOR_UPDATE_LOCK (1 << 16)
411
412#define AVIVO_DC_LUT_RW_SELECT 0x6480
413#define AVIVO_DC_LUT_RW_MODE 0x6484
414#define AVIVO_DC_LUT_RW_INDEX 0x6488
415#define AVIVO_DC_LUT_SEQ_COLOR 0x648c
416#define AVIVO_DC_LUT_PWL_DATA 0x6490
417#define AVIVO_DC_LUT_30_COLOR 0x6494
418#define AVIVO_DC_LUT_READ_PIPE_SELECT 0x6498
419#define AVIVO_DC_LUT_WRITE_EN_MASK 0x649c
420#define AVIVO_DC_LUT_AUTOFILL 0x64a0
421
422#define AVIVO_DC_LUTA_CONTROL 0x64c0
423#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE 0x64c4
424#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN 0x64c8
425#define AVIVO_DC_LUTA_BLACK_OFFSET_RED 0x64cc
426#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE 0x64d0
427#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN 0x64d4
428#define AVIVO_DC_LUTA_WHITE_OFFSET_RED 0x64d8
429
430#define AVIVO_DC_LB_MEMORY_SPLIT 0x6520
431# define AVIVO_DC_LB_MEMORY_SPLIT_MASK 0x3
432# define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT 0
433# define AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
434# define AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
435# define AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY 2
436# define AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
437# define AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
438# define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4
439# define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff
440
441#define R500_DxMODE_INT_MASK 0x6540
442#define R500_D1MODE_INT_MASK (1<<0)
443#define R500_D2MODE_INT_MASK (1<<8)
444
445#define AVIVO_D1MODE_DATA_FORMAT 0x6528
446# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0)
447#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C
448#define AVIVO_D1MODE_VIEWPORT_START 0x6580
449#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584
450#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588
451#define AVIVO_D1MODE_EXT_OVERSCAN_TOP_BOTTOM 0x658c
452
453#define AVIVO_D1SCL_SCALER_ENABLE 0x6590
454#define AVIVO_D1SCL_SCALER_TAP_CONTROL 0x6594
455#define AVIVO_D1SCL_UPDATE 0x65cc
456# define AVIVO_D1SCL_UPDATE_LOCK (1 << 16)
457
458/* second crtc */
459#define AVIVO_D2CRTC_H_TOTAL 0x6800
460#define AVIVO_D2CRTC_H_BLANK_START_END 0x6804
461#define AVIVO_D2CRTC_H_SYNC_A 0x6808
462#define AVIVO_D2CRTC_H_SYNC_A_CNTL 0x680c
463#define AVIVO_D2CRTC_H_SYNC_B 0x6810
464#define AVIVO_D2CRTC_H_SYNC_B_CNTL 0x6814
465
466#define AVIVO_D2CRTC_V_TOTAL 0x6820
467#define AVIVO_D2CRTC_V_BLANK_START_END 0x6824
468#define AVIVO_D2CRTC_V_SYNC_A 0x6828
469#define AVIVO_D2CRTC_V_SYNC_A_CNTL 0x682c
470#define AVIVO_D2CRTC_V_SYNC_B 0x6830
471#define AVIVO_D2CRTC_V_SYNC_B_CNTL 0x6834
472
473#define AVIVO_D2CRTC_CONTROL 0x6880
474#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
475#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
476#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
477#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
478
479#define AVIVO_D2GRPH_ENABLE 0x6900
480#define AVIVO_D2GRPH_CONTROL 0x6904
481#define AVIVO_D2GRPH_LUT_SEL 0x6908
482#define AVIVO_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
483#define AVIVO_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
484#define AVIVO_D2GRPH_PITCH 0x6920
485#define AVIVO_D2GRPH_SURFACE_OFFSET_X 0x6924
486#define AVIVO_D2GRPH_SURFACE_OFFSET_Y 0x6928
487#define AVIVO_D2GRPH_X_START 0x692c
488#define AVIVO_D2GRPH_Y_START 0x6930
489#define AVIVO_D2GRPH_X_END 0x6934
490#define AVIVO_D2GRPH_Y_END 0x6938
491#define AVIVO_D2GRPH_UPDATE 0x6944
492#define AVIVO_D2GRPH_FLIP_CONTROL 0x6948
493
494#define AVIVO_D2CUR_CONTROL 0x6c00
495#define AVIVO_D2CUR_SURFACE_ADDRESS 0x6c08
496#define AVIVO_D2CUR_SIZE 0x6c10
497#define AVIVO_D2CUR_POSITION 0x6c14
498
499#define AVIVO_D2MODE_VIEWPORT_START 0x6d80
500#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84
501#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88
502#define AVIVO_D2MODE_EXT_OVERSCAN_TOP_BOTTOM 0x6d8c
503
504#define AVIVO_D2SCL_SCALER_ENABLE 0x6d90
505#define AVIVO_D2SCL_SCALER_TAP_CONTROL 0x6d94
506
507#define AVIVO_DDIA_BIT_DEPTH_CONTROL 0x7214
508
509#define AVIVO_DACA_ENABLE 0x7800
510# define AVIVO_DAC_ENABLE (1 << 0)
511#define AVIVO_DACA_SOURCE_SELECT 0x7804
512# define AVIVO_DAC_SOURCE_CRTC1 (0 << 0)
513# define AVIVO_DAC_SOURCE_CRTC2 (1 << 0)
514# define AVIVO_DAC_SOURCE_TV (2 << 0)
515
516#define AVIVO_DACA_FORCE_OUTPUT_CNTL 0x783c
517# define AVIVO_DACA_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0)
518# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8)
519# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0)
520# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1)
521# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2)
522# define AVIVO_DACA_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24)
523#define AVIVO_DACA_POWERDOWN 0x7850
524# define AVIVO_DACA_POWERDOWN_POWERDOWN (1 << 0)
525# define AVIVO_DACA_POWERDOWN_BLUE (1 << 8)
526# define AVIVO_DACA_POWERDOWN_GREEN (1 << 16)
527# define AVIVO_DACA_POWERDOWN_RED (1 << 24)
528
529#define AVIVO_DACB_ENABLE 0x7a00
530#define AVIVO_DACB_SOURCE_SELECT 0x7a04
531#define AVIVO_DACB_FORCE_OUTPUT_CNTL 0x7a3c
532# define AVIVO_DACB_FORCE_OUTPUT_CNTL_FORCE_DATA_EN (1 << 0)
533# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_SHIFT (8)
534# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_BLUE (1 << 0)
535# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_GREEN (1 << 1)
536# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_SEL_RED (1 << 2)
537# define AVIVO_DACB_FORCE_OUTPUT_CNTL_DATA_ON_BLANKB_ONLY (1 << 24)
#define AVIVO_DACB_POWERDOWN 0x7a50
# define AVIVO_DACB_POWERDOWN_POWERDOWN (1 << 0)
# define AVIVO_DACB_POWERDOWN_BLUE (1 << 8)
# define AVIVO_DACB_POWERDOWN_GREEN (1 << 16)
/* Value was missing; mirrors AVIVO_DACA_POWERDOWN_RED in the DACA block. */
# define AVIVO_DACB_POWERDOWN_RED (1 << 24)
543
544#define AVIVO_TMDSA_CNTL 0x7880
545# define AVIVO_TMDSA_CNTL_ENABLE (1 << 0)
546# define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4)
547# define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8)
548# define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12)
549# define AVIVO_TMDSA_CNTL_PIXEL_ENCODING (1 << 16)
550# define AVIVO_TMDSA_CNTL_DUAL_LINK_ENABLE (1 << 24)
551# define AVIVO_TMDSA_CNTL_SWAP (1 << 28)
552#define AVIVO_TMDSA_SOURCE_SELECT 0x7884
553/* 78a8 appears to be some kind of (reasonably tolerant) clock?
554 * 78d0 definitely hits the transmitter, definitely clock. */
555/* MYSTERY1 This appears to control dithering? */
556#define AVIVO_TMDSA_BIT_DEPTH_CONTROL 0x7894
557# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0)
558# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4)
559# define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8)
560# define AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12)
561# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16)
562# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
563# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24)
564# define AVIVO_TMDS_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
565#define AVIVO_TMDSA_DCBALANCER_CONTROL 0x78d0
566# define AVIVO_TMDSA_DCBALANCER_CONTROL_EN (1 << 0)
567# define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_EN (1 << 8)
568# define AVIVO_TMDSA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16)
569# define AVIVO_TMDSA_DCBALANCER_CONTROL_FORCE (1 << 24)
570#define AVIVO_TMDSA_DATA_SYNCHRONIZATION 0x78d8
571# define AVIVO_TMDSA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0)
572# define AVIVO_TMDSA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8)
573#define AVIVO_TMDSA_CLOCK_ENABLE 0x7900
574#define AVIVO_TMDSA_TRANSMITTER_ENABLE 0x7904
575# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX0_ENABLE (1 << 0)
576# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1)
577# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2)
578# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3)
579# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4)
580# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX1_ENABLE (1 << 8)
581# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10)
582# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11)
583# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12)
584# define AVIVO_TMDSA_TRANSMITTER_ENABLE_TX_ENABLE_HPD_MASK (1 << 16)
585# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17)
586# define AVIVO_TMDSA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18)
587
588#define AVIVO_TMDSA_TRANSMITTER_CONTROL 0x7910
589# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0)
590# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1)
591# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
592# define AVIVO_TMDSA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4)
593# define AVIVO_TMDSA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5)
594# define AVIVO_TMDSA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6)
595# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK (1 << 8)
596# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13)
597# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK (1 << 14)
598# define AVIVO_TMDSA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15)
599# define AVIVO_TMDSA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
600# define AVIVO_TMDSA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28)
601# define AVIVO_TMDSA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29)
602# define AVIVO_TMDSA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)
603
604#define AVIVO_LVTMA_CNTL 0x7a80
605# define AVIVO_LVTMA_CNTL_ENABLE (1 << 0)
606# define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4)
607# define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8)
608# define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12)
609# define AVIVO_LVTMA_CNTL_PIXEL_ENCODING (1 << 16)
610# define AVIVO_LVTMA_CNTL_DUAL_LINK_ENABLE (1 << 24)
611# define AVIVO_LVTMA_CNTL_SWAP (1 << 28)
612#define AVIVO_LVTMA_SOURCE_SELECT 0x7a84
613#define AVIVO_LVTMA_COLOR_FORMAT 0x7a88
614#define AVIVO_LVTMA_BIT_DEPTH_CONTROL 0x7a94
615# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN (1 << 0)
616# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH (1 << 4)
617# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN (1 << 8)
618# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH (1 << 12)
619# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_EN (1 << 16)
620# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH (1 << 20)
621# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL (1 << 24)
622# define AVIVO_LVTMA_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_RESET (1 << 26)
623
624
625
626#define AVIVO_LVTMA_DCBALANCER_CONTROL 0x7ad0
627# define AVIVO_LVTMA_DCBALANCER_CONTROL_EN (1 << 0)
628# define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_EN (1 << 8)
629# define AVIVO_LVTMA_DCBALANCER_CONTROL_TEST_IN_SHIFT (16)
630# define AVIVO_LVTMA_DCBALANCER_CONTROL_FORCE (1 << 24)
631
/* NOTE(review): offset 0x78d8 is identical to
 * AVIVO_TMDSA_DATA_SYNCHRONIZATION above; for the LVTMA block one would
 * expect an offset in the 0x7axx range (e.g. 0x7ad8). Confirm against
 * the register specification before relying on this define. */
#define AVIVO_LVTMA_DATA_SYNCHRONIZATION 0x78d8
# define AVIVO_LVTMA_DATA_SYNCHRONIZATION_DSYNSEL (1 << 0)
# define AVIVO_LVTMA_DATA_SYNCHRONIZATION_PFREQCHG (1 << 8)
635#define R500_LVTMA_CLOCK_ENABLE 0x7b00
636#define R600_LVTMA_CLOCK_ENABLE 0x7b04
637
638#define R500_LVTMA_TRANSMITTER_ENABLE 0x7b04
639#define R600_LVTMA_TRANSMITTER_ENABLE 0x7b08
640# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC0EN (1 << 1)
641# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD00EN (1 << 2)
642# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD01EN (1 << 3)
643# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD02EN (1 << 4)
644# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD03EN (1 << 5)
645# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKC1EN (1 << 9)
646# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD10EN (1 << 10)
647# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD11EN (1 << 11)
648# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKD12EN (1 << 12)
649# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK (1 << 17)
650# define AVIVO_LVTMA_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK (1 << 18)
651
652#define R500_LVTMA_TRANSMITTER_CONTROL 0x7b10
653#define R600_LVTMA_TRANSMITTER_CONTROL 0x7b14
654# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_ENABLE (1 << 0)
655# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_RESET (1 << 1)
656# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_HPD_MASK_SHIFT (2)
657# define AVIVO_LVTMA_TRANSMITTER_CONTROL_IDSCKSEL (1 << 4)
658# define AVIVO_LVTMA_TRANSMITTER_CONTROL_BGSLEEP (1 << 5)
659# define AVIVO_LVTMA_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN (1 << 6)
660# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK (1 << 8)
661# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TMCLK_FROM_PADS (1 << 13)
662# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK (1 << 14)
663# define AVIVO_LVTMA_TRANSMITTER_CONTROL_TDCLK_FROM_PADS (1 << 15)
664# define AVIVO_LVTMA_TRANSMITTER_CONTROL_CLK_PATTERN_SHIFT (16)
665# define AVIVO_LVTMA_TRANSMITTER_CONTROL_BYPASS_PLL (1 << 28)
666# define AVIVO_LVTMA_TRANSMITTER_CONTROL_USE_CLK_DATA (1 << 29)
667# define AVIVO_LVTMA_TRANSMITTER_CONTROL_INPUT_TEST_CLK_SEL (1 << 31)
668
669#define R500_LVTMA_PWRSEQ_CNTL 0x7af0
670#define R600_LVTMA_PWRSEQ_CNTL 0x7af4
671# define AVIVO_LVTMA_PWRSEQ_EN (1 << 0)
672# define AVIVO_LVTMA_PWRSEQ_PLL_ENABLE_MASK (1 << 2)
673# define AVIVO_LVTMA_PWRSEQ_PLL_RESET_MASK (1 << 3)
674# define AVIVO_LVTMA_PWRSEQ_TARGET_STATE (1 << 4)
675# define AVIVO_LVTMA_SYNCEN (1 << 8)
676# define AVIVO_LVTMA_SYNCEN_OVRD (1 << 9)
677# define AVIVO_LVTMA_SYNCEN_POL (1 << 10)
678# define AVIVO_LVTMA_DIGON (1 << 16)
679# define AVIVO_LVTMA_DIGON_OVRD (1 << 17)
680# define AVIVO_LVTMA_DIGON_POL (1 << 18)
681# define AVIVO_LVTMA_BLON (1 << 24)
682# define AVIVO_LVTMA_BLON_OVRD (1 << 25)
683# define AVIVO_LVTMA_BLON_POL (1 << 26)
684
685#define R500_LVTMA_PWRSEQ_STATE 0x7af4
686#define R600_LVTMA_PWRSEQ_STATE 0x7af8
687# define AVIVO_LVTMA_PWRSEQ_STATE_TARGET_STATE_R (1 << 0)
688# define AVIVO_LVTMA_PWRSEQ_STATE_DIGON (1 << 1)
689# define AVIVO_LVTMA_PWRSEQ_STATE_SYNCEN (1 << 2)
690# define AVIVO_LVTMA_PWRSEQ_STATE_BLON (1 << 3)
691# define AVIVO_LVTMA_PWRSEQ_STATE_DONE (1 << 4)
692# define AVIVO_LVTMA_PWRSEQ_STATE_STATUS_SHIFT (8)
693
694#define AVIVO_LVDS_BACKLIGHT_CNTL 0x7af8
695# define AVIVO_LVDS_BACKLIGHT_CNTL_EN (1 << 0)
696# define AVIVO_LVDS_BACKLIGHT_LEVEL_MASK 0x0000ff00
697# define AVIVO_LVDS_BACKLIGHT_LEVEL_SHIFT 8
698
699#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
700
701#define AVIVO_GPIO_0 0x7e30
702#define AVIVO_GPIO_1 0x7e40
703#define AVIVO_GPIO_2 0x7e50
704#define AVIVO_GPIO_3 0x7e60
705
706#define AVIVO_DC_GPIO_HPD_Y 0x7e9c
707
708#define AVIVO_I2C_STATUS 0x7d30
709# define AVIVO_I2C_STATUS_DONE (1 << 0)
710# define AVIVO_I2C_STATUS_NACK (1 << 1)
711# define AVIVO_I2C_STATUS_HALT (1 << 2)
712# define AVIVO_I2C_STATUS_GO (1 << 3)
713# define AVIVO_I2C_STATUS_MASK 0x7
714/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe
715 * DONE? */
716# define AVIVO_I2C_STATUS_CMD_RESET 0x7
717# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3)
718#define AVIVO_I2C_STOP 0x7d34
719#define AVIVO_I2C_START_CNTL 0x7d38
720# define AVIVO_I2C_START (1 << 8)
721# define AVIVO_I2C_CONNECTOR0 (0 << 16)
722# define AVIVO_I2C_CONNECTOR1 (1 << 16)
723#define R520_I2C_START (1<<0)
724#define R520_I2C_STOP (1<<1)
725#define R520_I2C_RX (1<<2)
726#define R520_I2C_EN (1<<8)
727#define R520_I2C_DDC1 (0<<16)
728#define R520_I2C_DDC2 (1<<16)
729#define R520_I2C_DDC3 (2<<16)
730#define R520_I2C_DDC_MASK (3<<16)
731#define AVIVO_I2C_CONTROL2 0x7d3c
732# define AVIVO_I2C_7D3C_SIZE_SHIFT 8
733# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8)
734#define AVIVO_I2C_CONTROL3 0x7d40
735/* Reading is done 4 bytes at a time: read the bottom 8 bits from
736 * 7d44, four times in a row.
737 * Writing is a little more complex. First write DATA with
738 * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic
739 * magic number, zz is, I think, the slave address, and yy is the byte
740 * you want to write. */
741#define AVIVO_I2C_DATA 0x7d44
742#define R520_I2C_ADDR_COUNT_MASK (0x7)
743#define R520_I2C_DATA_COUNT_SHIFT (8)
744#define R520_I2C_DATA_COUNT_MASK (0xF00)
745#define AVIVO_I2C_CNTL 0x7d50
746# define AVIVO_I2C_EN (1 << 0)
747# define AVIVO_I2C_RESET (1 << 8)
748
749#endif
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
new file mode 100644
index 000000000000..570a244bd88b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -0,0 +1,234 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32/* r520,rv530,rv560,rv570,r580 depends on : */
33void r100_hdp_reset(struct radeon_device *rdev);
34int rv370_pcie_gart_enable(struct radeon_device *rdev);
35void rv370_pcie_gart_disable(struct radeon_device *rdev);
36void r420_pipes_init(struct radeon_device *rdev);
37void rs600_mc_disable_clients(struct radeon_device *rdev);
38void rs600_disable_vga(struct radeon_device *rdev);
39int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
40int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
41
42/* This file gathers functions specific to:
43 * r520,rv530,rv560,rv570,r580
44 *
45 * Some of these functions might be used by newer ASICs.
46 */
47void r520_gpu_init(struct radeon_device *rdev);
48int r520_mc_wait_for_idle(struct radeon_device *rdev);
49
50
51/*
52 * MC
53 */
54int r520_mc_init(struct radeon_device *rdev)
55{
56 uint32_t tmp;
57 int r;
58
59 if (r100_debugfs_rbbm_init(rdev)) {
60 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
61 }
62 if (rv515_debugfs_pipes_info_init(rdev)) {
63 DRM_ERROR("Failed to register debugfs file for pipes !\n");
64 }
65 if (rv515_debugfs_ga_info_init(rdev)) {
66 DRM_ERROR("Failed to register debugfs file for pipes !\n");
67 }
68
69 r520_gpu_init(rdev);
70 rv370_pcie_gart_disable(rdev);
71
72 /* Setup GPU memory space */
73 rdev->mc.vram_location = 0xFFFFFFFFUL;
74 rdev->mc.gtt_location = 0xFFFFFFFFUL;
75 if (rdev->flags & RADEON_IS_AGP) {
76 r = radeon_agp_init(rdev);
77 if (r) {
78 printk(KERN_WARNING "[drm] Disabling AGP\n");
79 rdev->flags &= ~RADEON_IS_AGP;
80 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
81 } else {
82 rdev->mc.gtt_location = rdev->mc.agp_base;
83 }
84 }
85 r = radeon_mc_setup(rdev);
86 if (r) {
87 return r;
88 }
89
90 /* Program GPU memory space */
91 rs600_mc_disable_clients(rdev);
92 if (r520_mc_wait_for_idle(rdev)) {
93 printk(KERN_WARNING "Failed to wait MC idle while "
94 "programming pipes. Bad things might happen.\n");
95 }
96 /* Write VRAM size in case we are limiting it */
97 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
98 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
99 tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
100 tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
101 WREG32_MC(R520_MC_FB_LOCATION, tmp);
102 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
103 WREG32(0x310, rdev->mc.vram_location);
104 if (rdev->flags & RADEON_IS_AGP) {
105 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
106 tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16);
107 tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16);
108 WREG32_MC(R520_MC_AGP_LOCATION, tmp);
109 WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base);
110 WREG32_MC(R520_MC_AGP_BASE_2, 0);
111 } else {
112 WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF);
113 WREG32_MC(R520_MC_AGP_BASE, 0);
114 WREG32_MC(R520_MC_AGP_BASE_2, 0);
115 }
116 return 0;
117}
118
/*
 * r520_mc_fini() - tear down MC-related state: stop the PCIE GART,
 * release the VRAM-resident GART table, then drop the common GART
 * bookkeeping.
 */
void r520_mc_fini(struct radeon_device *rdev)
{
	rv370_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
125
126
127/*
128 * Global GPU functions
129 */
130void r520_errata(struct radeon_device *rdev)
131{
132 rdev->pll_errata = 0;
133}
134
135int r520_mc_wait_for_idle(struct radeon_device *rdev)
136{
137 unsigned i;
138 uint32_t tmp;
139
140 for (i = 0; i < rdev->usec_timeout; i++) {
141 /* read MC_STATUS */
142 tmp = RREG32_MC(R520_MC_STATUS);
143 if (tmp & R520_MC_STATUS_IDLE) {
144 return 0;
145 }
146 DRM_UDELAY(1);
147 }
148 return -1;
149}
150
151void r520_gpu_init(struct radeon_device *rdev)
152{
153 unsigned pipe_select_current, gb_pipe_select, tmp;
154
155 r100_hdp_reset(rdev);
156 rs600_disable_vga(rdev);
157 /*
158 * DST_PIPE_CONFIG 0x170C
159 * GB_TILE_CONFIG 0x4018
160 * GB_FIFO_SIZE 0x4024
161 * GB_PIPE_SELECT 0x402C
162 * GB_PIPE_SELECT2 0x4124
163 * Z_PIPE_SHIFT 0
164 * Z_PIPE_MASK 0x000000003
165 * GB_FIFO_SIZE2 0x4128
166 * SC_SFIFO_SIZE_SHIFT 0
167 * SC_SFIFO_SIZE_MASK 0x000000003
168 * SC_MFIFO_SIZE_SHIFT 2
169 * SC_MFIFO_SIZE_MASK 0x00000000C
170 * FG_SFIFO_SIZE_SHIFT 4
171 * FG_SFIFO_SIZE_MASK 0x000000030
172 * ZB_MFIFO_SIZE_SHIFT 6
173 * ZB_MFIFO_SIZE_MASK 0x0000000C0
174 * GA_ENHANCE 0x4274
175 * SU_REG_DEST 0x42C8
176 */
177 /* workaround for RV530 */
178 if (rdev->family == CHIP_RV530) {
179 WREG32(0x4124, 1);
180 WREG32(0x4128, 0xFF);
181 }
182 r420_pipes_init(rdev);
183 gb_pipe_select = RREG32(0x402C);
184 tmp = RREG32(0x170C);
185 pipe_select_current = (tmp >> 2) & 3;
186 tmp = (1 << pipe_select_current) |
187 (((gb_pipe_select >> 8) & 0xF) << 4);
188 WREG32_PLL(0x000D, tmp);
189 if (r520_mc_wait_for_idle(rdev)) {
190 printk(KERN_WARNING "Failed to wait MC idle while "
191 "programming pipes. Bad things might happen.\n");
192 }
193}
194
195
196/*
197 * VRAM info
198 */
199static void r520_vram_get_type(struct radeon_device *rdev)
200{
201 uint32_t tmp;
202
203 rdev->mc.vram_width = 128;
204 rdev->mc.vram_is_ddr = true;
205 tmp = RREG32_MC(R520_MC_CNTL0);
206 switch ((tmp & R520_MEM_NUM_CHANNELS_MASK) >> R520_MEM_NUM_CHANNELS_SHIFT) {
207 case 0:
208 rdev->mc.vram_width = 32;
209 break;
210 case 1:
211 rdev->mc.vram_width = 64;
212 break;
213 case 2:
214 rdev->mc.vram_width = 128;
215 break;
216 case 3:
217 rdev->mc.vram_width = 256;
218 break;
219 default:
220 rdev->mc.vram_width = 128;
221 break;
222 }
223 if (tmp & R520_MC_CHANNEL_SIZE)
224 rdev->mc.vram_width *= 2;
225}
226
227void r520_vram_info(struct radeon_device *rdev)
228{
229 r520_vram_get_type(rdev);
230 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
231
232 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
233 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
234}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
new file mode 100644
index 000000000000..c45559fc97fd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -0,0 +1,169 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32/* r600,rv610,rv630,rv620,rv635,rv670 depends on : */
33void rs600_mc_disable_clients(struct radeon_device *rdev);
34
35/* This file gathers functions specific to:
36 * r600,rv610,rv630,rv620,rv635,rv670
37 *
38 * Some of these functions might be used by newer ASICs.
39 */
40int r600_mc_wait_for_idle(struct radeon_device *rdev);
41void r600_gpu_init(struct radeon_device *rdev);
42
43
44/*
45 * MC
46 */
47int r600_mc_init(struct radeon_device *rdev)
48{
49 uint32_t tmp;
50
51 r600_gpu_init(rdev);
52
53 /* setup the gart before changing location so we can ask to
54 * discard unmapped mc request
55 */
56 /* FIXME: disable out of gart access */
57 tmp = rdev->mc.gtt_location / 4096;
58 tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp);
59 WREG32(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
60 tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
61 tmp = REG_SET(R600_LOGICAL_PAGE_NUMBER, tmp);
62 WREG32(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);
63
64 rs600_mc_disable_clients(rdev);
65 if (r600_mc_wait_for_idle(rdev)) {
66 printk(KERN_WARNING "Failed to wait MC idle while "
67 "programming pipes. Bad things might happen.\n");
68 }
69
70 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
71 tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24);
72 tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24);
73 WREG32(R600_MC_VM_FB_LOCATION, tmp);
74 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
75 tmp = REG_SET(R600_MC_AGP_TOP, tmp >> 22);
76 WREG32(R600_MC_VM_AGP_TOP, tmp);
77 tmp = REG_SET(R600_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
78 WREG32(R600_MC_VM_AGP_BOT, tmp);
79 return 0;
80}
81
/* r600_mc_fini() - MC teardown; not yet implemented for r600. */
void r600_mc_fini(struct radeon_device *rdev)
{
	/* FIXME: implement */
}
86
87
88/*
89 * Global GPU functions
90 */
91void r600_errata(struct radeon_device *rdev)
92{
93 rdev->pll_errata = 0;
94}
95
/*
 * r600_mc_wait_for_idle() - stub; unconditionally reports the MC as
 * idle until the real poll is implemented.
 */
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}
101
/* r600_gpu_init() - GPU core setup; not yet implemented for r600. */
void r600_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: implement */
}
106
107
108/*
109 * VRAM info
110 */
111void r600_vram_get_type(struct radeon_device *rdev)
112{
113 uint32_t tmp;
114 int chansize;
115
116 rdev->mc.vram_width = 128;
117 rdev->mc.vram_is_ddr = true;
118
119 tmp = RREG32(R600_RAMCFG);
120 if (tmp & R600_CHANSIZE_OVERRIDE) {
121 chansize = 16;
122 } else if (tmp & R600_CHANSIZE) {
123 chansize = 64;
124 } else {
125 chansize = 32;
126 }
127 if (rdev->family == CHIP_R600) {
128 rdev->mc.vram_width = 8 * chansize;
129 } else if (rdev->family == CHIP_RV670) {
130 rdev->mc.vram_width = 4 * chansize;
131 } else if ((rdev->family == CHIP_RV610) ||
132 (rdev->family == CHIP_RV620)) {
133 rdev->mc.vram_width = chansize;
134 } else if ((rdev->family == CHIP_RV630) ||
135 (rdev->family == CHIP_RV635)) {
136 rdev->mc.vram_width = 2 * chansize;
137 }
138}
139
140void r600_vram_info(struct radeon_device *rdev)
141{
142 r600_vram_get_type(rdev);
143 rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE);
144
145 /* Could aper size report 0 ? */
146 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
147 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
148}
149
150/*
151 * Indirect registers accessor
152 */
153uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg)
154{
155 uint32_t r;
156
157 WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff));
158 (void)RREG32(R600_PCIE_PORT_INDEX);
159 r = RREG32(R600_PCIE_PORT_DATA);
160 return r;
161}
162
163void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
164{
165 WREG32(R600_PCIE_PORT_INDEX, ((reg) & 0xff));
166 (void)RREG32(R600_PCIE_PORT_INDEX);
167 WREG32(R600_PCIE_PORT_DATA, (v));
168 (void)RREG32(R600_PCIE_PORT_DATA);
169}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
new file mode 100644
index 000000000000..e2d1f5f33f7e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __R600_REG_H__
29#define __R600_REG_H__
30
31#define R600_PCIE_PORT_INDEX 0x0038
32#define R600_PCIE_PORT_DATA 0x003c
33
34#define R600_MC_VM_FB_LOCATION 0x2180
35#define R600_MC_FB_BASE_MASK 0x0000FFFF
36#define R600_MC_FB_BASE_SHIFT 0
37#define R600_MC_FB_TOP_MASK 0xFFFF0000
38#define R600_MC_FB_TOP_SHIFT 16
39#define R600_MC_VM_AGP_TOP 0x2184
40#define R600_MC_AGP_TOP_MASK 0x0003FFFF
41#define R600_MC_AGP_TOP_SHIFT 0
42#define R600_MC_VM_AGP_BOT 0x2188
43#define R600_MC_AGP_BOT_MASK 0x0003FFFF
44#define R600_MC_AGP_BOT_SHIFT 0
45#define R600_MC_VM_AGP_BASE 0x218c
46#define R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2190
47#define R600_LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
48#define R600_LOGICAL_PAGE_NUMBER_SHIFT 0
49#define R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
50#define R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
51
52#define R700_MC_VM_FB_LOCATION 0x2024
53#define R700_MC_FB_BASE_MASK 0x0000FFFF
54#define R700_MC_FB_BASE_SHIFT 0
55#define R700_MC_FB_TOP_MASK 0xFFFF0000
56#define R700_MC_FB_TOP_SHIFT 16
57#define R700_MC_VM_AGP_TOP 0x2028
58#define R700_MC_AGP_TOP_MASK 0x0003FFFF
59#define R700_MC_AGP_TOP_SHIFT 0
60#define R700_MC_VM_AGP_BOT 0x202c
61#define R700_MC_AGP_BOT_MASK 0x0003FFFF
62#define R700_MC_AGP_BOT_SHIFT 0
63#define R700_MC_VM_AGP_BASE 0x2030
64#define R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
65#define R700_LOGICAL_PAGE_NUMBER_MASK 0x000FFFFF
66#define R700_LOGICAL_PAGE_NUMBER_SHIFT 0
67#define R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
68#define R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203c
69
70#define R600_RAMCFG 0x2408
71# define R600_CHANSIZE (1 << 7)
72# define R600_CHANSIZE_OVERRIDE (1 << 10)
73
74
75#define R600_GENERAL_PWRMGT 0x618
76# define R600_OPEN_DRAIN_PADS (1 << 11)
77
78#define R600_LOWER_GPIO_ENABLE 0x710
79#define R600_CTXSW_VID_LOWER_GPIO_CNTL 0x718
80#define R600_HIGH_VID_LOWER_GPIO_CNTL 0x71c
81#define R600_MEDIUM_VID_LOWER_GPIO_CNTL 0x720
82#define R600_LOW_VID_LOWER_GPIO_CNTL 0x724
83
84
85
86#define R600_HDP_NONSURFACE_BASE 0x2c04
87
88#define R600_BUS_CNTL 0x5420
89#define R600_CONFIG_CNTL 0x5424
90#define R600_CONFIG_MEMSIZE 0x5428
91#define R600_CONFIG_F0_BASE 0x542C
92#define R600_CONFIG_APER_SIZE 0x5430
93
94#define R600_ROM_CNTL 0x1600
95# define R600_SCK_OVERWRITE (1 << 1)
96# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
97# define R600_SCK_PRESCALE_CRYSTAL_CLK_MASK (0xf << 28)
98
99#define R600_CG_SPLL_FUNC_CNTL 0x600
100# define R600_SPLL_BYPASS_EN (1 << 3)
101#define R600_CG_SPLL_STATUS 0x60c
102# define R600_SPLL_CHG_STATUS (1 << 1)
103
104#define R600_BIOS_0_SCRATCH 0x1724
105#define R600_BIOS_1_SCRATCH 0x1728
106#define R600_BIOS_2_SCRATCH 0x172c
107#define R600_BIOS_3_SCRATCH 0x1730
108#define R600_BIOS_4_SCRATCH 0x1734
109#define R600_BIOS_5_SCRATCH 0x1738
110#define R600_BIOS_6_SCRATCH 0x173c
111#define R600_BIOS_7_SCRATCH 0x1740
112
113
114#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
new file mode 100644
index 000000000000..c3f24cc56009
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -0,0 +1,793 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RADEON_H__
29#define __RADEON_H__
30
31#include "radeon_object.h"
32
33/* TODO: Here are things that needs to be done :
34 * - surface allocator & initializer : (bit like scratch reg) should
35 * initialize HDP_ stuff on RS600, R600, R700 hw, well anythings
36 * related to surface
37 * - WB : write back stuff (do it bit like scratch reg things)
38 * - Vblank : look at Jesse's rework and what we should do
39 * - r600/r700: gart & cp
40 * - cs : clean cs ioctl use bitmap & things like that.
41 * - power management stuff
42 * - Barrier in gart code
43 * - Unmappable VRAM ?
44 * - TESTING, TESTING, TESTING
45 */
46
47#include <asm/atomic.h>
48#include <linux/wait.h>
49#include <linux/list.h>
50#include <linux/kref.h>
51
52#include "radeon_mode.h"
53#include "radeon_reg.h"
54
55
56/*
57 * Modules parameters.
58 */
59extern int radeon_no_wb;
60extern int radeon_modeset;
61extern int radeon_dynclks;
62extern int radeon_r4xx_atom;
63extern int radeon_agpmode;
64extern int radeon_vram_limit;
65extern int radeon_gart_size;
66extern int radeon_benchmarking;
67extern int radeon_connector_table;
68
69/*
70 * Copy from radeon_drv.h so we don't have to include both and have conflicting
71 * symbol;
72 */
73#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
74#define RADEON_IB_POOL_SIZE 16
75#define RADEON_DEBUGFS_MAX_NUM_FILES 32
76#define RADEONFB_CONN_LIMIT 4
77
78enum radeon_family {
79 CHIP_R100,
80 CHIP_RV100,
81 CHIP_RS100,
82 CHIP_RV200,
83 CHIP_RS200,
84 CHIP_R200,
85 CHIP_RV250,
86 CHIP_RS300,
87 CHIP_RV280,
88 CHIP_R300,
89 CHIP_R350,
90 CHIP_RV350,
91 CHIP_RV380,
92 CHIP_R420,
93 CHIP_R423,
94 CHIP_RV410,
95 CHIP_RS400,
96 CHIP_RS480,
97 CHIP_RS600,
98 CHIP_RS690,
99 CHIP_RS740,
100 CHIP_RV515,
101 CHIP_R520,
102 CHIP_RV530,
103 CHIP_RV560,
104 CHIP_RV570,
105 CHIP_R580,
106 CHIP_R600,
107 CHIP_RV610,
108 CHIP_RV630,
109 CHIP_RV620,
110 CHIP_RV635,
111 CHIP_RV670,
112 CHIP_RS780,
113 CHIP_RV770,
114 CHIP_RV730,
115 CHIP_RV710,
116 CHIP_LAST,
117};
118
119enum radeon_chip_flags {
120 RADEON_FAMILY_MASK = 0x0000ffffUL,
121 RADEON_FLAGS_MASK = 0xffff0000UL,
122 RADEON_IS_MOBILITY = 0x00010000UL,
123 RADEON_IS_IGP = 0x00020000UL,
124 RADEON_SINGLE_CRTC = 0x00040000UL,
125 RADEON_IS_AGP = 0x00080000UL,
126 RADEON_HAS_HIERZ = 0x00100000UL,
127 RADEON_IS_PCIE = 0x00200000UL,
128 RADEON_NEW_MEMMAP = 0x00400000UL,
129 RADEON_IS_PCI = 0x00800000UL,
130 RADEON_IS_IGPGART = 0x01000000UL,
131};
132
133
134/*
135 * Errata workarounds.
136 */
137enum radeon_pll_errata {
138 CHIP_ERRATA_R300_CG = 0x00000001,
139 CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
140 CHIP_ERRATA_PLL_DELAY = 0x00000004
141};
142
143
144struct radeon_device;
145
146
147/*
148 * BIOS.
149 */
150bool radeon_get_bios(struct radeon_device *rdev);
151
152/*
153 * Clocks
154 */
155
156struct radeon_clock {
157 struct radeon_pll p1pll;
158 struct radeon_pll p2pll;
159 struct radeon_pll spll;
160 struct radeon_pll mpll;
161 /* 10 Khz units */
162 uint32_t default_mclk;
163 uint32_t default_sclk;
164};
165
166/*
167 * Fences.
168 */
169struct radeon_fence_driver {
170 uint32_t scratch_reg;
171 atomic_t seq;
172 uint32_t last_seq;
173 unsigned long count_timeout;
174 wait_queue_head_t queue;
175 rwlock_t lock;
176 struct list_head created;
177 struct list_head emited;
178 struct list_head signaled;
179};
180
181struct radeon_fence {
182 struct radeon_device *rdev;
183 struct kref kref;
184 struct list_head list;
185 /* protected by radeon_fence.lock */
186 uint32_t seq;
187 unsigned long timeout;
188 bool emited;
189 bool signaled;
190};
191
192int radeon_fence_driver_init(struct radeon_device *rdev);
193void radeon_fence_driver_fini(struct radeon_device *rdev);
194int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
195int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
196void radeon_fence_process(struct radeon_device *rdev);
197bool radeon_fence_signaled(struct radeon_fence *fence);
198int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
199int radeon_fence_wait_next(struct radeon_device *rdev);
200int radeon_fence_wait_last(struct radeon_device *rdev);
201struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
202void radeon_fence_unref(struct radeon_fence **fence);
203
204
205/*
206 * Radeon buffer.
207 */
208struct radeon_object;
209
210struct radeon_object_list {
211 struct list_head list;
212 struct radeon_object *robj;
213 uint64_t gpu_offset;
214 unsigned rdomain;
215 unsigned wdomain;
216};
217
218int radeon_object_init(struct radeon_device *rdev);
219void radeon_object_fini(struct radeon_device *rdev);
220int radeon_object_create(struct radeon_device *rdev,
221 struct drm_gem_object *gobj,
222 unsigned long size,
223 bool kernel,
224 uint32_t domain,
225 bool interruptible,
226 struct radeon_object **robj_ptr);
227int radeon_object_kmap(struct radeon_object *robj, void **ptr);
228void radeon_object_kunmap(struct radeon_object *robj);
229void radeon_object_unref(struct radeon_object **robj);
230int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
231 uint64_t *gpu_addr);
232void radeon_object_unpin(struct radeon_object *robj);
233int radeon_object_wait(struct radeon_object *robj);
234int radeon_object_evict_vram(struct radeon_device *rdev);
235int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
236void radeon_object_force_delete(struct radeon_device *rdev);
237void radeon_object_list_add_object(struct radeon_object_list *lobj,
238 struct list_head *head);
239int radeon_object_list_validate(struct list_head *head, void *fence);
240void radeon_object_list_unvalidate(struct list_head *head);
241void radeon_object_list_clean(struct list_head *head);
242int radeon_object_fbdev_mmap(struct radeon_object *robj,
243 struct vm_area_struct *vma);
244unsigned long radeon_object_size(struct radeon_object *robj);
245
246
247/*
248 * GEM objects.
249 */
250struct radeon_gem {
251 struct list_head objects;
252};
253
254int radeon_gem_init(struct radeon_device *rdev);
255void radeon_gem_fini(struct radeon_device *rdev);
256int radeon_gem_object_create(struct radeon_device *rdev, int size,
257 int alignment, int initial_domain,
258 bool discardable, bool kernel,
259 bool interruptible,
260 struct drm_gem_object **obj);
261int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
262 uint64_t *gpu_addr);
263void radeon_gem_object_unpin(struct drm_gem_object *obj);
264
265
266/*
267 * GART structures, functions & helpers
268 */
269struct radeon_mc;
270
271struct radeon_gart_table_ram {
272 volatile uint32_t *ptr;
273};
274
275struct radeon_gart_table_vram {
276 struct radeon_object *robj;
277 volatile uint32_t *ptr;
278};
279
280union radeon_gart_table {
281 struct radeon_gart_table_ram ram;
282 struct radeon_gart_table_vram vram;
283};
284
/* GART (GPU address remapping table) bookkeeping. Page-table storage
 * may live in system RAM or VRAM (see union radeon_gart_table). */
285struct radeon_gart {
	/* bus address of the page table itself */
286	dma_addr_t table_addr;
	/* table sizing: GPU-page vs CPU-page counts and byte size */
287	unsigned num_gpu_pages;
288	unsigned num_cpu_pages;
289	unsigned table_size;
290	union radeon_gart_table table;
	/* backing system pages and their DMA addresses, one per CPU page */
291	struct page **pages;
292	dma_addr_t *pages_addr;
	/* set once the GART is initialized and usable */
293	bool ready;
294};
295
296int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
297void radeon_gart_table_ram_free(struct radeon_device *rdev);
298int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
299void radeon_gart_table_vram_free(struct radeon_device *rdev);
300int radeon_gart_init(struct radeon_device *rdev);
301void radeon_gart_fini(struct radeon_device *rdev);
302void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
303 int pages);
304int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
305 int pages, struct page **pagelist);
306
307
308/*
309 * GPU MC structures, functions & helpers
310 */
/* Memory-controller layout: CPU-visible aperture plus the GPU
 * address-space placement of the GTT and VRAM ranges. */
311struct radeon_mc {
	/* CPU/PCI aperture into VRAM (resource 0): length and bus base */
312	resource_size_t aper_size;
313	resource_size_t aper_base;
	/* AGP aperture base (meaningful only when RADEON_IS_AGP is set) */
314	resource_size_t agp_base;
	/* GPU-address placement and sizes of the GTT and VRAM apertures */
315	unsigned gtt_location;
316	unsigned gtt_size;
317	unsigned vram_location;
318	unsigned vram_size;
	/* memory bus width in bits (32..256, doubled for wide channels) */
319	unsigned vram_width;
	/* NOTE(review): presumably an MTRR cookie for the aperture — confirm */
320	int vram_mtrr;
321	bool vram_is_ddr;
322};
323
324int radeon_mc_setup(struct radeon_device *rdev);
325
326
327/*
328 * GPU scratch registers structures, functions & helpers
329 */
330struct radeon_scratch {
331 unsigned num_reg;
332 bool free[32];
333 uint32_t reg[32];
334};
335
336int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
337void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
338
339
340/*
341 * IRQS.
342 */
343struct radeon_irq {
344 bool installed;
345 bool sw_int;
346 /* FIXME: use a define max crtc rather than hardcode it */
347 bool crtc_vblank_int[2];
348};
349
350int radeon_irq_kms_init(struct radeon_device *rdev);
351void radeon_irq_kms_fini(struct radeon_device *rdev);
352
353
354/*
355 * CP & ring.
356 */
357struct radeon_ib {
358 struct list_head list;
359 unsigned long idx;
360 uint64_t gpu_addr;
361 struct radeon_fence *fence;
362 volatile uint32_t *ptr;
363 uint32_t length_dw;
364};
365
366struct radeon_ib_pool {
367 struct mutex mutex;
368 struct radeon_object *robj;
369 struct list_head scheduled_ibs;
370 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
371 bool ready;
372 DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
373};
374
/* Command processor ring buffer state. The ring is a GPU buffer object
 * mapped for CPU writes; rptr/wptr track consumer/producer positions. */
375struct radeon_cp {
	/* buffer object backing the ring, and its CPU mapping */
376	struct radeon_object *ring_obj;
377	volatile uint32_t *ring;
	/* read pointer (GPU consumption) and write pointer (CPU production) */
378	unsigned rptr;
379	unsigned wptr;
	/* wptr value last committed to the hardware */
380	unsigned wptr_old;
381	unsigned ring_size;
	/* free dwords available between wptr and rptr */
382	unsigned ring_free_dw;
383	int count_dw;
	/* GPU address of the ring buffer */
384	uint64_t gpu_addr;
	/* masks for aligning and wrapping ring offsets */
385	uint32_t align_mask;
386	uint32_t ptr_mask;
	/* serializes ring access (see radeon_ring_lock/unlock) */
387	struct mutex mutex;
388	bool ready;
389};
390
391int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
392void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
393int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
394int radeon_ib_pool_init(struct radeon_device *rdev);
395void radeon_ib_pool_fini(struct radeon_device *rdev);
396int radeon_ib_test(struct radeon_device *rdev);
397/* Ring access between begin & end cannot sleep */
398void radeon_ring_free_size(struct radeon_device *rdev);
399int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
400void radeon_ring_unlock_commit(struct radeon_device *rdev);
401void radeon_ring_unlock_undo(struct radeon_device *rdev);
402int radeon_ring_test(struct radeon_device *rdev);
403int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
404void radeon_ring_fini(struct radeon_device *rdev);
405
406
407/*
408 * CS.
409 */
410struct radeon_cs_reloc {
411 struct drm_gem_object *gobj;
412 struct radeon_object *robj;
413 struct radeon_object_list lobj;
414 uint32_t handle;
415 uint32_t flags;
416};
417
418struct radeon_cs_chunk {
419 uint32_t chunk_id;
420 uint32_t length_dw;
421 uint32_t *kdata;
422};
423
424struct radeon_cs_parser {
425 struct radeon_device *rdev;
426 struct drm_file *filp;
427 /* chunks */
428 unsigned nchunks;
429 struct radeon_cs_chunk *chunks;
430 uint64_t *chunks_array;
431 /* IB */
432 unsigned idx;
433 /* relocations */
434 unsigned nrelocs;
435 struct radeon_cs_reloc *relocs;
436 struct radeon_cs_reloc **relocs_ptr;
437 struct list_head validated;
438 /* indices of various chunks */
439 int chunk_ib_idx;
440 int chunk_relocs_idx;
441 struct radeon_ib *ib;
442 void *track;
443};
444
445struct radeon_cs_packet {
446 unsigned idx;
447 unsigned type;
448 unsigned reg;
449 unsigned opcode;
450 int count;
451 unsigned one_reg_wr;
452};
453
454typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
455 struct radeon_cs_packet *pkt,
456 unsigned idx, unsigned reg);
457typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
458 struct radeon_cs_packet *pkt);
459
460
461/*
462 * AGP
463 */
464int radeon_agp_init(struct radeon_device *rdev);
465void radeon_agp_fini(struct radeon_device *rdev);
466
467
468/*
469 * Writeback
470 */
471struct radeon_wb {
472 struct radeon_object *wb_obj;
473 volatile uint32_t *wb;
474 uint64_t gpu_addr;
475};
476
477
478/*
479 * Benchmarking
480 */
481void radeon_benchmark(struct radeon_device *rdev);
482
483
484/*
485 * Debugfs
486 */
487int radeon_debugfs_add_files(struct radeon_device *rdev,
488 struct drm_info_list *files,
489 unsigned nfiles);
490int radeon_debugfs_fence_init(struct radeon_device *rdev);
491int r100_debugfs_rbbm_init(struct radeon_device *rdev);
492int r100_debugfs_cp_init(struct radeon_device *rdev);
493
494
/*
 * ASIC specific functions.
 *
 * Per-family function table: each supported chip family provides one of
 * these (see radeon_asic.h) and the radeon_*() dispatch macros at the
 * bottom of this header call through rdev->asic.
 */
struct radeon_asic {
	/* apply family-specific workarounds */
	void (*errata)(struct radeon_device *rdev);
	void (*vram_info)(struct radeon_device *rdev);
	int (*gpu_reset)(struct radeon_device *rdev);
	/* memory controller setup / teardown */
	int (*mc_init)(struct radeon_device *rdev);
	void (*mc_fini)(struct radeon_device *rdev);
	/* writeback hooks (see struct radeon_wb) */
	int (*wb_init)(struct radeon_device *rdev);
	void (*wb_fini)(struct radeon_device *rdev);
	/* GART (Graphics Address Remapping Table) management */
	int (*gart_enable)(struct radeon_device *rdev);
	void (*gart_disable)(struct radeon_device *rdev);
	void (*gart_tlb_flush)(struct radeon_device *rdev);
	int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
	/* command processor (CP) ring control */
	int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
	void (*cp_fini)(struct radeon_device *rdev);
	void (*cp_disable)(struct radeon_device *rdev);
	void (*ring_start)(struct radeon_device *rdev);
	int (*irq_set)(struct radeon_device *rdev);
	int (*irq_process)(struct radeon_device *rdev);
	void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
	/* command-stream validation */
	int (*cs_parse)(struct radeon_cs_parser *p);
	/* two copy back-ends (blit engine / DMA engine); copy is the
	 * preferred default for the family (may alias one of the two) */
	int (*copy_blit)(struct radeon_device *rdev,
			 uint64_t src_offset,
			 uint64_t dst_offset,
			 unsigned num_pages,
			 struct radeon_fence *fence);
	int (*copy_dma)(struct radeon_device *rdev,
			uint64_t src_offset,
			uint64_t dst_offset,
			unsigned num_pages,
			struct radeon_fence *fence);
	int (*copy)(struct radeon_device *rdev,
		    uint64_t src_offset,
		    uint64_t dst_offset,
		    unsigned num_pages,
		    struct radeon_fence *fence);
	/* clock / power hooks; NULL in some family tables (see radeon_asic.h) */
	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
	void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
	void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
	void (*set_clock_gating)(struct radeon_device *rdev, int enable);
};
538
539
/*
 * IOCTL.
 *
 * GEM and CS ioctl entry points.
 * NOTE(review): the drm_file parameter is named filp in some prototypes
 * and file_priv in others; harmless, but worth unifying.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
564
565
/*
 * Core structure, functions and helpers.
 */
/* Register accessor callbacks; used by the RREG32/WREG32 macro family. */
typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);

/* Top-level per-device state for the KMS radeon driver. */
struct radeon_device {
	struct drm_device *ddev;
	struct pci_dev *pdev;
	/* ASIC */
	enum radeon_family family;
	unsigned long flags;
	int usec_timeout;
	enum radeon_pll_errata pll_errata;
	int num_gb_pipes;
	int disp_priority;
	/* BIOS */
	uint8_t *bios;		/* cached BIOS image, read via RBIOS8/16/32 */
	bool is_atom_bios;	/* AtomBIOS vs legacy COMBIOS parsing */
	uint16_t bios_header_start;
	/* NOTE(review): "stollen" is a long-standing misspelling of
	 * "stolen"; renaming would touch every user of the field. */
	struct radeon_object *stollen_vga_memory;
	struct fb_info *fbdev_info;
	struct radeon_object *fbdev_robj;
	struct radeon_framebuffer *fbdev_rfb;
	/* Register mmio */
	unsigned long rmmio_base;
	unsigned long rmmio_size;
	void *rmmio;
	/* per-block register accessors (mmio / mc / pll / pcie / pcie port) */
	radeon_rreg_t mm_rreg;
	radeon_wreg_t mm_wreg;
	radeon_rreg_t mc_rreg;
	radeon_wreg_t mc_wreg;
	radeon_rreg_t pll_rreg;
	radeon_wreg_t pll_wreg;
	radeon_rreg_t pcie_rreg;
	radeon_wreg_t pcie_wreg;
	radeon_rreg_t pciep_rreg;
	radeon_wreg_t pciep_wreg;
	struct radeon_clock clock;
	struct radeon_mc mc;
	struct radeon_gart gart;
	struct radeon_mode_info mode_info;
	struct radeon_scratch scratch;
	struct radeon_mman mman;
	struct radeon_fence_driver fence_drv;
	struct radeon_cp cp;
	struct radeon_ib_pool ib_pool;
	struct radeon_irq irq;
	/* per-family function table (see struct radeon_asic above) */
	struct radeon_asic *asic;
	struct radeon_gem gem;
	/* presumably serializes command-stream submission — confirm users */
	struct mutex cs_mutex;
	struct radeon_wb wb;
	bool gpu_lockup;
	bool shutdown;
	bool suspend;
};

int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
629
630
/*
 * Registers read & write functions.
 *
 * These macros expect a variable named rdev (struct radeon_device *)
 * to be in scope at the expansion site.
 */
#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb((v), ((void __iomem *)rdev->rmmio) + (reg))
#define RREG32(reg) rdev->mm_rreg(rdev, (reg))
#define WREG32(reg, v) rdev->mm_wreg(rdev, (reg), (v))
/* Place value v into register field FIELD (shift into position, mask). */
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
/*
 * Extract field FIELD from register word v.
 * BUGFIX: this was previously defined identically to REG_SET
 * (shift-left then mask), which cannot recover a field value; a getter
 * must mask first and then shift right.
 */
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rdev->pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rdev->pcie_wreg(rdev, (reg), (v))
/* Read-modify-write: bits set in mask are preserved, the rest take val. */
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)

void r100_pll_errata_after_index(struct radeon_device *rdev);
662
663
/*
 * ASICs helpers.
 *
 * Family predicates used to gate family-specific code paths.
 */
/* r100-class chips (and their RV/RS derivatives) */
#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
		(rdev->family == CHIP_RV200) || \
		(rdev->family == CHIP_RS100) || \
		(rdev->family == CHIP_RS200) || \
		(rdev->family == CHIP_RV250) || \
		(rdev->family == CHIP_RV280) || \
		(rdev->family == CHIP_RS300))
/* r300/r400-class chips */
#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \
		(rdev->family == CHIP_RV350) || \
		(rdev->family == CHIP_R350)  || \
		(rdev->family == CHIP_RV380) || \
		(rdev->family == CHIP_R420)  || \
		(rdev->family == CHIP_R423)  || \
		(rdev->family == CHIP_RV410) || \
		(rdev->family == CHIP_RS400) || \
		(rdev->family == CHIP_RS480))
/* open-ended ranges: RS600 and newer, RV620 and newer, RV730 and newer */
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
686
687
/*
 * BIOS helpers.
 */
/* Little-endian byte/word/dword reads from the cached image rdev->bios. */
#define RBIOS8(i) (rdev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

/* legacy COMBIOS vs AtomBIOS table parsing front-ends */
int radeon_combios_init(struct radeon_device *rdev);
void radeon_combios_fini(struct radeon_device *rdev);
int radeon_atombios_init(struct radeon_device *rdev);
void radeon_atombios_fini(struct radeon_device *rdev);
699
700
/*
 * RING helpers.
 *
 * CP command packet encoding: the packet type lives in header bits
 * 31:30 (see CP_PACKET_GET_TYPE below), the remaining fields depend on
 * the packet type.
 */
#define CP_PACKET0			0x00000000
#define		PACKET0_BASE_INDEX_SHIFT	0
#define		PACKET0_BASE_INDEX_MASK		(0x1ffff << 0)
#define		PACKET0_COUNT_SHIFT		16
#define		PACKET0_COUNT_MASK		(0x3fff << 16)
#define CP_PACKET1			0x40000000
#define CP_PACKET2			0x80000000
#define		PACKET2_PAD_SHIFT		0
#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
#define CP_PACKET3			0xC0000000
#define		PACKET3_IT_OPCODE_SHIFT		8
#define		PACKET3_IT_OPCODE_MASK		(0xff << 8)
#define		PACKET3_COUNT_SHIFT		16
#define		PACKET3_COUNT_MASK		(0x3fff << 16)
/* PACKET3 op code */
#define		PACKET3_NOP			0x10
#define		PACKET3_3D_DRAW_VBUF		0x28
#define		PACKET3_3D_DRAW_IMMD		0x29
#define		PACKET3_3D_DRAW_INDX		0x2A
#define		PACKET3_3D_LOAD_VBPNTR		0x2F
#define		PACKET3_INDX_BUFFER		0x33
#define		PACKET3_3D_DRAW_VBUF_2		0x34
#define		PACKET3_3D_DRAW_IMMD_2		0x35
#define		PACKET3_3D_DRAW_INDX_2		0x36
#define		PACKET3_BITBLT_MULTI		0x9B

/* Build packet headers. PACKET0 takes a byte register offset (>> 2
 * converts it to the dword index the hardware expects). */
#define PACKET0(reg, n)	(CP_PACKET0 |					\
			 REG_SET(PACKET0_BASE_INDEX, (reg) >> 2) |	\
			 REG_SET(PACKET0_COUNT, (n)))
#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
#define PACKET3(op, n)	(CP_PACKET3 |					\
			 REG_SET(PACKET3_IT_OPCODE, (op)) |		\
			 REG_SET(PACKET3_COUNT, (n)))

#define	PACKET_TYPE0	0
#define	PACKET_TYPE1	1
#define	PACKET_TYPE2	2
#define	PACKET_TYPE3	3

/* Decode packet headers.
 * NOTE(review): CP_PACKET0_GET_REG masks 13 bits (0x1FFF) while
 * PACKET0_BASE_INDEX_MASK above covers 17 bits (0x1ffff) — confirm
 * which width the hardware actually honors. */
#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
748
749static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
750{
751#if DRM_DEBUG_CODE
752 if (rdev->cp.count_dw <= 0) {
753 DRM_ERROR("radeon: writting more dword to ring than expected !\n");
754 }
755#endif
756 rdev->cp.ring[rdev->cp.wptr++] = v;
757 rdev->cp.wptr &= rdev->cp.ptr_mask;
758 rdev->cp.count_dw--;
759 rdev->cp.ring_free_dw--;
760}
761
762
/*
 * ASICs macro.
 *
 * Thin dispatchers into the per-family function table (rdev->asic).
 * Note: radeon_cs_parse() does not take rdev as an argument — it relies
 * on a variable named rdev being in scope at the call site.
 */
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_errata(rdev) (rdev)->asic->errata((rdev))
#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
#define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev))
#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev))
#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev))
#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev))
#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev))
#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize))
#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev))
#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev))
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
/* BUGFIX: this previously dispatched to set_engine_clock, so any
 * request to change the memory clock reprogrammed the engine clock. */
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
792
793#endif
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
new file mode 100644
index 000000000000..23ea9955ac59
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -0,0 +1,249 @@
1/*
2 * Copyright 2008 Red Hat Inc.
3 * Copyright 2009 Jerome Glisse.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Dave Airlie
25 * Jerome Glisse <glisse@freedesktop.org>
26 */
27#include "drmP.h"
28#include "drm.h"
29#include "radeon.h"
30#include "radeon_drm.h"
31
32#if __OS_HAS_AGP
33
/*
 * One entry of the AGP-mode quirk table: a host-bridge / card /
 * subsystem combination known to misbehave at its negotiated rate,
 * together with the AGP rate (default_mode) to force instead.
 */
struct radeon_agpmode_quirk {
	u32 hostbridge_vendor;
	u32 hostbridge_device;
	u32 chip_vendor;
	u32 chip_device;
	u32 subsys_vendor;
	u32 subsys_device;
	u32 default_mode;	/* forced AGP rate: 1, 2, 4 or 8 */
};

/* Terminated by an all-zero sentinel; the scan in radeon_agp_init()
 * stops on chip_device == 0. */
static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
	/* Intel E7505 Memory Controller Hub / RV350 AR [Radeon 9600XT] Needs AGPMode 4 (deb #515326) */
	{ PCI_VENDOR_ID_INTEL, 0x2550, PCI_VENDOR_ID_ATI, 0x4152, 0x1458, 0x4038, 4},
	/* Intel 82865G/PE/P DRAM Controller/Host-Hub / Mobility 9800 Needs AGPMode 4 (deb #462590) */
	{ PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x4a4e, PCI_VENDOR_ID_DELL, 0x5106, 4},
	/* Intel 82865G/PE/P DRAM Controller/Host-Hub / RV280 [Radeon 9200 SE] Needs AGPMode 4 (lp #300304) */
	{ PCI_VENDOR_ID_INTEL, 0x2570, PCI_VENDOR_ID_ATI, 0x5964,
		0x148c, 0x2073, 4},
	/* Intel 82855PM Processor to I/O Controller / Mobility M6 LY Needs AGPMode 1 (deb #467235) */
	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c59,
		PCI_VENDOR_ID_IBM, 0x052f, 1},
	/* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
		PCI_VENDOR_ID_IBM, 0x0550, 1},
	/* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
		PCI_VENDOR_ID_IBM, 0x0530, 1},
	/* Intel 82855PM host bridge / FireGL Mobility T2 RV350 Needs AGPMode 2 (fdo #20647) */
	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e54,
		PCI_VENDOR_ID_IBM, 0x054f, 2},
	/* Intel 82855PM host bridge / Mobility M9+ / VaioPCG-V505DX Needs AGPMode 2 (fdo #17928) */
	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
		PCI_VENDOR_ID_SONY, 0x816b, 2},
	/* Intel 82855PM Processor to I/O Controller / Mobility M9+ Needs AGPMode 8 (phoronix forum) */
	{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x5c61,
		PCI_VENDOR_ID_SONY, 0x8195, 8},
	/* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
	{ PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
		PCI_VENDOR_ID_DELL, 0x00e3, 2},
	/* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
		PCI_VENDOR_ID_DELL, 0x0149, 1},
	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
		0x1025, 0x0061, 1},
	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #203007) */
	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
		0x1025, 0x0064, 1},
	/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #141551) */
	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
		PCI_VENDOR_ID_ASUSTEK, 0x1942, 1},
	/* Intel 82852/82855 host bridge / Mobility 9600/9700 Needs AGPMode 1 (deb #510208) */
	{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
		0x10cf, 0x127f, 1},
	/* ASRock K7VT4A+ AGP 8x / ATI Radeon 9250 AGP Needs AGPMode 4 (lp #133192) */
	{ 0x1849, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
		0x1787, 0x5960, 4},
	/* VIA K8M800 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (fdo #12544) */
	{ PCI_VENDOR_ID_VIA, 0x0204, PCI_VENDOR_ID_ATI, 0x5960,
		0x17af, 0x2020, 4},
	/* VIA KT880 Host Bridge / RV350 [Radeon 9550] Needs AGPMode 4 (fdo #19981) */
	{ PCI_VENDOR_ID_VIA, 0x0269, PCI_VENDOR_ID_ATI, 0x4153,
		PCI_VENDOR_ID_ASUSTEK, 0x003c, 4},
	/* VIA VT8363 Host Bridge / R200 QL [Radeon 8500] Needs AGPMode 2 (lp #141551) */
	{ PCI_VENDOR_ID_VIA, 0x0305, PCI_VENDOR_ID_ATI, 0x514c,
		PCI_VENDOR_ID_ATI, 0x013a, 2},
	/* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 (deb #515512) */
	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
		PCI_VENDOR_ID_ASUSTEK, 0x004c, 2},
	/* VIA VT82C693A Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 2 */
	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_ATI, 0x5960,
		PCI_VENDOR_ID_ASUSTEK, 0x0054, 2},
	/* VIA VT8377 Host Bridge / R200 QM [Radeon 9100] Needs AGPMode 4 (deb #461144) */
	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x514d,
		0x174b, 0x7149, 4},
	/* VIA VT8377 Host Bridge / RV280 [Radeon 9200 PRO] Needs AGPMode 4 (lp #312693) */
	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5960,
		0x1462, 0x0380, 4},
	/* VIA VT8377 Host Bridge / RV280 Needs AGPMode 4 (ati ML) */
	{ PCI_VENDOR_ID_VIA, 0x3189, PCI_VENDOR_ID_ATI, 0x5964,
		0x148c, 0x2073, 4},
	/* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
	{ PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
		PCI_VENDOR_ID_SONY, 0x8175, 1},
	/* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
	{ PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
		PCI_VENDOR_ID_ATI, 0x0152, 2},
	{ 0, 0, 0, 0, 0, 0, 0 },
};
123#endif
124
/*
 * radeon_agp_init - acquire the AGP bus and program a mutually
 * supported AGP transfer mode.
 *
 * Reads bridge capabilities (drm_agp_info), intersects them with the
 * card's RADEON_AGP_STATUS register, applies the per-system quirk
 * table and the radeon_agpmode override (declared elsewhere), then
 * enables AGP and records aperture base/size into rdev->mc.
 *
 * Returns 0 on success or a negative error code from the DRM AGP core.
 * Compiles to "return 0" when the kernel has no AGP support.
 */
int radeon_agp_init(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
	struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
	struct drm_agp_mode mode;
	struct drm_agp_info info;
	uint32_t agp_status;
	int default_mode;
	bool is_v3;
	int ret;

	/* Acquire AGP. */
	if (!rdev->ddev->agp->acquired) {
		ret = drm_agp_acquire(rdev->ddev);
		if (ret) {
			DRM_ERROR("Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	ret = drm_agp_info(rdev->ddev, &info);
	if (ret) {
		/* NOTE(review): AGP stays acquired on this and the later
		 * error paths — confirm radeon_agp_fini() runs on probe
		 * failure. */
		DRM_ERROR("Unable to get AGP info: %d\n", ret);
		return ret;
	}
	mode.mode = info.mode;
	/* Keep only the rate bits both bridge and card advertise; OR-ing
	 * in RADEON_AGPv3_MODE lets the bridge's word decide v3-ness. */
	agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
	is_v3 = !!(agp_status & RADEON_AGPv3_MODE);

	/* Default to the fastest commonly supported rate. */
	if (is_v3) {
		default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4;
	} else {
		if (agp_status & RADEON_AGP_4X_MODE) {
			default_mode = 4;
		} else if (agp_status & RADEON_AGP_2X_MODE) {
			default_mode = 2;
		} else {
			default_mode = 1;
		}
	}

	/* Apply AGPMode Quirks (scan continues; last matching entry wins) */
	while (p && p->chip_device != 0) {
		if (info.id_vendor == p->hostbridge_vendor &&
		    info.id_device == p->hostbridge_device &&
		    rdev->pdev->vendor == p->chip_vendor &&
		    rdev->pdev->device == p->chip_device &&
		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
		    rdev->pdev->subsystem_device == p->subsys_device) {
			default_mode = p->default_mode;
		}
		++p;
	}

	/* A user-requested rate must be a power of two legal for the
	 * negotiated generation: v3 allows 4/8, earlier allows 1/2/4. */
	if (radeon_agpmode > 0) {
		if ((radeon_agpmode < (is_v3 ? 4 : 1)) ||
		    (radeon_agpmode > (is_v3 ? 8 : 4)) ||
		    (radeon_agpmode & (radeon_agpmode - 1))) {
			DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n",
				  radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4",
				  default_mode);
			radeon_agpmode = default_mode;
		} else {
			DRM_INFO("AGP mode requested: %d\n", radeon_agpmode);
		}
	} else {
		radeon_agpmode = default_mode;
	}

	/* Rewrite just the rate bits of the mode word. */
	mode.mode &= ~RADEON_AGP_MODE_MASK;
	if (is_v3) {
		switch (radeon_agpmode) {
		case 8:
			mode.mode |= RADEON_AGPv3_8X_MODE;
			break;
		case 4:
		default:
			mode.mode |= RADEON_AGPv3_4X_MODE;
			break;
		}
	} else {
		switch (radeon_agpmode) {
		case 4:
			mode.mode |= RADEON_AGP_4X_MODE;
			break;
		case 2:
			mode.mode |= RADEON_AGP_2X_MODE;
			break;
		case 1:
		default:
			mode.mode |= RADEON_AGP_1X_MODE;
			break;
		}
	}

	mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */
	ret = drm_agp_enable(rdev->ddev, mode);
	if (ret) {
		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
		return ret;
	}

	/* Use the AGP aperture as the GTT range (aper_size is in MB). */
	rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
	rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;

	/* workaround some hw issues */
	if (rdev->family < CHIP_R200) {
		WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000);
	}
	return 0;
#else
	return 0;
#endif
}
239
/* Release the AGP backend if this AGP device still holds it. */
void radeon_agp_fini(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
	/* Nothing to do for non-AGP cards or when AGP was never acquired. */
	if ((rdev->flags & RADEON_IS_AGP) &&
	    rdev->ddev->agp && rdev->ddev->agp->acquired)
		drm_agp_release(rdev->ddev);
#endif
}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
new file mode 100644
index 000000000000..e57d8a784e9f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -0,0 +1,405 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RADEON_ASIC_H__
29#define __RADEON_ASIC_H__
30
/*
 * common functions
 */
/* Clock helpers for legacy-BIOS families (tables below pair these with
 * non-atom chips). */
void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);

/* Clock helpers driven through AtomBIOS tables. */
void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
40
/*
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 */
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_errata(struct radeon_device *rdev);
void r100_vram_info(struct radeon_device *rdev);
int r100_gpu_reset(struct radeon_device *rdev);
int r100_mc_init(struct radeon_device *rdev);
void r100_mc_fini(struct radeon_device *rdev);
int r100_wb_init(struct radeon_device *rdev);
void r100_wb_fini(struct radeon_device *rdev);
int r100_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
void r100_cp_fini(struct radeon_device *rdev);
void r100_cp_disable(struct radeon_device *rdev);
void r100_ring_start(struct radeon_device *rdev);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence);
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
int r100_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset,
		   uint64_t dst_offset,
		   unsigned num_pages,
		   struct radeon_fence *fence);

/* r100-class table: PCI GART, legacy clock control, blit-only copies
 * (no DMA copy engine hook).
 * NOTE(review): these tables are `static` in a header, so every
 * including translation unit gets its own copy. */
static struct radeon_asic r100_asic = {
	.errata = &r100_errata,
	.vram_info = &r100_vram_info,
	.gpu_reset = &r100_gpu_reset,
	.mc_init = &r100_mc_init,
	.mc_fini = &r100_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &r100_gart_enable,
	.gart_disable = &r100_pci_gart_disable,
	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
	.gart_set_page = &r100_pci_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &r100_ring_start,
	.irq_set = &r100_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r100_fence_ring_emit,
	.cs_parse = &r100_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = NULL,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_legacy_set_engine_clock,
	.set_memory_clock = NULL,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_legacy_set_clock_gating,
};
102
103
/*
 * r300,r350,rv350,rv380
 */
void r300_errata(struct radeon_device *rdev);
void r300_vram_info(struct radeon_device *rdev);
int r300_gpu_reset(struct radeon_device *rdev);
int r300_mc_init(struct radeon_device *rdev);
void r300_mc_fini(struct radeon_device *rdev);
void r300_ring_start(struct radeon_device *rdev);
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence);
int r300_cs_parse(struct radeon_cs_parser *p);
int r300_gart_enable(struct radeon_device *rdev);
/* rv370 PCIE GART/lane helpers, used by the PCIE-based tables below */
void rv370_pcie_gart_disable(struct radeon_device *rdev);
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
int r300_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence);
/* r300 table: PCI GART hooks, adds the DMA copy engine, legacy clocks. */
static struct radeon_asic r300_asic = {
	.errata = &r300_errata,
	.vram_info = &r300_vram_info,
	.gpu_reset = &r300_gpu_reset,
	.mc_init = &r300_mc_init,
	.mc_fini = &r300_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &r300_gart_enable,
	.gart_disable = &r100_pci_gart_disable,
	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
	.gart_set_page = &r100_pci_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &r300_ring_start,
	.irq_set = &r100_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r300_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_legacy_set_engine_clock,
	.set_memory_clock = NULL,
	.set_pcie_lanes = &rv370_set_pcie_lanes,
	.set_clock_gating = &radeon_legacy_set_clock_gating,
};
156
/*
 * r420,r423,rv410
 */
void r420_errata(struct radeon_device *rdev);
void r420_vram_info(struct radeon_device *rdev);
int r420_mc_init(struct radeon_device *rdev);
void r420_mc_fini(struct radeon_device *rdev);
/* r420 table: rv370 PCIE GART, AtomBIOS clock control, otherwise r300. */
static struct radeon_asic r420_asic = {
	.errata = &r420_errata,
	.vram_info = &r420_vram_info,
	.gpu_reset = &r300_gpu_reset,
	.mc_init = &r420_mc_init,
	.mc_fini = &r420_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &r300_gart_enable,
	.gart_disable = &rv370_pcie_gart_disable,
	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
	.gart_set_page = &rv370_pcie_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &r300_ring_start,
	.irq_set = &r100_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r300_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_atom_set_engine_clock,
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = &rv370_set_pcie_lanes,
	.set_clock_gating = &radeon_atom_set_clock_gating,
};
192
193
/*
 * rs400,rs480
 */
void rs400_errata(struct radeon_device *rdev);
void rs400_vram_info(struct radeon_device *rdev);
int rs400_mc_init(struct radeon_device *rdev);
void rs400_mc_fini(struct radeon_device *rdev);
int rs400_gart_enable(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
/* rs400 table: own IGP GART hooks, legacy clock control, r300 CS/fence. */
static struct radeon_asic rs400_asic = {
	.errata = &rs400_errata,
	.vram_info = &rs400_vram_info,
	.gpu_reset = &r300_gpu_reset,
	.mc_init = &rs400_mc_init,
	.mc_fini = &rs400_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &rs400_gart_enable,
	.gart_disable = &rs400_gart_disable,
	.gart_tlb_flush = &rs400_gart_tlb_flush,
	.gart_set_page = &rs400_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &r300_ring_start,
	.irq_set = &r100_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r300_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_legacy_set_engine_clock,
	.set_memory_clock = NULL,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_legacy_set_clock_gating,
};
235
236
/*
 * rs600.
 */
void rs600_errata(struct radeon_device *rdev);
void rs600_vram_info(struct radeon_device *rdev);
int rs600_mc_init(struct radeon_device *rdev);
void rs600_mc_fini(struct radeon_device *rdev);
int rs600_irq_set(struct radeon_device *rdev);
int rs600_gart_enable(struct radeon_device *rdev);
void rs600_gart_disable(struct radeon_device *rdev);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
/* rs600 table: own GART and irq_set hooks, AtomBIOS clock control. */
static struct radeon_asic rs600_asic = {
	.errata = &rs600_errata,
	.vram_info = &rs600_vram_info,
	.gpu_reset = &r300_gpu_reset,
	.mc_init = &rs600_mc_init,
	.mc_fini = &rs600_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &rs600_gart_enable,
	.gart_disable = &rs600_gart_disable,
	.gart_tlb_flush = &rs600_gart_tlb_flush,
	.gart_set_page = &rs600_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &r300_ring_start,
	.irq_set = &rs600_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r300_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_atom_set_engine_clock,
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_atom_set_clock_gating,
};
279
280
/*
 * rs690,rs740
 */
void rs690_errata(struct radeon_device *rdev);
void rs690_vram_info(struct radeon_device *rdev);
int rs690_mc_init(struct radeon_device *rdev);
void rs690_mc_fini(struct radeon_device *rdev);
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
/* rs690 table: reuses the rs400 GART hooks and rs600 irq_set. */
static struct radeon_asic rs690_asic = {
	.errata = &rs690_errata,
	.vram_info = &rs690_vram_info,
	.gpu_reset = &r300_gpu_reset,
	.mc_init = &rs690_mc_init,
	.mc_fini = &rs690_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &rs400_gart_enable,
	.gart_disable = &rs400_gart_disable,
	.gart_tlb_flush = &rs400_gart_tlb_flush,
	.gart_set_page = &rs400_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &r300_ring_start,
	.irq_set = &rs600_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r300_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	/* unlike the other tables, the default copy path is the DMA
	 * engine here — presumably preferred on this IGP; confirm */
	.copy = &r300_copy_dma,
	.set_engine_clock = &radeon_atom_set_engine_clock,
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = NULL,
	.set_clock_gating = &radeon_atom_set_clock_gating,
};
318
319
/*
 * rv515
 */
void rv515_errata(struct radeon_device *rdev);
void rv515_vram_info(struct radeon_device *rdev);
int rv515_gpu_reset(struct radeon_device *rdev);
int rv515_mc_init(struct radeon_device *rdev);
void rv515_mc_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
/* rv515 table: rv370 PCIE GART, own reset/ring-start, AtomBIOS clocks.
 * NOTE(review): cs_parse is the r100 parser while the other r3xx-class
 * tables use r300_cs_parse — confirm this is intentional. */
static struct radeon_asic rv515_asic = {
	.errata = &rv515_errata,
	.vram_info = &rv515_vram_info,
	.gpu_reset = &rv515_gpu_reset,
	.mc_init = &rv515_mc_init,
	.mc_fini = &rv515_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &r300_gart_enable,
	.gart_disable = &rv370_pcie_gart_disable,
	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
	.gart_set_page = &rv370_pcie_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &rv515_ring_start,
	.irq_set = &r100_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r100_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_atom_set_engine_clock,
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = &rv370_set_pcie_lanes,
	.set_clock_gating = &radeon_atom_set_clock_gating,
};
361
362
/*
 * r520,rv530,rv560,rv570,r580
 */
void r520_errata(struct radeon_device *rdev);
void r520_vram_info(struct radeon_device *rdev);
int r520_mc_init(struct radeon_device *rdev);
void r520_mc_fini(struct radeon_device *rdev);
/* r520 table: identical to rv515 except for errata/vram_info/mc hooks.
 * NOTE(review): also uses the r100 CS parser — see rv515_asic. */
static struct radeon_asic r520_asic = {
	.errata = &r520_errata,
	.vram_info = &r520_vram_info,
	.gpu_reset = &rv515_gpu_reset,
	.mc_init = &r520_mc_init,
	.mc_fini = &r520_mc_fini,
	.wb_init = &r100_wb_init,
	.wb_fini = &r100_wb_fini,
	.gart_enable = &r300_gart_enable,
	.gart_disable = &rv370_pcie_gart_disable,
	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
	.gart_set_page = &rv370_pcie_gart_set_page,
	.cp_init = &r100_cp_init,
	.cp_fini = &r100_cp_fini,
	.cp_disable = &r100_cp_disable,
	.ring_start = &rv515_ring_start,
	.irq_set = &r100_irq_set,
	.irq_process = &r100_irq_process,
	.fence_ring_emit = &r300_fence_ring_emit,
	.cs_parse = &r100_cs_parse,
	.copy_blit = &r100_copy_blit,
	.copy_dma = &r300_copy_dma,
	.copy = &r100_copy_blit,
	.set_engine_clock = &radeon_atom_set_engine_clock,
	.set_memory_clock = &radeon_atom_set_memory_clock,
	.set_pcie_lanes = &rv370_set_pcie_lanes,
	.set_clock_gating = &radeon_atom_set_clock_gating,
};
398
/*
 * r600,rv610,rv630,rv620,rv635,rv670,rs780,rv770,rv730,rv710
 */
/* Only the PCIE-port register accessors exist for r600+ at this point. */
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
404
405#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
new file mode 100644
index 000000000000..786632d3e378
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -0,0 +1,1298 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "radeon_drm.h"
28#include "radeon.h"
29
30#include "atom.h"
31#include "atom-bits.h"
32
33/* from radeon_encoder.c */
34extern uint32_t
35radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
36 uint8_t dac);
37extern void radeon_link_encoder_connector(struct drm_device *dev);
38extern void
39radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
40 uint32_t supported_device);
41
42/* from radeon_connector.c */
43extern void
44radeon_add_atom_connector(struct drm_device *dev,
45 uint32_t connector_id,
46 uint32_t supported_device,
47 int connector_type,
48 struct radeon_i2c_bus_rec *i2c_bus,
49 bool linkb, uint32_t igp_lane_info);
50
51/* from radeon_legacy_encoder.c */
52extern void
53radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
54 uint32_t supported_device);
55
56union atom_supported_devices {
57 struct _ATOM_SUPPORTED_DEVICES_INFO info;
58 struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
59 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
60};
61
62static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
63 *dev, uint8_t id)
64{
65 struct radeon_device *rdev = dev->dev_private;
66 struct atom_context *ctx = rdev->mode_info.atom_context;
67 ATOM_GPIO_I2C_ASSIGMENT gpio;
68 struct radeon_i2c_bus_rec i2c;
69 int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
70 struct _ATOM_GPIO_I2C_INFO *i2c_info;
71 uint16_t data_offset;
72
73 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
74 i2c.valid = false;
75
76 atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
77
78 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
79
80 gpio = i2c_info->asGPIO_Info[id];
81
82 i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
83 i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
84 i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
85 i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
86 i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
87 i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
88 i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
89 i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
90 i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
91 i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
92 i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
93 i2c.put_data_mask = (1 << gpio.ucDataEnShift);
94 i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
95 i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
96 i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
97 i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
98 i2c.valid = true;
99
100 return i2c;
101}
102
103static bool radeon_atom_apply_quirks(struct drm_device *dev,
104 uint32_t supported_device,
105 int *connector_type,
106 struct radeon_i2c_bus_rec *i2c_bus)
107{
108
109 /* Asus M2A-VM HDMI board lists the DVI port as HDMI */
110 if ((dev->pdev->device == 0x791e) &&
111 (dev->pdev->subsystem_vendor == 0x1043) &&
112 (dev->pdev->subsystem_device == 0x826d)) {
113 if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
114 (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
115 *connector_type = DRM_MODE_CONNECTOR_DVID;
116 }
117
118 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
119 if ((dev->pdev->device == 0x7941) &&
120 (dev->pdev->subsystem_vendor == 0x147b) &&
121 (dev->pdev->subsystem_device == 0x2412)) {
122 if (*connector_type == DRM_MODE_CONNECTOR_DVII)
123 return false;
124 }
125
126 /* Falcon NW laptop lists vga ddc line for LVDS */
127 if ((dev->pdev->device == 0x5653) &&
128 (dev->pdev->subsystem_vendor == 0x1462) &&
129 (dev->pdev->subsystem_device == 0x0291)) {
130 if (*connector_type == DRM_MODE_CONNECTOR_LVDS)
131 i2c_bus->valid = false;
132 }
133
134 /* Funky macbooks */
135 if ((dev->pdev->device == 0x71C5) &&
136 (dev->pdev->subsystem_vendor == 0x106b) &&
137 (dev->pdev->subsystem_device == 0x0080)) {
138 if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
139 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
140 return false;
141 }
142
143 /* some BIOSes seem to report DAC on HDMI - they hurt me with their lies */
144 if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
145 (*connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
146 if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) {
147 return false;
148 }
149 }
150
151 /* ASUS HD 3600 XT board lists the DVI port as HDMI */
152 if ((dev->pdev->device == 0x9598) &&
153 (dev->pdev->subsystem_vendor == 0x1043) &&
154 (dev->pdev->subsystem_device == 0x01da)) {
155 if (*connector_type == DRM_MODE_CONNECTOR_HDMIB) {
156 *connector_type = DRM_MODE_CONNECTOR_DVID;
157 }
158 }
159
160 return true;
161}
162
163const int supported_devices_connector_convert[] = {
164 DRM_MODE_CONNECTOR_Unknown,
165 DRM_MODE_CONNECTOR_VGA,
166 DRM_MODE_CONNECTOR_DVII,
167 DRM_MODE_CONNECTOR_DVID,
168 DRM_MODE_CONNECTOR_DVIA,
169 DRM_MODE_CONNECTOR_SVIDEO,
170 DRM_MODE_CONNECTOR_Composite,
171 DRM_MODE_CONNECTOR_LVDS,
172 DRM_MODE_CONNECTOR_Unknown,
173 DRM_MODE_CONNECTOR_Unknown,
174 DRM_MODE_CONNECTOR_HDMIA,
175 DRM_MODE_CONNECTOR_HDMIB,
176 DRM_MODE_CONNECTOR_Unknown,
177 DRM_MODE_CONNECTOR_Unknown,
178 DRM_MODE_CONNECTOR_9PinDIN,
179 DRM_MODE_CONNECTOR_DisplayPort
180};
181
182const int object_connector_convert[] = {
183 DRM_MODE_CONNECTOR_Unknown,
184 DRM_MODE_CONNECTOR_DVII,
185 DRM_MODE_CONNECTOR_DVII,
186 DRM_MODE_CONNECTOR_DVID,
187 DRM_MODE_CONNECTOR_DVID,
188 DRM_MODE_CONNECTOR_VGA,
189 DRM_MODE_CONNECTOR_Composite,
190 DRM_MODE_CONNECTOR_SVIDEO,
191 DRM_MODE_CONNECTOR_Unknown,
192 DRM_MODE_CONNECTOR_9PinDIN,
193 DRM_MODE_CONNECTOR_Unknown,
194 DRM_MODE_CONNECTOR_HDMIA,
195 DRM_MODE_CONNECTOR_HDMIB,
196 DRM_MODE_CONNECTOR_HDMIB,
197 DRM_MODE_CONNECTOR_LVDS,
198 DRM_MODE_CONNECTOR_9PinDIN,
199 DRM_MODE_CONNECTOR_Unknown,
200 DRM_MODE_CONNECTOR_Unknown,
201 DRM_MODE_CONNECTOR_Unknown,
202 DRM_MODE_CONNECTOR_DisplayPort
203};
204
205bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
206{
207 struct radeon_device *rdev = dev->dev_private;
208 struct radeon_mode_info *mode_info = &rdev->mode_info;
209 struct atom_context *ctx = mode_info->atom_context;
210 int index = GetIndexIntoMasterTable(DATA, Object_Header);
211 uint16_t size, data_offset;
212 uint8_t frev, crev, line_mux = 0;
213 ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
214 ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
215 ATOM_OBJECT_HEADER *obj_header;
216 int i, j, path_size, device_support;
217 int connector_type;
218 uint16_t igp_lane_info;
219 bool linkb;
220 struct radeon_i2c_bus_rec ddc_bus;
221
222 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
223
224 if (data_offset == 0)
225 return false;
226
227 if (crev < 2)
228 return false;
229
230 obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
231 path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
232 (ctx->bios + data_offset +
233 le16_to_cpu(obj_header->usDisplayPathTableOffset));
234 con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
235 (ctx->bios + data_offset +
236 le16_to_cpu(obj_header->usConnectorObjectTableOffset));
237 device_support = le16_to_cpu(obj_header->usDeviceSupport);
238
239 path_size = 0;
240 for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
241 uint8_t *addr = (uint8_t *) path_obj->asDispPath;
242 ATOM_DISPLAY_OBJECT_PATH *path;
243 addr += path_size;
244 path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
245 path_size += le16_to_cpu(path->usSize);
246 linkb = false;
247
248 if (device_support & le16_to_cpu(path->usDeviceTag)) {
249 uint8_t con_obj_id, con_obj_num, con_obj_type;
250
251 con_obj_id =
252 (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
253 >> OBJECT_ID_SHIFT;
254 con_obj_num =
255 (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
256 >> ENUM_ID_SHIFT;
257 con_obj_type =
258 (le16_to_cpu(path->usConnObjectId) &
259 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
260
261 if ((le16_to_cpu(path->usDeviceTag) ==
262 ATOM_DEVICE_TV1_SUPPORT)
263 || (le16_to_cpu(path->usDeviceTag) ==
264 ATOM_DEVICE_TV2_SUPPORT)
265 || (le16_to_cpu(path->usDeviceTag) ==
266 ATOM_DEVICE_CV_SUPPORT))
267 continue;
268
269 if ((rdev->family == CHIP_RS780) &&
270 (con_obj_id ==
271 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) {
272 uint16_t igp_offset = 0;
273 ATOM_INTEGRATED_SYSTEM_INFO_V2 *igp_obj;
274
275 index =
276 GetIndexIntoMasterTable(DATA,
277 IntegratedSystemInfo);
278
279 atom_parse_data_header(ctx, index, &size, &frev,
280 &crev, &igp_offset);
281
282 if (crev >= 2) {
283 igp_obj =
284 (ATOM_INTEGRATED_SYSTEM_INFO_V2
285 *) (ctx->bios + igp_offset);
286
287 if (igp_obj) {
288 uint32_t slot_config, ct;
289
290 if (con_obj_num == 1)
291 slot_config =
292 igp_obj->
293 ulDDISlot1Config;
294 else
295 slot_config =
296 igp_obj->
297 ulDDISlot2Config;
298
299 ct = (slot_config >> 16) & 0xff;
300 connector_type =
301 object_connector_convert
302 [ct];
303 igp_lane_info =
304 slot_config & 0xffff;
305 } else
306 continue;
307 } else
308 continue;
309 } else {
310 igp_lane_info = 0;
311 connector_type =
312 object_connector_convert[con_obj_id];
313 }
314
315 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
316 continue;
317
318 for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2);
319 j++) {
320 uint8_t enc_obj_id, enc_obj_num, enc_obj_type;
321
322 enc_obj_id =
323 (le16_to_cpu(path->usGraphicObjIds[j]) &
324 OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
325 enc_obj_num =
326 (le16_to_cpu(path->usGraphicObjIds[j]) &
327 ENUM_ID_MASK) >> ENUM_ID_SHIFT;
328 enc_obj_type =
329 (le16_to_cpu(path->usGraphicObjIds[j]) &
330 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
331
332 /* FIXME: add support for router objects */
333 if (enc_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
334 if (enc_obj_num == 2)
335 linkb = true;
336 else
337 linkb = false;
338
339 radeon_add_atom_encoder(dev,
340 enc_obj_id,
341 le16_to_cpu
342 (path->
343 usDeviceTag));
344
345 }
346 }
347
348 /* look up gpio for ddc */
349 if ((le16_to_cpu(path->usDeviceTag) &
350 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
351 == 0) {
352 for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
353 if (le16_to_cpu(path->usConnObjectId) ==
354 le16_to_cpu(con_obj->asObjects[j].
355 usObjectID)) {
356 ATOM_COMMON_RECORD_HEADER
357 *record =
358 (ATOM_COMMON_RECORD_HEADER
359 *)
360 (ctx->bios + data_offset +
361 le16_to_cpu(con_obj->
362 asObjects[j].
363 usRecordOffset));
364 ATOM_I2C_RECORD *i2c_record;
365
366 while (record->ucRecordType > 0
367 && record->
368 ucRecordType <=
369 ATOM_MAX_OBJECT_RECORD_NUMBER) {
370 DRM_ERROR
371 ("record type %d\n",
372 record->
373 ucRecordType);
374 switch (record->
375 ucRecordType) {
376 case ATOM_I2C_RECORD_TYPE:
377 i2c_record =
378 (ATOM_I2C_RECORD
379 *) record;
380 line_mux =
381 i2c_record->
382 sucI2cId.
383 bfI2C_LineMux;
384 break;
385 }
386 record =
387 (ATOM_COMMON_RECORD_HEADER
388 *) ((char *)record
389 +
390 record->
391 ucRecordSize);
392 }
393 break;
394 }
395 }
396 } else
397 line_mux = 0;
398
399 if ((le16_to_cpu(path->usDeviceTag) ==
400 ATOM_DEVICE_TV1_SUPPORT)
401 || (le16_to_cpu(path->usDeviceTag) ==
402 ATOM_DEVICE_TV2_SUPPORT)
403 || (le16_to_cpu(path->usDeviceTag) ==
404 ATOM_DEVICE_CV_SUPPORT))
405 ddc_bus.valid = false;
406 else
407 ddc_bus = radeon_lookup_gpio(dev, line_mux);
408
409 radeon_add_atom_connector(dev,
410 le16_to_cpu(path->
411 usConnObjectId),
412 le16_to_cpu(path->
413 usDeviceTag),
414 connector_type, &ddc_bus,
415 linkb, igp_lane_info);
416
417 }
418 }
419
420 radeon_link_encoder_connector(dev);
421
422 return true;
423}
424
425struct bios_connector {
426 bool valid;
427 uint8_t line_mux;
428 uint16_t devices;
429 int connector_type;
430 struct radeon_i2c_bus_rec ddc_bus;
431};
432
433bool radeon_get_atom_connector_info_from_supported_devices_table(struct
434 drm_device
435 *dev)
436{
437 struct radeon_device *rdev = dev->dev_private;
438 struct radeon_mode_info *mode_info = &rdev->mode_info;
439 struct atom_context *ctx = mode_info->atom_context;
440 int index = GetIndexIntoMasterTable(DATA, SupportedDevicesInfo);
441 uint16_t size, data_offset;
442 uint8_t frev, crev;
443 uint16_t device_support;
444 uint8_t dac;
445 union atom_supported_devices *supported_devices;
446 int i, j;
447 struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
448
449 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
450
451 supported_devices =
452 (union atom_supported_devices *)(ctx->bios + data_offset);
453
454 device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
455
456 for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
457 ATOM_CONNECTOR_INFO_I2C ci =
458 supported_devices->info.asConnInfo[i];
459
460 bios_connectors[i].valid = false;
461
462 if (!(device_support & (1 << i))) {
463 continue;
464 }
465
466 if (i == ATOM_DEVICE_CV_INDEX) {
467 DRM_DEBUG("Skipping Component Video\n");
468 continue;
469 }
470
471 if (i == ATOM_DEVICE_TV1_INDEX) {
472 DRM_DEBUG("Skipping TV Out\n");
473 continue;
474 }
475
476 bios_connectors[i].connector_type =
477 supported_devices_connector_convert[ci.sucConnectorInfo.
478 sbfAccess.
479 bfConnectorType];
480
481 if (bios_connectors[i].connector_type ==
482 DRM_MODE_CONNECTOR_Unknown)
483 continue;
484
485 dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
486
487 if ((rdev->family == CHIP_RS690) ||
488 (rdev->family == CHIP_RS740)) {
489 if ((i == ATOM_DEVICE_DFP2_INDEX)
490 && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
491 bios_connectors[i].line_mux =
492 ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
493 else if ((i == ATOM_DEVICE_DFP3_INDEX)
494 && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
495 bios_connectors[i].line_mux =
496 ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
497 else
498 bios_connectors[i].line_mux =
499 ci.sucI2cId.sbfAccess.bfI2C_LineMux;
500 } else
501 bios_connectors[i].line_mux =
502 ci.sucI2cId.sbfAccess.bfI2C_LineMux;
503
504 /* give tv unique connector ids */
505 if (i == ATOM_DEVICE_TV1_INDEX) {
506 bios_connectors[i].ddc_bus.valid = false;
507 bios_connectors[i].line_mux = 50;
508 } else if (i == ATOM_DEVICE_TV2_INDEX) {
509 bios_connectors[i].ddc_bus.valid = false;
510 bios_connectors[i].line_mux = 51;
511 } else if (i == ATOM_DEVICE_CV_INDEX) {
512 bios_connectors[i].ddc_bus.valid = false;
513 bios_connectors[i].line_mux = 52;
514 } else
515 bios_connectors[i].ddc_bus =
516 radeon_lookup_gpio(dev,
517 bios_connectors[i].line_mux);
518
519 /* Always set the connector type to VGA for CRT1/CRT2. if they are
520 * shared with a DVI port, we'll pick up the DVI connector when we
521 * merge the outputs. Some bioses incorrectly list VGA ports as DVI.
522 */
523 if (i == ATOM_DEVICE_CRT1_INDEX || i == ATOM_DEVICE_CRT2_INDEX)
524 bios_connectors[i].connector_type =
525 DRM_MODE_CONNECTOR_VGA;
526
527 if (!radeon_atom_apply_quirks
528 (dev, (1 << i), &bios_connectors[i].connector_type,
529 &bios_connectors[i].ddc_bus))
530 continue;
531
532 bios_connectors[i].valid = true;
533 bios_connectors[i].devices = (1 << i);
534
535 if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
536 radeon_add_atom_encoder(dev,
537 radeon_get_encoder_id(dev,
538 (1 << i),
539 dac),
540 (1 << i));
541 else
542 radeon_add_legacy_encoder(dev,
543 radeon_get_encoder_id(dev,
544 (1 <<
545 i),
546 dac),
547 (1 << i));
548 }
549
550 /* combine shared connectors */
551 for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
552 if (bios_connectors[i].valid) {
553 for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
554 if (bios_connectors[j].valid && (i != j)) {
555 if (bios_connectors[i].line_mux ==
556 bios_connectors[j].line_mux) {
557 if (((bios_connectors[i].
558 devices &
559 (ATOM_DEVICE_DFP_SUPPORT))
560 && (bios_connectors[j].
561 devices &
562 (ATOM_DEVICE_CRT_SUPPORT)))
563 ||
564 ((bios_connectors[j].
565 devices &
566 (ATOM_DEVICE_DFP_SUPPORT))
567 && (bios_connectors[i].
568 devices &
569 (ATOM_DEVICE_CRT_SUPPORT)))) {
570 bios_connectors[i].
571 devices |=
572 bios_connectors[j].
573 devices;
574 bios_connectors[i].
575 connector_type =
576 DRM_MODE_CONNECTOR_DVII;
577 bios_connectors[j].
578 valid = false;
579 }
580 }
581 }
582 }
583 }
584 }
585
586 /* add the connectors */
587 for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
588 if (bios_connectors[i].valid)
589 radeon_add_atom_connector(dev,
590 bios_connectors[i].line_mux,
591 bios_connectors[i].devices,
592 bios_connectors[i].
593 connector_type,
594 &bios_connectors[i].ddc_bus,
595 false, 0);
596 }
597
598 radeon_link_encoder_connector(dev);
599
600 return true;
601}
602
603union firmware_info {
604 ATOM_FIRMWARE_INFO info;
605 ATOM_FIRMWARE_INFO_V1_2 info_12;
606 ATOM_FIRMWARE_INFO_V1_3 info_13;
607 ATOM_FIRMWARE_INFO_V1_4 info_14;
608};
609
610bool radeon_atom_get_clock_info(struct drm_device *dev)
611{
612 struct radeon_device *rdev = dev->dev_private;
613 struct radeon_mode_info *mode_info = &rdev->mode_info;
614 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
615 union firmware_info *firmware_info;
616 uint8_t frev, crev;
617 struct radeon_pll *p1pll = &rdev->clock.p1pll;
618 struct radeon_pll *p2pll = &rdev->clock.p2pll;
619 struct radeon_pll *spll = &rdev->clock.spll;
620 struct radeon_pll *mpll = &rdev->clock.mpll;
621 uint16_t data_offset;
622
623 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
624 &crev, &data_offset);
625
626 firmware_info =
627 (union firmware_info *)(mode_info->atom_context->bios +
628 data_offset);
629
630 if (firmware_info) {
631 /* pixel clocks */
632 p1pll->reference_freq =
633 le16_to_cpu(firmware_info->info.usReferenceClock);
634 p1pll->reference_div = 0;
635
636 p1pll->pll_out_min =
637 le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
638 p1pll->pll_out_max =
639 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
640
641 if (p1pll->pll_out_min == 0) {
642 if (ASIC_IS_AVIVO(rdev))
643 p1pll->pll_out_min = 64800;
644 else
645 p1pll->pll_out_min = 20000;
646 }
647
648 p1pll->pll_in_min =
649 le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
650 p1pll->pll_in_max =
651 le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);
652
653 *p2pll = *p1pll;
654
655 /* system clock */
656 spll->reference_freq =
657 le16_to_cpu(firmware_info->info.usReferenceClock);
658 spll->reference_div = 0;
659
660 spll->pll_out_min =
661 le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
662 spll->pll_out_max =
663 le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);
664
665 /* ??? */
666 if (spll->pll_out_min == 0) {
667 if (ASIC_IS_AVIVO(rdev))
668 spll->pll_out_min = 64800;
669 else
670 spll->pll_out_min = 20000;
671 }
672
673 spll->pll_in_min =
674 le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
675 spll->pll_in_max =
676 le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
677
678 /* memory clock */
679 mpll->reference_freq =
680 le16_to_cpu(firmware_info->info.usReferenceClock);
681 mpll->reference_div = 0;
682
683 mpll->pll_out_min =
684 le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
685 mpll->pll_out_max =
686 le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);
687
688 /* ??? */
689 if (mpll->pll_out_min == 0) {
690 if (ASIC_IS_AVIVO(rdev))
691 mpll->pll_out_min = 64800;
692 else
693 mpll->pll_out_min = 20000;
694 }
695
696 mpll->pll_in_min =
697 le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
698 mpll->pll_in_max =
699 le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);
700
701 rdev->clock.default_sclk =
702 le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
703 rdev->clock.default_mclk =
704 le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
705
706 return true;
707 }
708 return false;
709}
710
711struct radeon_encoder_int_tmds *radeon_atombios_get_tmds_info(struct
712 radeon_encoder
713 *encoder)
714{
715 struct drm_device *dev = encoder->base.dev;
716 struct radeon_device *rdev = dev->dev_private;
717 struct radeon_mode_info *mode_info = &rdev->mode_info;
718 int index = GetIndexIntoMasterTable(DATA, TMDS_Info);
719 uint16_t data_offset;
720 struct _ATOM_TMDS_INFO *tmds_info;
721 uint8_t frev, crev;
722 uint16_t maxfreq;
723 int i;
724 struct radeon_encoder_int_tmds *tmds = NULL;
725
726 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
727 &crev, &data_offset);
728
729 tmds_info =
730 (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
731 data_offset);
732
733 if (tmds_info) {
734 tmds =
735 kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
736
737 if (!tmds)
738 return NULL;
739
740 maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
741 for (i = 0; i < 4; i++) {
742 tmds->tmds_pll[i].freq =
743 le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency);
744 tmds->tmds_pll[i].value =
745 tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f;
746 tmds->tmds_pll[i].value |=
747 (tmds_info->asMiscInfo[i].
748 ucPLL_VCO_Gain & 0x3f) << 6;
749 tmds->tmds_pll[i].value |=
750 (tmds_info->asMiscInfo[i].
751 ucPLL_DutyCycle & 0xf) << 12;
752 tmds->tmds_pll[i].value |=
753 (tmds_info->asMiscInfo[i].
754 ucPLL_VoltageSwing & 0xf) << 16;
755
756 DRM_DEBUG("TMDS PLL From ATOMBIOS %u %x\n",
757 tmds->tmds_pll[i].freq,
758 tmds->tmds_pll[i].value);
759
760 if (maxfreq == tmds->tmds_pll[i].freq) {
761 tmds->tmds_pll[i].freq = 0xffffffff;
762 break;
763 }
764 }
765 }
766 return tmds;
767}
768
769union lvds_info {
770 struct _ATOM_LVDS_INFO info;
771 struct _ATOM_LVDS_INFO_V12 info_12;
772};
773
774struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
775 radeon_encoder
776 *encoder)
777{
778 struct drm_device *dev = encoder->base.dev;
779 struct radeon_device *rdev = dev->dev_private;
780 struct radeon_mode_info *mode_info = &rdev->mode_info;
781 int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
782 uint16_t data_offset;
783 union lvds_info *lvds_info;
784 uint8_t frev, crev;
785 struct radeon_encoder_atom_dig *lvds = NULL;
786
787 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
788 &crev, &data_offset);
789
790 lvds_info =
791 (union lvds_info *)(mode_info->atom_context->bios + data_offset);
792
793 if (lvds_info) {
794 lvds =
795 kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
796
797 if (!lvds)
798 return NULL;
799
800 lvds->native_mode.dotclock =
801 le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
802 lvds->native_mode.panel_xres =
803 le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
804 lvds->native_mode.panel_yres =
805 le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
806 lvds->native_mode.hblank =
807 le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
808 lvds->native_mode.hoverplus =
809 le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
810 lvds->native_mode.hsync_width =
811 le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
812 lvds->native_mode.vblank =
813 le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
814 lvds->native_mode.voverplus =
815 le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
816 lvds->native_mode.vsync_width =
817 le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
818 lvds->panel_pwr_delay =
819 le16_to_cpu(lvds_info->info.usOffDelayInMs);
820 lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
821
822 encoder->native_mode = lvds->native_mode;
823 }
824 return lvds;
825}
826
827struct radeon_encoder_primary_dac *
828radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
829{
830 struct drm_device *dev = encoder->base.dev;
831 struct radeon_device *rdev = dev->dev_private;
832 struct radeon_mode_info *mode_info = &rdev->mode_info;
833 int index = GetIndexIntoMasterTable(DATA, CompassionateData);
834 uint16_t data_offset;
835 struct _COMPASSIONATE_DATA *dac_info;
836 uint8_t frev, crev;
837 uint8_t bg, dac;
838 int i;
839 struct radeon_encoder_primary_dac *p_dac = NULL;
840
841 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
842
843 dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
844
845 if (dac_info) {
846 p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
847
848 if (!p_dac)
849 return NULL;
850
851 bg = dac_info->ucDAC1_BG_Adjustment;
852 dac = dac_info->ucDAC1_DAC_Adjustment;
853 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
854
855 }
856 return p_dac;
857}
858
859struct radeon_encoder_tv_dac *
860radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
861{
862 struct drm_device *dev = encoder->base.dev;
863 struct radeon_device *rdev = dev->dev_private;
864 struct radeon_mode_info *mode_info = &rdev->mode_info;
865 int index = GetIndexIntoMasterTable(DATA, CompassionateData);
866 uint16_t data_offset;
867 struct _COMPASSIONATE_DATA *dac_info;
868 uint8_t frev, crev;
869 uint8_t bg, dac;
870 int i;
871 struct radeon_encoder_tv_dac *tv_dac = NULL;
872
873 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
874
875 dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
876
877 if (dac_info) {
878 tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
879
880 if (!tv_dac)
881 return NULL;
882
883 bg = dac_info->ucDAC2_CRT2_BG_Adjustment;
884 dac = dac_info->ucDAC2_CRT2_DAC_Adjustment;
885 tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);
886
887 bg = dac_info->ucDAC2_PAL_BG_Adjustment;
888 dac = dac_info->ucDAC2_PAL_DAC_Adjustment;
889 tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);
890
891 bg = dac_info->ucDAC2_NTSC_BG_Adjustment;
892 dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
893 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
894
895 }
896 return tv_dac;
897}
898
899void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
900{
901 DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
902 int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
903
904 args.ucEnable = enable;
905
906 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
907}
908
909void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable)
910{
911 ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args;
912 int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt);
913
914 args.ucEnable = enable;
915
916 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
917}
918
919void radeon_atom_set_engine_clock(struct radeon_device *rdev,
920 uint32_t eng_clock)
921{
922 SET_ENGINE_CLOCK_PS_ALLOCATION args;
923 int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
924
925 args.ulTargetEngineClock = eng_clock; /* 10 khz */
926
927 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
928}
929
930void radeon_atom_set_memory_clock(struct radeon_device *rdev,
931 uint32_t mem_clock)
932{
933 SET_MEMORY_CLOCK_PS_ALLOCATION args;
934 int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
935
936 if (rdev->flags & RADEON_IS_IGP)
937 return;
938
939 args.ulTargetMemoryClock = mem_clock; /* 10 khz */
940
941 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
942}
943
944void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
945{
946 struct radeon_device *rdev = dev->dev_private;
947 uint32_t bios_2_scratch, bios_6_scratch;
948
949 if (rdev->family >= CHIP_R600) {
950 bios_2_scratch = RREG32(R600_BIOS_0_SCRATCH);
951 bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
952 } else {
953 bios_2_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
954 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
955 }
956
957 /* let the bios control the backlight */
958 bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
959
960 /* tell the bios not to handle mode switching */
961 bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
962
963 if (rdev->family >= CHIP_R600) {
964 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
965 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
966 } else {
967 WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
968 WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
969 }
970
971}
972
973void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock)
974{
975 struct drm_device *dev = encoder->dev;
976 struct radeon_device *rdev = dev->dev_private;
977 uint32_t bios_6_scratch;
978
979 if (rdev->family >= CHIP_R600)
980 bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
981 else
982 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
983
984 if (lock)
985 bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
986 else
987 bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
988
989 if (rdev->family >= CHIP_R600)
990 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
991 else
992 WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
993}
994
995/* at some point we may want to break this out into individual functions */
996void
997radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
998 struct drm_encoder *encoder,
999 bool connected)
1000{
1001 struct drm_device *dev = connector->dev;
1002 struct radeon_device *rdev = dev->dev_private;
1003 struct radeon_connector *radeon_connector =
1004 to_radeon_connector(connector);
1005 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1006 uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;
1007
1008 if (rdev->family >= CHIP_R600) {
1009 bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
1010 bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
1011 bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH);
1012 } else {
1013 bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
1014 bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
1015 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
1016 }
1017
1018 if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
1019 (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
1020 if (connected) {
1021 DRM_DEBUG("TV1 connected\n");
1022 bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
1023 bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
1024 } else {
1025 DRM_DEBUG("TV1 disconnected\n");
1026 bios_0_scratch &= ~ATOM_S0_TV1_MASK;
1027 bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
1028 bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
1029 }
1030 }
1031 if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
1032 (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
1033 if (connected) {
1034 DRM_DEBUG("CV connected\n");
1035 bios_3_scratch |= ATOM_S3_CV_ACTIVE;
1036 bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
1037 } else {
1038 DRM_DEBUG("CV disconnected\n");
1039 bios_0_scratch &= ~ATOM_S0_CV_MASK;
1040 bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
1041 bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
1042 }
1043 }
1044 if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
1045 (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
1046 if (connected) {
1047 DRM_DEBUG("LCD1 connected\n");
1048 bios_0_scratch |= ATOM_S0_LCD1;
1049 bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
1050 bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
1051 } else {
1052 DRM_DEBUG("LCD1 disconnected\n");
1053 bios_0_scratch &= ~ATOM_S0_LCD1;
1054 bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
1055 bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
1056 }
1057 }
1058 if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
1059 (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
1060 if (connected) {
1061 DRM_DEBUG("CRT1 connected\n");
1062 bios_0_scratch |= ATOM_S0_CRT1_COLOR;
1063 bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
1064 bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
1065 } else {
1066 DRM_DEBUG("CRT1 disconnected\n");
1067 bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
1068 bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
1069 bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
1070 }
1071 }
1072 if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
1073 (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
1074 if (connected) {
1075 DRM_DEBUG("CRT2 connected\n");
1076 bios_0_scratch |= ATOM_S0_CRT2_COLOR;
1077 bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
1078 bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
1079 } else {
1080 DRM_DEBUG("CRT2 disconnected\n");
1081 bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
1082 bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
1083 bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
1084 }
1085 }
1086 if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
1087 (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
1088 if (connected) {
1089 DRM_DEBUG("DFP1 connected\n");
1090 bios_0_scratch |= ATOM_S0_DFP1;
1091 bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
1092 bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
1093 } else {
1094 DRM_DEBUG("DFP1 disconnected\n");
1095 bios_0_scratch &= ~ATOM_S0_DFP1;
1096 bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
1097 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
1098 }
1099 }
1100 if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
1101 (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
1102 if (connected) {
1103 DRM_DEBUG("DFP2 connected\n");
1104 bios_0_scratch |= ATOM_S0_DFP2;
1105 bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
1106 bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
1107 } else {
1108 DRM_DEBUG("DFP2 disconnected\n");
1109 bios_0_scratch &= ~ATOM_S0_DFP2;
1110 bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
1111 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
1112 }
1113 }
1114 if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
1115 (radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
1116 if (connected) {
1117 DRM_DEBUG("DFP3 connected\n");
1118 bios_0_scratch |= ATOM_S0_DFP3;
1119 bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
1120 bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
1121 } else {
1122 DRM_DEBUG("DFP3 disconnected\n");
1123 bios_0_scratch &= ~ATOM_S0_DFP3;
1124 bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
1125 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
1126 }
1127 }
1128 if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
1129 (radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
1130 if (connected) {
1131 DRM_DEBUG("DFP4 connected\n");
1132 bios_0_scratch |= ATOM_S0_DFP4;
1133 bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
1134 bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
1135 } else {
1136 DRM_DEBUG("DFP4 disconnected\n");
1137 bios_0_scratch &= ~ATOM_S0_DFP4;
1138 bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
1139 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
1140 }
1141 }
1142 if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
1143 (radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
1144 if (connected) {
1145 DRM_DEBUG("DFP5 connected\n");
1146 bios_0_scratch |= ATOM_S0_DFP5;
1147 bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
1148 bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
1149 } else {
1150 DRM_DEBUG("DFP5 disconnected\n");
1151 bios_0_scratch &= ~ATOM_S0_DFP5;
1152 bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
1153 bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
1154 }
1155 }
1156
1157 if (rdev->family >= CHIP_R600) {
1158 WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
1159 WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
1160 WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
1161 } else {
1162 WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
1163 WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
1164 WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
1165 }
1166}
1167
/*
 * Record which CRTC (0 or 1) drives each device attached to this encoder
 * in the ATOM BIOS_3_SCRATCH register, which is shared with the VBIOS.
 * Each device owns a fixed "CRTC active" bit; the shift amounts below are
 * dictated by the AtomBIOS scratch-register bit layout.
 */
1168void
1169radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
1170{
1171	struct drm_device *dev = encoder->dev;
1172	struct radeon_device *rdev = dev->dev_private;
1173	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1174	uint32_t bios_3_scratch;
1175
	/* the scratch registers live at a different offset on R600 and newer */
1176	if (rdev->family >= CHIP_R600)
1177		bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
1178	else
1179		bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH);
1180
	/* clear each device's CRTC-active bit, then set it if crtc == 1 */
1181	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
1182		bios_3_scratch &= ~ATOM_S3_TV1_CRTC_ACTIVE;
1183		bios_3_scratch |= (crtc << 18);
1184	}
1185	if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
1186		bios_3_scratch &= ~ATOM_S3_CV_CRTC_ACTIVE;
1187		bios_3_scratch |= (crtc << 24);
1188	}
1189	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
1190		bios_3_scratch &= ~ATOM_S3_CRT1_CRTC_ACTIVE;
1191		bios_3_scratch |= (crtc << 16);
1192	}
1193	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
1194		bios_3_scratch &= ~ATOM_S3_CRT2_CRTC_ACTIVE;
1195		bios_3_scratch |= (crtc << 20);
1196	}
1197	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
1198		bios_3_scratch &= ~ATOM_S3_LCD1_CRTC_ACTIVE;
1199		bios_3_scratch |= (crtc << 17);
1200	}
1201	if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
1202		bios_3_scratch &= ~ATOM_S3_DFP1_CRTC_ACTIVE;
1203		bios_3_scratch |= (crtc << 19);
1204	}
1205	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
1206		bios_3_scratch &= ~ATOM_S3_DFP2_CRTC_ACTIVE;
1207		bios_3_scratch |= (crtc << 23);
1208	}
1209	if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
1210		bios_3_scratch &= ~ATOM_S3_DFP3_CRTC_ACTIVE;
1211		bios_3_scratch |= (crtc << 25);
1212	}
1213
1214	if (rdev->family >= CHIP_R600)
1215		WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch);
1216	else
1217		WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch);
1218}
1219
/*
 * Mirror the encoder's DPMS state into the ATOM BIOS_2_SCRATCH register,
 * which is shared with the VBIOS.  For every device routed to this encoder,
 * a set *_DPMS_STATE bit means the device is powered DOWN, so the bit is
 * cleared when @on is true and set when it is false.
 */
1220void
1221radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
1222{
1223	struct drm_device *dev = encoder->dev;
1224	struct radeon_device *rdev = dev->dev_private;
1225	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1226	uint32_t bios_2_scratch;
1227
	/* the scratch registers live at a different offset on R600 and newer */
1228	if (rdev->family >= CHIP_R600)
1229		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
1230	else
1231		bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
1232
1233	if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
1234		if (on)
1235			bios_2_scratch &= ~ATOM_S2_TV1_DPMS_STATE;
1236		else
1237			bios_2_scratch |= ATOM_S2_TV1_DPMS_STATE;
1238	}
1239	if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
1240		if (on)
1241			bios_2_scratch &= ~ATOM_S2_CV_DPMS_STATE;
1242		else
1243			bios_2_scratch |= ATOM_S2_CV_DPMS_STATE;
1244	}
1245	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
1246		if (on)
1247			bios_2_scratch &= ~ATOM_S2_CRT1_DPMS_STATE;
1248		else
1249			bios_2_scratch |= ATOM_S2_CRT1_DPMS_STATE;
1250	}
1251	if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
1252		if (on)
1253			bios_2_scratch &= ~ATOM_S2_CRT2_DPMS_STATE;
1254		else
1255			bios_2_scratch |= ATOM_S2_CRT2_DPMS_STATE;
1256	}
1257	if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
1258		if (on)
1259			bios_2_scratch &= ~ATOM_S2_LCD1_DPMS_STATE;
1260		else
1261			bios_2_scratch |= ATOM_S2_LCD1_DPMS_STATE;
1262	}
1263	if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
1264		if (on)
1265			bios_2_scratch &= ~ATOM_S2_DFP1_DPMS_STATE;
1266		else
1267			bios_2_scratch |= ATOM_S2_DFP1_DPMS_STATE;
1268	}
1269	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
1270		if (on)
1271			bios_2_scratch &= ~ATOM_S2_DFP2_DPMS_STATE;
1272		else
1273			bios_2_scratch |= ATOM_S2_DFP2_DPMS_STATE;
1274	}
1275	if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) {
1276		if (on)
1277			bios_2_scratch &= ~ATOM_S2_DFP3_DPMS_STATE;
1278		else
1279			bios_2_scratch |= ATOM_S2_DFP3_DPMS_STATE;
1280	}
1281	if (radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) {
1282		if (on)
1283			bios_2_scratch &= ~ATOM_S2_DFP4_DPMS_STATE;
1284		else
1285			bios_2_scratch |= ATOM_S2_DFP4_DPMS_STATE;
1286	}
1287	if (radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) {
1288		if (on)
1289			bios_2_scratch &= ~ATOM_S2_DFP5_DPMS_STATE;
1290		else
1291			bios_2_scratch |= ATOM_S2_DFP5_DPMS_STATE;
1292	}
1293
1294	if (rdev->family >= CHIP_R600)
1295		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
1296	else
1297		WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
1298}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
new file mode 100644
index 000000000000..c44403a2ca76
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -0,0 +1,133 @@
1/*
2 * Copyright 2009 Jerome Glisse.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Jerome Glisse
23 */
24#include <drm/drmP.h>
25#include <drm/radeon_drm.h>
26#include "radeon_reg.h"
27#include "radeon.h"
28
29void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
30 unsigned sdomain, unsigned ddomain)
31{
32 struct radeon_object *dobj = NULL;
33 struct radeon_object *sobj = NULL;
34 struct radeon_fence *fence = NULL;
35 uint64_t saddr, daddr;
36 unsigned long start_jiffies;
37 unsigned long end_jiffies;
38 unsigned long time;
39 unsigned i, n, size;
40 int r;
41
42 size = bsize;
43 n = 1024;
44 r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
45 if (r) {
46 goto out_cleanup;
47 }
48 r = radeon_object_pin(sobj, sdomain, &saddr);
49 if (r) {
50 goto out_cleanup;
51 }
52 r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
53 if (r) {
54 goto out_cleanup;
55 }
56 r = radeon_object_pin(dobj, ddomain, &daddr);
57 if (r) {
58 goto out_cleanup;
59 }
60 start_jiffies = jiffies;
61 for (i = 0; i < n; i++) {
62 r = radeon_fence_create(rdev, &fence);
63 if (r) {
64 goto out_cleanup;
65 }
66 r = radeon_copy_dma(rdev, saddr, daddr, size >> 14, fence);
67 if (r) {
68 goto out_cleanup;
69 }
70 r = radeon_fence_wait(fence, false);
71 if (r) {
72 goto out_cleanup;
73 }
74 radeon_fence_unref(&fence);
75 }
76 end_jiffies = jiffies;
77 time = end_jiffies - start_jiffies;
78 time = jiffies_to_msecs(time);
79 if (time > 0) {
80 i = ((n * size) >> 10) / time;
81 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
82 " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
83 sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
84 }
85 start_jiffies = jiffies;
86 for (i = 0; i < n; i++) {
87 r = radeon_fence_create(rdev, &fence);
88 if (r) {
89 goto out_cleanup;
90 }
91 r = radeon_copy_blit(rdev, saddr, daddr, size >> 14, fence);
92 if (r) {
93 goto out_cleanup;
94 }
95 r = radeon_fence_wait(fence, false);
96 if (r) {
97 goto out_cleanup;
98 }
99 radeon_fence_unref(&fence);
100 }
101 end_jiffies = jiffies;
102 time = end_jiffies - start_jiffies;
103 time = jiffies_to_msecs(time);
104 if (time > 0) {
105 i = ((n * size) >> 10) / time;
106 printk(KERN_INFO "radeon: blit %u bo moves of %ukb from %d to %d"
107 " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
108 sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
109 }
110out_cleanup:
111 if (sobj) {
112 radeon_object_unpin(sobj);
113 radeon_object_unref(&sobj);
114 }
115 if (dobj) {
116 radeon_object_unpin(dobj);
117 radeon_object_unref(&dobj);
118 }
119 if (fence) {
120 radeon_fence_unref(&fence);
121 }
122 if (r) {
123 printk(KERN_WARNING "Error while benchmarking BO move.\n");
124 }
125}
126
127void radeon_benchmark(struct radeon_device *rdev)
128{
129 radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
130 RADEON_GEM_DOMAIN_VRAM);
131 radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
132 RADEON_GEM_DOMAIN_GTT);
133}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
new file mode 100644
index 000000000000..96e37a6e7ce4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -0,0 +1,390 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31#include "atom.h"
32
33/*
34 * BIOS.
35 */
36static bool radeon_read_bios(struct radeon_device *rdev)
37{
38 uint8_t __iomem *bios;
39 size_t size;
40
41 rdev->bios = NULL;
42 bios = pci_map_rom(rdev->pdev, &size);
43 if (!bios) {
44 return false;
45 }
46
47 if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
48 pci_unmap_rom(rdev->pdev, bios);
49 return false;
50 }
51 rdev->bios = kmalloc(size, GFP_KERNEL);
52 if (rdev->bios == NULL) {
53 pci_unmap_rom(rdev->pdev, bios);
54 return false;
55 }
56 memcpy(rdev->bios, bios, size);
57 pci_unmap_rom(rdev->pdev, bios);
58 return true;
59}
60
/*
 * Read the ROM of an RV770-family card whose BIOS is not directly
 * readable (e.g. the non-primary GPU): save the affected registers,
 * disable VIP and VGA, enable ROM access (with an SPLL bypass dance on
 * RV730), read the image via radeon_read_bios(), then restore everything.
 */
61static bool r700_read_disabled_bios(struct radeon_device *rdev)
62{
63	uint32_t viph_control;
64	uint32_t bus_cntl;
65	uint32_t d1vga_control;
66	uint32_t d2vga_control;
67	uint32_t vga_render_control;
68	uint32_t rom_cntl;
69	uint32_t cg_spll_func_cntl = 0;
70	uint32_t cg_spll_status;
71	bool r;
72
	/* save everything we are about to clobber */
73	viph_control = RREG32(RADEON_VIPH_CONTROL);
74	bus_cntl = RREG32(RADEON_BUS_CNTL);
75	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
76	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
77	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
78	rom_cntl = RREG32(R600_ROM_CNTL);
79
80	/* disable VIP */
81	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
82	/* enable the rom */
83	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
84	/* Disable VGA mode */
85	WREG32(AVIVO_D1VGA_CONTROL,
86	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
87		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
88	WREG32(AVIVO_D2VGA_CONTROL,
89	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
90		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
91	WREG32(AVIVO_VGA_RENDER_CONTROL,
92	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
93
	/* RV730 needs the SPLL in bypass before the ROM clock is touched */
94	if (rdev->family == CHIP_RV730) {
95		cg_spll_func_cntl = RREG32(R600_CG_SPLL_FUNC_CNTL);
96
97		/* enable bypass mode */
98		WREG32(R600_CG_SPLL_FUNC_CNTL, (cg_spll_func_cntl |
99		       R600_SPLL_BYPASS_EN));
100
101		/* wait for SPLL_CHG_STATUS to change to 1 */
		/* NOTE(review): unbounded busy-wait — spins forever if the
		 * status bit never asserts; no timeout */
102		cg_spll_status = 0;
103		while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
104			cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
105
106		WREG32(R600_ROM_CNTL, (rom_cntl & ~R600_SCK_OVERWRITE));
107	} else
108		WREG32(R600_ROM_CNTL, (rom_cntl | R600_SCK_OVERWRITE));
109
110	r = radeon_read_bios(rdev);
111
112	/* restore regs */
113	if (rdev->family == CHIP_RV730) {
114		WREG32(R600_CG_SPLL_FUNC_CNTL, cg_spll_func_cntl);
115
116		/* wait for SPLL_CHG_STATUS to change to 1 */
117		cg_spll_status = 0;
118		while (!(cg_spll_status & R600_SPLL_CHG_STATUS))
119			cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
120	}
121	WREG32(RADEON_VIPH_CONTROL, viph_control);
122	WREG32(RADEON_BUS_CNTL, bus_cntl);
123	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
124	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
125	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
126	WREG32(R600_ROM_CNTL, rom_cntl);
127	return r;
128}
129
/*
 * Read the ROM of an R600-generation card whose BIOS is not directly
 * readable: save the affected registers (including the power-management
 * GPIO controls), disable VIP and VGA, route the ROM clock, read the
 * image via radeon_read_bios(), then restore everything.
 */
130static bool r600_read_disabled_bios(struct radeon_device *rdev)
131{
132	uint32_t viph_control;
133	uint32_t bus_cntl;
134	uint32_t d1vga_control;
135	uint32_t d2vga_control;
136	uint32_t vga_render_control;
137	uint32_t rom_cntl;
138	uint32_t general_pwrmgt;
139	uint32_t low_vid_lower_gpio_cntl;
140	uint32_t medium_vid_lower_gpio_cntl;
141	uint32_t high_vid_lower_gpio_cntl;
142	uint32_t ctxsw_vid_lower_gpio_cntl;
143	uint32_t lower_gpio_enable;
144	bool r;
145
	/* save everything we are about to clobber */
146	viph_control = RREG32(RADEON_VIPH_CONTROL);
147	bus_cntl = RREG32(RADEON_BUS_CNTL);
148	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
149	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
150	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
151	rom_cntl = RREG32(R600_ROM_CNTL);
152	general_pwrmgt = RREG32(R600_GENERAL_PWRMGT);
153	low_vid_lower_gpio_cntl = RREG32(R600_LOW_VID_LOWER_GPIO_CNTL);
154	medium_vid_lower_gpio_cntl = RREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL);
155	high_vid_lower_gpio_cntl = RREG32(R600_HIGH_VID_LOWER_GPIO_CNTL);
156	ctxsw_vid_lower_gpio_cntl = RREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL);
157	lower_gpio_enable = RREG32(R600_LOWER_GPIO_ENABLE);
158
159	/* disable VIP */
160	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
161	/* enable the rom */
162	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
163	/* Disable VGA mode */
164	WREG32(AVIVO_D1VGA_CONTROL,
165	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
166		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
167	WREG32(AVIVO_D2VGA_CONTROL,
168	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
169		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
170	WREG32(AVIVO_VGA_RENDER_CONTROL,
171	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
172
	/* slow the ROM serial clock and force it on while we read */
173	WREG32(R600_ROM_CNTL,
174	       ((rom_cntl & ~R600_SCK_PRESCALE_CRYSTAL_CLK_MASK) |
175		(1 << R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT) |
176		R600_SCK_OVERWRITE));
177
	/* park the voltage-select GPIOs so ROM access doesn't glitch them;
	 * the 0x400 bit is the ROM-related GPIO in these controls */
178	WREG32(R600_GENERAL_PWRMGT, (general_pwrmgt & ~R600_OPEN_DRAIN_PADS));
179	WREG32(R600_LOW_VID_LOWER_GPIO_CNTL,
180	       (low_vid_lower_gpio_cntl & ~0x400));
181	WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL,
182	       (medium_vid_lower_gpio_cntl & ~0x400));
183	WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL,
184	       (high_vid_lower_gpio_cntl & ~0x400));
185	WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL,
186	       (ctxsw_vid_lower_gpio_cntl & ~0x400));
187	WREG32(R600_LOWER_GPIO_ENABLE, (lower_gpio_enable | 0x400));
188
189	r = radeon_read_bios(rdev);
190
191	/* restore regs */
192	WREG32(RADEON_VIPH_CONTROL, viph_control);
193	WREG32(RADEON_BUS_CNTL, bus_cntl);
194	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
195	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
196	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
197	WREG32(R600_ROM_CNTL, rom_cntl);
198	WREG32(R600_GENERAL_PWRMGT, general_pwrmgt);
199	WREG32(R600_LOW_VID_LOWER_GPIO_CNTL, low_vid_lower_gpio_cntl);
200	WREG32(R600_MEDIUM_VID_LOWER_GPIO_CNTL, medium_vid_lower_gpio_cntl);
201	WREG32(R600_HIGH_VID_LOWER_GPIO_CNTL, high_vid_lower_gpio_cntl);
202	WREG32(R600_CTXSW_VID_LOWER_GPIO_CNTL, ctxsw_vid_lower_gpio_cntl);
203	WREG32(R600_LOWER_GPIO_ENABLE, lower_gpio_enable);
204	return r;
205}
206
/*
 * Read the ROM of an AVIVO-class (RS600..R5xx era) card whose BIOS is not
 * directly readable: save the affected registers, slow the serial EEPROM
 * clock, quiesce the GPIO pads, disable VIP and VGA, read the image via
 * radeon_read_bios(), then restore everything.
 */
207static bool avivo_read_disabled_bios(struct radeon_device *rdev)
208{
209	uint32_t seprom_cntl1;
210	uint32_t viph_control;
211	uint32_t bus_cntl;
212	uint32_t d1vga_control;
213	uint32_t d2vga_control;
214	uint32_t vga_render_control;
215	uint32_t gpiopad_a;
216	uint32_t gpiopad_en;
217	uint32_t gpiopad_mask;
218	bool r;
219
	/* save everything we are about to clobber */
220	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
221	viph_control = RREG32(RADEON_VIPH_CONTROL);
222	bus_cntl = RREG32(RADEON_BUS_CNTL);
223	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
224	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
225	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
226	gpiopad_a = RREG32(RADEON_GPIOPAD_A);
227	gpiopad_en = RREG32(RADEON_GPIOPAD_EN);
228	gpiopad_mask = RREG32(RADEON_GPIOPAD_MASK);
229
	/* slow the serial ROM clock (prescale 0xc) for a reliable read */
230	WREG32(RADEON_SEPROM_CNTL1,
231	       ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
232		(0xc << RADEON_SCK_PRESCALE_SHIFT)));
233	WREG32(RADEON_GPIOPAD_A, 0);
234	WREG32(RADEON_GPIOPAD_EN, 0);
235	WREG32(RADEON_GPIOPAD_MASK, 0);
236
237	/* disable VIP */
238	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
239
240	/* enable the rom */
241	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
242
243	/* Disable VGA mode */
244	WREG32(AVIVO_D1VGA_CONTROL,
245	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
246		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
247	WREG32(AVIVO_D2VGA_CONTROL,
248	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
249		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
250	WREG32(AVIVO_VGA_RENDER_CONTROL,
251	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
252
253	r = radeon_read_bios(rdev);
254
255	/* restore regs */
256	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
257	WREG32(RADEON_VIPH_CONTROL, viph_control);
258	WREG32(RADEON_BUS_CNTL, bus_cntl);
259	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
260	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
261	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
262	WREG32(RADEON_GPIOPAD_A, gpiopad_a);
263	WREG32(RADEON_GPIOPAD_EN, gpiopad_en);
264	WREG32(RADEON_GPIOPAD_MASK, gpiopad_mask);
265	return r;
266}
267
/*
 * Read the ROM of a pre-AVIVO card whose BIOS is not directly readable:
 * save the affected registers, slow the serial EEPROM clock, disable VIP,
 * enable ROM access, turn off both CRTCs and their memory requests, read
 * the image via radeon_read_bios(), then restore everything.
 */
268static bool legacy_read_disabled_bios(struct radeon_device *rdev)
269{
270	uint32_t seprom_cntl1;
271	uint32_t viph_control;
272	uint32_t bus_cntl;
273	uint32_t crtc_gen_cntl;
274	uint32_t crtc2_gen_cntl;
275	uint32_t crtc_ext_cntl;
276	uint32_t fp2_gen_cntl;
277	bool r;
278
	/* save everything we are about to clobber; CRTC2 and FP2 state is
	 * only touched where the hardware actually has them */
279	seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
280	viph_control = RREG32(RADEON_VIPH_CONTROL);
281	bus_cntl = RREG32(RADEON_BUS_CNTL);
282	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
283	crtc2_gen_cntl = 0;
284	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
285	fp2_gen_cntl = 0;
286
	/* NOTE(review): FP2 handling is special-cased on device id QY
	 * (a Radeon VE/7000 variant) — confirm intent before touching */
287	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
288		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
289	}
290
291	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
292		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
293	}
294
	/* slow the serial ROM clock (prescale 0xc) for a reliable read */
295	WREG32(RADEON_SEPROM_CNTL1,
296	       ((seprom_cntl1 & ~RADEON_SCK_PRESCALE_MASK) |
297		(0xc << RADEON_SCK_PRESCALE_SHIFT)));
298
299	/* disable VIP */
300	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
301
302	/* enable the rom */
303	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
304
305	/* Turn off mem requests and CRTC for both controllers */
306	WREG32(RADEON_CRTC_GEN_CNTL,
307	       ((crtc_gen_cntl & ~RADEON_CRTC_EN) |
308		(RADEON_CRTC_DISP_REQ_EN_B |
309		 RADEON_CRTC_EXT_DISP_EN)));
310	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
311		WREG32(RADEON_CRTC2_GEN_CNTL,
312		       ((crtc2_gen_cntl & ~RADEON_CRTC2_EN) |
313			RADEON_CRTC2_DISP_REQ_EN_B));
314	}
315	/* Turn off CRTC */
316	WREG32(RADEON_CRTC_EXT_CNTL,
317	       ((crtc_ext_cntl & ~RADEON_CRTC_CRT_ON) |
318		(RADEON_CRTC_SYNC_TRISTAT |
319		 RADEON_CRTC_DISPLAY_DIS)));
320
321	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
322		WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
323	}
324
325	r = radeon_read_bios(rdev);
326
327	/* restore regs */
328	WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
329	WREG32(RADEON_VIPH_CONTROL, viph_control);
330	WREG32(RADEON_BUS_CNTL, bus_cntl);
331	WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
332	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
333		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
334	}
335	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
336	if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
337		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
338	}
339	return r;
340}
341
342static bool radeon_read_disabled_bios(struct radeon_device *rdev)
343{
344 if (rdev->family >= CHIP_RV770)
345 return r700_read_disabled_bios(rdev);
346 else if (rdev->family >= CHIP_R600)
347 return r600_read_disabled_bios(rdev);
348 else if (rdev->family >= CHIP_RS600)
349 return avivo_read_disabled_bios(rdev);
350 else
351 return legacy_read_disabled_bios(rdev);
352}
353
/*
 * Locate and validate the video BIOS image.  Tries a direct ROM read
 * first, then the ASIC-specific "disabled BIOS" path.  On success
 * rdev->bios holds the image, rdev->bios_header_start the header offset,
 * and rdev->is_atom_bios says whether it is an AtomBIOS or a COMBIOS.
 * Returns false (with rdev->bios freed and NULL) on any failure.
 */
354bool radeon_get_bios(struct radeon_device *rdev)
355{
356	bool r;
357	uint16_t tmp;
358
359	r = radeon_read_bios(rdev);
360	if (r == false) {
361		r = radeon_read_disabled_bios(rdev);
362	}
363	if (r == false || rdev->bios == NULL) {
364		DRM_ERROR("Unable to locate a BIOS ROM\n");
365		rdev->bios = NULL;
366		return false;
367	}
	/* re-check the ROM signature on the copied image */
368	if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
369		goto free_bios;
370	}
371
	/* word at 0x48 points at the BIOS header */
372	rdev->bios_header_start = RBIOS16(0x48);
373	if (!rdev->bios_header_start) {
374		goto free_bios;
375	}
	/* NOTE(review): no check that tmp + 4 stays within the copied image
	 * before the memcmp below — confirm ROM size guarantees this */
376	tmp = rdev->bios_header_start + 4;
	/* "MOTA" is presumably "ATOM" byte-swapped (big-endian images) —
	 * confirm against the AtomBIOS spec */
377	if (!memcmp(rdev->bios + tmp, "ATOM", 4) ||
378	    !memcmp(rdev->bios + tmp, "MOTA", 4)) {
379		rdev->is_atom_bios = true;
380	} else {
381		rdev->is_atom_bios = false;
382	}
383
	/* prints "ATOMBIOS detected" or "COMBIOS detected" */
384	DRM_DEBUG("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM");
385	return true;
386free_bios:
387	kfree(rdev->bios);
388	rdev->bios = NULL;
389	return false;
390}
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
new file mode 100644
index 000000000000..a37cbce53181
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -0,0 +1,833 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_drm.h"
30#include "radeon_reg.h"
31#include "radeon.h"
32#include "atom.h"
33
34/* 10 khz */
/*
 * Compute the current engine clock (sclk) of a pre-AVIVO part from the
 * SPLL feedback/reference dividers and the SCLK source select.  Result is
 * in units of 10 kHz, matching spll->reference_freq.
 */
35static uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
36{
37	struct radeon_pll *spll = &rdev->clock.spll;
38	uint32_t fb_div, ref_div, post_div, sclk;
39
	/* feedback divider field; hardware value is half the real divider */
40	fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
41	fb_div = (fb_div >> RADEON_SPLL_FB_DIV_SHIFT) & RADEON_SPLL_FB_DIV_MASK;
42	fb_div <<= 1;
43	fb_div *= spll->reference_freq;
44
45	ref_div =
46	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
47	sclk = fb_div / ref_div;
48
	/* source-select values 2/3/4 tap the PLL output divided by 2/4/16 */
49	post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
50	if (post_div == 2)
51		sclk >>= 1;
52	else if (post_div == 3)
53		sclk >>= 2;
54	else if (post_div == 4)
55		sclk >>= 4;
56
57	return sclk;
58}
59
60/* 10 khz */
/*
 * Compute the current memory clock (mclk) of a pre-AVIVO part from the
 * MPLL feedback/reference dividers and the MCLK post-divider select.
 * Result is in units of 10 kHz, matching mpll->reference_freq.
 */
61static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
62{
63	struct radeon_pll *mpll = &rdev->clock.mpll;
64	uint32_t fb_div, ref_div, post_div, mclk;
65
	/* feedback divider field; hardware value is half the real divider */
66	fb_div = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
67	fb_div = (fb_div >> RADEON_MPLL_FB_DIV_SHIFT) & RADEON_MPLL_FB_DIV_MASK;
68	fb_div <<= 1;
69	fb_div *= mpll->reference_freq;
70
71	ref_div =
72	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
73	mclk = fb_div / ref_div;
74
	/* select values 2/3/4 tap the PLL output divided by 2/4/16 */
75	post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
76	if (post_div == 2)
77		mclk >>= 1;
78	else if (post_div == 3)
79		mclk >>= 2;
80	else if (post_div == 4)
81		mclk >>= 4;
82
83	return mclk;
84}
85
/*
 * Populate rdev->clock (p1pll/p2pll/spll/mpll parameters and default
 * engine/memory clocks) from the BIOS — AtomBIOS or COMBIOS — and fall
 * back to conservative generic values on legacy parts when the BIOS
 * tables cannot be parsed.  All frequencies follow the file's 10 kHz
 * unit convention.
 */
86void radeon_get_clock_info(struct drm_device *dev)
87{
88	struct radeon_device *rdev = dev->dev_private;
89	struct radeon_pll *p1pll = &rdev->clock.p1pll;
90	struct radeon_pll *p2pll = &rdev->clock.p2pll;
91	struct radeon_pll *spll = &rdev->clock.spll;
92	struct radeon_pll *mpll = &rdev->clock.mpll;
93	int ret;
94
95	if (rdev->is_atom_bios)
96		ret = radeon_atom_get_clock_info(dev);
97	else
98		ret = radeon_combios_get_clock_info(dev);
99
	/* ret != 0: BIOS parse succeeded — just sanitize reference dividers */
100	if (ret) {
101		if (p1pll->reference_div < 2)
102			p1pll->reference_div = 12;
103		if (p2pll->reference_div < 2)
104			p2pll->reference_div = 12;
105		if (spll->reference_div < 2)
106			spll->reference_div =
107			    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
108			    RADEON_M_SPLL_REF_DIV_MASK;
109		if (mpll->reference_div < 2)
110			mpll->reference_div = spll->reference_div;
111	} else {
112		if (ASIC_IS_AVIVO(rdev)) {
113			/* TODO FALLBACK */
114		} else {
115			DRM_INFO("Using generic clock info\n");
116
			/* reference crystal in 10 kHz units: 14.32 MHz on
			 * IGPs, 27.00 MHz otherwise */
117			if (rdev->flags & RADEON_IS_IGP) {
118				p1pll->reference_freq = 1432;
119				p2pll->reference_freq = 1432;
120				spll->reference_freq = 1432;
121				mpll->reference_freq = 1432;
122			} else {
123				p1pll->reference_freq = 2700;
124				p2pll->reference_freq = 2700;
125				spll->reference_freq = 2700;
126				mpll->reference_freq = 2700;
127			}
128			p1pll->reference_div =
129			    RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
130			if (p1pll->reference_div < 2)
131				p1pll->reference_div = 12;
132			p2pll->reference_div = p1pll->reference_div;
133
			/* pixel PLL input/output limits differ on R420+ */
134			if (rdev->family >= CHIP_R420) {
135				p1pll->pll_in_min = 100;
136				p1pll->pll_in_max = 1350;
137				p1pll->pll_out_min = 20000;
138				p1pll->pll_out_max = 50000;
139				p2pll->pll_in_min = 100;
140				p2pll->pll_in_max = 1350;
141				p2pll->pll_out_min = 20000;
142				p2pll->pll_out_max = 50000;
143			} else {
144				p1pll->pll_in_min = 40;
145				p1pll->pll_in_max = 500;
146				p1pll->pll_out_min = 12500;
147				p1pll->pll_out_max = 35000;
148				p2pll->pll_in_min = 40;
149				p2pll->pll_in_max = 500;
150				p2pll->pll_out_min = 12500;
151				p2pll->pll_out_max = 35000;
152			}
153
154			spll->reference_div =
155			    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
156			    RADEON_M_SPLL_REF_DIV_MASK;
157			mpll->reference_div = spll->reference_div;
			/* read back the clocks the hardware is running now */
158			rdev->clock.default_sclk =
159			    radeon_legacy_get_engine_clock(rdev);
160			rdev->clock.default_mclk =
161			    radeon_legacy_get_memory_clock(rdev);
162		}
163	}
164
165	/* pixel clocks */
166	if (ASIC_IS_AVIVO(rdev)) {
167		p1pll->min_post_div = 2;
168		p1pll->max_post_div = 0x7f;
169		p1pll->min_frac_feedback_div = 0;
170		p1pll->max_frac_feedback_div = 9;
171		p2pll->min_post_div = 2;
172		p2pll->max_post_div = 0x7f;
173		p2pll->min_frac_feedback_div = 0;
174		p2pll->max_frac_feedback_div = 9;
175	} else {
176		p1pll->min_post_div = 1;
177		p1pll->max_post_div = 16;
178		p1pll->min_frac_feedback_div = 0;
179		p1pll->max_frac_feedback_div = 0;
180		p2pll->min_post_div = 1;
181		p2pll->max_post_div = 12;
182		p2pll->min_frac_feedback_div = 0;
183		p2pll->max_frac_feedback_div = 0;
184	}
185
186	p1pll->min_ref_div = 2;
187	p1pll->max_ref_div = 0x3ff;
188	p1pll->min_feedback_div = 4;
189	p1pll->max_feedback_div = 0x7ff;
190	p1pll->best_vco = 0;
191
192	p2pll->min_ref_div = 2;
193	p2pll->max_ref_div = 0x3ff;
194	p2pll->min_feedback_div = 4;
195	p2pll->max_feedback_div = 0x7ff;
196	p2pll->best_vco = 0;
197
198	/* system clock */
199	spll->min_post_div = 1;
200	spll->max_post_div = 1;
201	spll->min_ref_div = 2;
202	spll->max_ref_div = 0xff;
203	spll->min_feedback_div = 4;
204	spll->max_feedback_div = 0xff;
205	spll->best_vco = 0;
206
207	/* memory clock */
208	mpll->min_post_div = 1;
209	mpll->max_post_div = 1;
210	mpll->min_ref_div = 2;
211	mpll->max_ref_div = 0xff;
212	mpll->min_feedback_div = 4;
213	mpll->max_feedback_div = 0xff;
214	mpll->best_vco = 0;
215
216}
217
218/* 10 khz */
219static uint32_t calc_eng_mem_clock(struct radeon_device *rdev,
220 uint32_t req_clock,
221 int *fb_div, int *post_div)
222{
223 struct radeon_pll *spll = &rdev->clock.spll;
224 int ref_div = spll->reference_div;
225
226 if (!ref_div)
227 ref_div =
228 RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
229 RADEON_M_SPLL_REF_DIV_MASK;
230
231 if (req_clock < 15000) {
232 *post_div = 8;
233 req_clock *= 8;
234 } else if (req_clock < 30000) {
235 *post_div = 4;
236 req_clock *= 4;
237 } else if (req_clock < 60000) {
238 *post_div = 2;
239 req_clock *= 2;
240 } else
241 *post_div = 1;
242
243 req_clock *= ref_div;
244 req_clock += spll->reference_freq;
245 req_clock /= (2 * spll->reference_freq);
246
247 *fb_div = req_clock & 0xff;
248
249 req_clock = (req_clock & 0xffff) << 1;
250 req_clock *= spll->reference_freq;
251 req_clock /= ref_div;
252 req_clock /= *post_div;
253
254 return req_clock;
255}
256
/* eng_clock is in units of 10 kHz.
 *
 * Reprograms the SPLL to the requested engine clock: switch SCLK away
 * from the SPLL, put the SPLL to sleep and in reset, write the new
 * feedback divider, wake it back up, then reselect the SPLL output that
 * matches the chosen post divider.  The udelay()s between steps give the
 * PLL time to settle; the statement order is part of the hardware
 * programming sequence and must not be changed. */
void radeon_legacy_set_engine_clock(struct radeon_device *rdev,
				    uint32_t eng_clock)
{
	uint32_t tmp;
	int fb_div, post_div;

	/* XXX: wait for idle */

	/* Round the request to what the SPLL can produce; fb_div/post_div
	 * receive the dividers to program. */
	eng_clock = calc_eng_mem_clock(rdev, eng_clock, &fb_div, &post_div);

	/* Allow the crystal input while the SPLL is being reprogrammed. */
	tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
	tmp &= ~RADEON_DONT_USE_XTALIN;
	WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);

	/* Deselect the SPLL as the SCLK source. */
	tmp = RREG32_PLL(RADEON_SCLK_CNTL);
	tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
	WREG32_PLL(RADEON_SCLK_CNTL, tmp);

	udelay(10);

	/* Put the SPLL to sleep and hold it in reset before touching the
	 * feedback divider. */
	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
	tmp |= RADEON_SPLL_SLEEP;
	WREG32_PLL(RADEON_SPLL_CNTL, tmp);

	udelay(2);

	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
	tmp |= RADEON_SPLL_RESET;
	WREG32_PLL(RADEON_SPLL_CNTL, tmp);

	udelay(200);

	/* Program the new SPLL feedback divider. */
	tmp = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
	tmp &= ~(RADEON_SPLL_FB_DIV_MASK << RADEON_SPLL_FB_DIV_SHIFT);
	tmp |= (fb_div & RADEON_SPLL_FB_DIV_MASK) << RADEON_SPLL_FB_DIV_SHIFT;
	WREG32_PLL(RADEON_M_SPLL_REF_FB_DIV, tmp);

	/* XXX: verify on different asics */
	/* Select the PVG value based on the resulting VCO frequency
	 * (eng_clock * post_div, still in 10 kHz units). */
	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
	tmp &= ~RADEON_SPLL_PVG_MASK;
	if ((eng_clock * post_div) >= 90000)
		tmp |= (0x7 << RADEON_SPLL_PVG_SHIFT);
	else
		tmp |= (0x4 << RADEON_SPLL_PVG_SHIFT);
	WREG32_PLL(RADEON_SPLL_CNTL, tmp);

	/* Wake the SPLL and release reset, letting it settle each time. */
	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
	tmp &= ~RADEON_SPLL_SLEEP;
	WREG32_PLL(RADEON_SPLL_CNTL, tmp);

	udelay(2);

	tmp = RREG32_PLL(RADEON_SPLL_CNTL);
	tmp &= ~RADEON_SPLL_RESET;
	WREG32_PLL(RADEON_SPLL_CNTL, tmp);

	udelay(200);

	/* Reselect the SPLL output corresponding to the post divider. */
	tmp = RREG32_PLL(RADEON_SCLK_CNTL);
	tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
	switch (post_div) {
	case 1:
	default:
		tmp |= 1;
		break;
	case 2:
		tmp |= 2;
		break;
	case 4:
		tmp |= 3;
		break;
	case 8:
		tmp |= 4;
		break;
	}
	WREG32_PLL(RADEON_SCLK_CNTL, tmp);

	udelay(20);

	/* Stop using the crystal input again. */
	tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
	tmp |= RADEON_DONT_USE_XTALIN;
	WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);

	udelay(10);
}
343
344void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
345{
346 uint32_t tmp;
347
348 if (enable) {
349 if (rdev->flags & RADEON_SINGLE_CRTC) {
350 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
351 if ((RREG32(RADEON_CONFIG_CNTL) &
352 RADEON_CFG_ATI_REV_ID_MASK) >
353 RADEON_CFG_ATI_REV_A13) {
354 tmp &=
355 ~(RADEON_SCLK_FORCE_CP |
356 RADEON_SCLK_FORCE_RB);
357 }
358 tmp &=
359 ~(RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 |
360 RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_SE |
361 RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_RE |
362 RADEON_SCLK_FORCE_PB | RADEON_SCLK_FORCE_TAM |
363 RADEON_SCLK_FORCE_TDM);
364 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
365 } else if (ASIC_IS_R300(rdev)) {
366 if ((rdev->family == CHIP_RS400) ||
367 (rdev->family == CHIP_RS480)) {
368 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
369 tmp &=
370 ~(RADEON_SCLK_FORCE_DISP2 |
371 RADEON_SCLK_FORCE_CP |
372 RADEON_SCLK_FORCE_HDP |
373 RADEON_SCLK_FORCE_DISP1 |
374 RADEON_SCLK_FORCE_TOP |
375 RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
376 | RADEON_SCLK_FORCE_IDCT |
377 RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
378 | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
379 | R300_SCLK_FORCE_US |
380 RADEON_SCLK_FORCE_TV_SCLK |
381 R300_SCLK_FORCE_SU |
382 RADEON_SCLK_FORCE_OV0);
383 tmp |= RADEON_DYN_STOP_LAT_MASK;
384 tmp |=
385 RADEON_SCLK_FORCE_TOP |
386 RADEON_SCLK_FORCE_VIP;
387 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
388
389 tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
390 tmp &= ~RADEON_SCLK_MORE_FORCEON;
391 tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
392 WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
393
394 tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
395 tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
396 RADEON_PIXCLK_DAC_ALWAYS_ONb);
397 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
398
399 tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
400 tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
401 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
402 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
403 R300_DVOCLK_ALWAYS_ONb |
404 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
405 RADEON_PIXCLK_GV_ALWAYS_ONb |
406 R300_PIXCLK_DVO_ALWAYS_ONb |
407 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
408 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
409 R300_PIXCLK_TRANS_ALWAYS_ONb |
410 R300_PIXCLK_TVO_ALWAYS_ONb |
411 R300_P2G2CLK_ALWAYS_ONb |
412 R300_P2G2CLK_ALWAYS_ONb);
413 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
414 } else if (rdev->family >= CHIP_RV350) {
415 tmp = RREG32_PLL(R300_SCLK_CNTL2);
416 tmp &= ~(R300_SCLK_FORCE_TCL |
417 R300_SCLK_FORCE_GA |
418 R300_SCLK_FORCE_CBA);
419 tmp |= (R300_SCLK_TCL_MAX_DYN_STOP_LAT |
420 R300_SCLK_GA_MAX_DYN_STOP_LAT |
421 R300_SCLK_CBA_MAX_DYN_STOP_LAT);
422 WREG32_PLL(R300_SCLK_CNTL2, tmp);
423
424 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
425 tmp &=
426 ~(RADEON_SCLK_FORCE_DISP2 |
427 RADEON_SCLK_FORCE_CP |
428 RADEON_SCLK_FORCE_HDP |
429 RADEON_SCLK_FORCE_DISP1 |
430 RADEON_SCLK_FORCE_TOP |
431 RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
432 | RADEON_SCLK_FORCE_IDCT |
433 RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
434 | R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
435 | R300_SCLK_FORCE_US |
436 RADEON_SCLK_FORCE_TV_SCLK |
437 R300_SCLK_FORCE_SU |
438 RADEON_SCLK_FORCE_OV0);
439 tmp |= RADEON_DYN_STOP_LAT_MASK;
440 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
441
442 tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
443 tmp &= ~RADEON_SCLK_MORE_FORCEON;
444 tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
445 WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
446
447 tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
448 tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
449 RADEON_PIXCLK_DAC_ALWAYS_ONb);
450 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
451
452 tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
453 tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
454 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
455 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
456 R300_DVOCLK_ALWAYS_ONb |
457 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
458 RADEON_PIXCLK_GV_ALWAYS_ONb |
459 R300_PIXCLK_DVO_ALWAYS_ONb |
460 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
461 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
462 R300_PIXCLK_TRANS_ALWAYS_ONb |
463 R300_PIXCLK_TVO_ALWAYS_ONb |
464 R300_P2G2CLK_ALWAYS_ONb |
465 R300_P2G2CLK_ALWAYS_ONb);
466 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
467
468 tmp = RREG32_PLL(RADEON_MCLK_MISC);
469 tmp |= (RADEON_MC_MCLK_DYN_ENABLE |
470 RADEON_IO_MCLK_DYN_ENABLE);
471 WREG32_PLL(RADEON_MCLK_MISC, tmp);
472
473 tmp = RREG32_PLL(RADEON_MCLK_CNTL);
474 tmp |= (RADEON_FORCEON_MCLKA |
475 RADEON_FORCEON_MCLKB);
476
477 tmp &= ~(RADEON_FORCEON_YCLKA |
478 RADEON_FORCEON_YCLKB |
479 RADEON_FORCEON_MC);
480
481 /* Some releases of vbios have set DISABLE_MC_MCLKA
482 and DISABLE_MC_MCLKB bits in the vbios table. Setting these
483 bits will cause H/W hang when reading video memory with dynamic clocking
484 enabled. */
485 if ((tmp & R300_DISABLE_MC_MCLKA) &&
486 (tmp & R300_DISABLE_MC_MCLKB)) {
487 /* If both bits are set, then check the active channels */
488 tmp = RREG32_PLL(RADEON_MCLK_CNTL);
489 if (rdev->mc.vram_width == 64) {
490 if (RREG32(RADEON_MEM_CNTL) &
491 R300_MEM_USE_CD_CH_ONLY)
492 tmp &=
493 ~R300_DISABLE_MC_MCLKB;
494 else
495 tmp &=
496 ~R300_DISABLE_MC_MCLKA;
497 } else {
498 tmp &= ~(R300_DISABLE_MC_MCLKA |
499 R300_DISABLE_MC_MCLKB);
500 }
501 }
502
503 WREG32_PLL(RADEON_MCLK_CNTL, tmp);
504 } else {
505 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
506 tmp &= ~(R300_SCLK_FORCE_VAP);
507 tmp |= RADEON_SCLK_FORCE_CP;
508 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
509 udelay(15000);
510
511 tmp = RREG32_PLL(R300_SCLK_CNTL2);
512 tmp &= ~(R300_SCLK_FORCE_TCL |
513 R300_SCLK_FORCE_GA |
514 R300_SCLK_FORCE_CBA);
515 WREG32_PLL(R300_SCLK_CNTL2, tmp);
516 }
517 } else {
518 tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
519
520 tmp &= ~(RADEON_ACTIVE_HILO_LAT_MASK |
521 RADEON_DISP_DYN_STOP_LAT_MASK |
522 RADEON_DYN_STOP_MODE_MASK);
523
524 tmp |= (RADEON_ENGIN_DYNCLK_MODE |
525 (0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
526 WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
527 udelay(15000);
528
529 tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
530 tmp |= RADEON_SCLK_DYN_START_CNTL;
531 WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
532 udelay(15000);
533
534 /* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200
535 to lockup randomly, leave them as set by BIOS.
536 */
537 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
538 /*tmp &= RADEON_SCLK_SRC_SEL_MASK; */
539 tmp &= ~RADEON_SCLK_FORCEON_MASK;
540
541 /*RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300 */
542 if (((rdev->family == CHIP_RV250) &&
543 ((RREG32(RADEON_CONFIG_CNTL) &
544 RADEON_CFG_ATI_REV_ID_MASK) <
545 RADEON_CFG_ATI_REV_A13))
546 || ((rdev->family == CHIP_RV100)
547 &&
548 ((RREG32(RADEON_CONFIG_CNTL) &
549 RADEON_CFG_ATI_REV_ID_MASK) <=
550 RADEON_CFG_ATI_REV_A13))) {
551 tmp |= RADEON_SCLK_FORCE_CP;
552 tmp |= RADEON_SCLK_FORCE_VIP;
553 }
554
555 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
556
557 if ((rdev->family == CHIP_RV200) ||
558 (rdev->family == CHIP_RV250) ||
559 (rdev->family == CHIP_RV280)) {
560 tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
561 tmp &= ~RADEON_SCLK_MORE_FORCEON;
562
563 /* RV200::A11 A12 RV250::A11 A12 */
564 if (((rdev->family == CHIP_RV200) ||
565 (rdev->family == CHIP_RV250)) &&
566 ((RREG32(RADEON_CONFIG_CNTL) &
567 RADEON_CFG_ATI_REV_ID_MASK) <
568 RADEON_CFG_ATI_REV_A13)) {
569 tmp |= RADEON_SCLK_MORE_FORCEON;
570 }
571 WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
572 udelay(15000);
573 }
574
575 /* RV200::A11 A12, RV250::A11 A12 */
576 if (((rdev->family == CHIP_RV200) ||
577 (rdev->family == CHIP_RV250)) &&
578 ((RREG32(RADEON_CONFIG_CNTL) &
579 RADEON_CFG_ATI_REV_ID_MASK) <
580 RADEON_CFG_ATI_REV_A13)) {
581 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
582 tmp |= RADEON_TCL_BYPASS_DISABLE;
583 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
584 }
585 udelay(15000);
586
587 /*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
588 tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
589 tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
590 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
591 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
592 RADEON_PIXCLK_GV_ALWAYS_ONb |
593 RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
594 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
595 RADEON_PIXCLK_TMDS_ALWAYS_ONb);
596
597 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
598 udelay(15000);
599
600 tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
601 tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
602 RADEON_PIXCLK_DAC_ALWAYS_ONb);
603
604 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
605 udelay(15000);
606 }
607 } else {
608 /* Turn everything OFF (ForceON to everything) */
609 if (rdev->flags & RADEON_SINGLE_CRTC) {
610 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
611 tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_HDP |
612 RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_TOP
613 | RADEON_SCLK_FORCE_E2 | RADEON_SCLK_FORCE_SE |
614 RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_VIP |
615 RADEON_SCLK_FORCE_RE | RADEON_SCLK_FORCE_PB |
616 RADEON_SCLK_FORCE_TAM | RADEON_SCLK_FORCE_TDM |
617 RADEON_SCLK_FORCE_RB);
618 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
619 } else if ((rdev->family == CHIP_RS400) ||
620 (rdev->family == CHIP_RS480)) {
621 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
622 tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
623 RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
624 | RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
625 R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
626 RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
627 R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
628 R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
629 R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
630 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
631
632 tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
633 tmp |= RADEON_SCLK_MORE_FORCEON;
634 WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
635
636 tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
637 tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
638 RADEON_PIXCLK_DAC_ALWAYS_ONb |
639 R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
640 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
641
642 tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
643 tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
644 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
645 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
646 R300_DVOCLK_ALWAYS_ONb |
647 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
648 RADEON_PIXCLK_GV_ALWAYS_ONb |
649 R300_PIXCLK_DVO_ALWAYS_ONb |
650 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
651 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
652 R300_PIXCLK_TRANS_ALWAYS_ONb |
653 R300_PIXCLK_TVO_ALWAYS_ONb |
654 R300_P2G2CLK_ALWAYS_ONb |
655 R300_P2G2CLK_ALWAYS_ONb |
656 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
657 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
658 } else if (rdev->family >= CHIP_RV350) {
659 /* for RV350/M10, no delays are required. */
660 tmp = RREG32_PLL(R300_SCLK_CNTL2);
661 tmp |= (R300_SCLK_FORCE_TCL |
662 R300_SCLK_FORCE_GA | R300_SCLK_FORCE_CBA);
663 WREG32_PLL(R300_SCLK_CNTL2, tmp);
664
665 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
666 tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
667 RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
668 | RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
669 R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
670 RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
671 R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
672 R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
673 R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
674 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
675
676 tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
677 tmp |= RADEON_SCLK_MORE_FORCEON;
678 WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
679
680 tmp = RREG32_PLL(RADEON_MCLK_CNTL);
681 tmp |= (RADEON_FORCEON_MCLKA |
682 RADEON_FORCEON_MCLKB |
683 RADEON_FORCEON_YCLKA |
684 RADEON_FORCEON_YCLKB | RADEON_FORCEON_MC);
685 WREG32_PLL(RADEON_MCLK_CNTL, tmp);
686
687 tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
688 tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
689 RADEON_PIXCLK_DAC_ALWAYS_ONb |
690 R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
691 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
692
693 tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
694 tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
695 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
696 RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
697 R300_DVOCLK_ALWAYS_ONb |
698 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
699 RADEON_PIXCLK_GV_ALWAYS_ONb |
700 R300_PIXCLK_DVO_ALWAYS_ONb |
701 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
702 RADEON_PIXCLK_TMDS_ALWAYS_ONb |
703 R300_PIXCLK_TRANS_ALWAYS_ONb |
704 R300_PIXCLK_TVO_ALWAYS_ONb |
705 R300_P2G2CLK_ALWAYS_ONb |
706 R300_P2G2CLK_ALWAYS_ONb |
707 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
708 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
709 } else {
710 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
711 tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_E2);
712 tmp |= RADEON_SCLK_FORCE_SE;
713
714 if (rdev->flags & RADEON_SINGLE_CRTC) {
715 tmp |= (RADEON_SCLK_FORCE_RB |
716 RADEON_SCLK_FORCE_TDM |
717 RADEON_SCLK_FORCE_TAM |
718 RADEON_SCLK_FORCE_PB |
719 RADEON_SCLK_FORCE_RE |
720 RADEON_SCLK_FORCE_VIP |
721 RADEON_SCLK_FORCE_IDCT |
722 RADEON_SCLK_FORCE_TOP |
723 RADEON_SCLK_FORCE_DISP1 |
724 RADEON_SCLK_FORCE_DISP2 |
725 RADEON_SCLK_FORCE_HDP);
726 } else if ((rdev->family == CHIP_R300) ||
727 (rdev->family == CHIP_R350)) {
728 tmp |= (RADEON_SCLK_FORCE_HDP |
729 RADEON_SCLK_FORCE_DISP1 |
730 RADEON_SCLK_FORCE_DISP2 |
731 RADEON_SCLK_FORCE_TOP |
732 RADEON_SCLK_FORCE_IDCT |
733 RADEON_SCLK_FORCE_VIP);
734 }
735 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
736
737 udelay(16000);
738
739 if ((rdev->family == CHIP_R300) ||
740 (rdev->family == CHIP_R350)) {
741 tmp = RREG32_PLL(R300_SCLK_CNTL2);
742 tmp |= (R300_SCLK_FORCE_TCL |
743 R300_SCLK_FORCE_GA |
744 R300_SCLK_FORCE_CBA);
745 WREG32_PLL(R300_SCLK_CNTL2, tmp);
746 udelay(16000);
747 }
748
749 if (rdev->flags & RADEON_IS_IGP) {
750 tmp = RREG32_PLL(RADEON_MCLK_CNTL);
751 tmp &= ~(RADEON_FORCEON_MCLKA |
752 RADEON_FORCEON_YCLKA);
753 WREG32_PLL(RADEON_MCLK_CNTL, tmp);
754 udelay(16000);
755 }
756
757 if ((rdev->family == CHIP_RV200) ||
758 (rdev->family == CHIP_RV250) ||
759 (rdev->family == CHIP_RV280)) {
760 tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
761 tmp |= RADEON_SCLK_MORE_FORCEON;
762 WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
763 udelay(16000);
764 }
765
766 tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
767 tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
768 RADEON_PIX2CLK_DAC_ALWAYS_ONb |
769 RADEON_PIXCLK_BLEND_ALWAYS_ONb |
770 RADEON_PIXCLK_GV_ALWAYS_ONb |
771 RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
772 RADEON_PIXCLK_LVDS_ALWAYS_ONb |
773 RADEON_PIXCLK_TMDS_ALWAYS_ONb);
774
775 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
776 udelay(16000);
777
778 tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
779 tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
780 RADEON_PIXCLK_DAC_ALWAYS_ONb);
781 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
782 }
783 }
784}
785
786static void radeon_apply_clock_quirks(struct radeon_device *rdev)
787{
788 uint32_t tmp;
789
790 /* XXX make sure engine is idle */
791
792 if (rdev->family < CHIP_RS600) {
793 tmp = RREG32_PLL(RADEON_SCLK_CNTL);
794 if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
795 tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
796 if ((rdev->family == CHIP_RV250)
797 || (rdev->family == CHIP_RV280))
798 tmp |=
799 RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
800 if ((rdev->family == CHIP_RV350)
801 || (rdev->family == CHIP_RV380))
802 tmp |= R300_SCLK_FORCE_VAP;
803 if (rdev->family == CHIP_R420)
804 tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
805 WREG32_PLL(RADEON_SCLK_CNTL, tmp);
806 } else if (rdev->family < CHIP_R600) {
807 tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
808 tmp |= AVIVO_CP_FORCEON;
809 WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
810
811 tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
812 tmp |= AVIVO_E2_FORCEON;
813 WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
814
815 tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
816 tmp |= AVIVO_IDCT_FORCEON;
817 WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
818 }
819}
820
821int radeon_static_clocks_init(struct drm_device *dev)
822{
823 struct radeon_device *rdev = dev->dev_private;
824
825 /* XXX make sure engine is idle */
826
827 if (radeon_dynclks != -1) {
828 if (radeon_dynclks)
829 radeon_set_clock_gating(rdev, 1);
830 }
831 radeon_apply_clock_quirks(rdev);
832 return 0;
833}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
new file mode 100644
index 000000000000..06e8038bc4ac
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -0,0 +1,2481 @@
1/*
2 * Copyright 2004 ATI Technologies Inc., Markham, Ontario
3 * Copyright 2007-8 Advanced Micro Devices, Inc.
4 * Copyright 2008 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 */
27#include "drmP.h"
28#include "radeon_drm.h"
29#include "radeon.h"
30#include "atom.h"
31
32#ifdef CONFIG_PPC_PMAC
33/* not sure which of these are needed */
34#include <asm/machdep.h>
35#include <asm/pmac_feature.h>
36#include <asm/prom.h>
37#include <asm/pci-bridge.h>
38#endif /* CONFIG_PPC_PMAC */
39
40/* from radeon_encoder.c */
41extern uint32_t
42radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
43 uint8_t dac);
44extern void radeon_link_encoder_connector(struct drm_device *dev);
45
46/* from radeon_connector.c */
47extern void
48radeon_add_legacy_connector(struct drm_device *dev,
49 uint32_t connector_id,
50 uint32_t supported_device,
51 int connector_type,
52 struct radeon_i2c_bus_rec *i2c_bus);
53
54/* from radeon_legacy_encoder.c */
55extern void
56radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
57 uint32_t supported_device);
58
59/* old legacy ATI BIOS routines */
60
61/* COMBIOS table offsets */
/* Identifiers for the COMBIOS data tables.  Entries up to and including
 * COMBIOS_I2C_INFO_TABLE are located through a pointer stored at a fixed
 * offset from the BIOS header ("absolute"); the remaining entries are
 * chained off another table's data ("relative") -- see
 * combios_get_table_offset().  The enumerator order is significant. */
enum radeon_combios_table_offset {
	/* absolute offset tables */
	COMBIOS_ASIC_INIT_1_TABLE,
	COMBIOS_BIOS_SUPPORT_TABLE,
	COMBIOS_DAC_PROGRAMMING_TABLE,
	COMBIOS_MAX_COLOR_DEPTH_TABLE,
	COMBIOS_CRTC_INFO_TABLE,
	COMBIOS_PLL_INFO_TABLE,
	COMBIOS_TV_INFO_TABLE,
	COMBIOS_DFP_INFO_TABLE,
	COMBIOS_HW_CONFIG_INFO_TABLE,
	COMBIOS_MULTIMEDIA_INFO_TABLE,
	COMBIOS_TV_STD_PATCH_TABLE,
	COMBIOS_LCD_INFO_TABLE,
	COMBIOS_MOBILE_INFO_TABLE,
	COMBIOS_PLL_INIT_TABLE,
	COMBIOS_MEM_CONFIG_TABLE,
	COMBIOS_SAVE_MASK_TABLE,
	COMBIOS_HARDCODED_EDID_TABLE,
	COMBIOS_ASIC_INIT_2_TABLE,
	COMBIOS_CONNECTOR_INFO_TABLE,
	COMBIOS_DYN_CLK_1_TABLE,
	COMBIOS_RESERVED_MEM_TABLE,
	COMBIOS_EXT_TMDS_INFO_TABLE,
	COMBIOS_MEM_CLK_INFO_TABLE,
	COMBIOS_EXT_DAC_INFO_TABLE,
	COMBIOS_MISC_INFO_TABLE,
	COMBIOS_CRT_INFO_TABLE,
	COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE,
	COMBIOS_COMPONENT_VIDEO_INFO_TABLE,
	COMBIOS_FAN_SPEED_INFO_TABLE,
	COMBIOS_OVERDRIVE_INFO_TABLE,
	COMBIOS_OEM_INFO_TABLE,
	COMBIOS_DYN_CLK_2_TABLE,
	COMBIOS_POWER_CONNECTOR_INFO_TABLE,
	COMBIOS_I2C_INFO_TABLE,
	/* relative offset tables */
	COMBIOS_ASIC_INIT_3_TABLE,	/* offset from misc info */
	COMBIOS_ASIC_INIT_4_TABLE,	/* offset from misc info */
	COMBIOS_DETECTED_MEM_TABLE,	/* offset from misc info */
	COMBIOS_ASIC_INIT_5_TABLE,	/* offset from misc info */
	COMBIOS_RAM_RESET_TABLE,	/* offset from mem config */
	COMBIOS_POWERPLAY_INFO_TABLE,	/* offset from mobile info */
	COMBIOS_GPIO_INFO_TABLE,	/* offset from mobile info */
	COMBIOS_LCD_DDC_INFO_TABLE,	/* offset from mobile info */
	COMBIOS_TMDS_POWER_TABLE,	/* offset from mobile info */
	COMBIOS_TMDS_POWER_ON_TABLE,	/* offset from tmds power */
	COMBIOS_TMDS_POWER_OFF_TABLE,	/* offset from tmds power */
};
111
/* DDC line selection as encoded in the COMBIOS connector tables.
 * Names presumably refer to the GPIO pad used for the DDC bus
 * (MONID/DVI/VGA/CRT2/LCD/generic GPIO) -- confirm against callers. */
enum radeon_combios_ddc {
	DDC_NONE_DETECTED,
	DDC_MONID,
	DDC_DVI,
	DDC_VGA,
	DDC_CRT2,
	DDC_LCD,
	DDC_GPIO,
};
121
/* Connector types as encoded in the legacy COMBIOS connector table.
 * Values are array indices into legacy_connector_convert[] below. */
enum radeon_combios_connector {
	CONNECTOR_NONE_LEGACY,
	CONNECTOR_PROPRIETARY_LEGACY,
	CONNECTOR_CRT_LEGACY,
	CONNECTOR_DVI_I_LEGACY,
	CONNECTOR_DVI_D_LEGACY,
	CONNECTOR_CTV_LEGACY,
	CONNECTOR_STV_LEGACY,
	CONNECTOR_UNSUPPORTED_LEGACY
};
132
/* Map enum radeon_combios_connector (array index) to the generic DRM
 * connector type. */
const int legacy_connector_convert[] = {
	DRM_MODE_CONNECTOR_Unknown,	/* CONNECTOR_NONE_LEGACY */
	DRM_MODE_CONNECTOR_DVID,	/* CONNECTOR_PROPRIETARY_LEGACY */
	DRM_MODE_CONNECTOR_VGA,		/* CONNECTOR_CRT_LEGACY */
	DRM_MODE_CONNECTOR_DVII,	/* CONNECTOR_DVI_I_LEGACY */
	DRM_MODE_CONNECTOR_DVID,	/* CONNECTOR_DVI_D_LEGACY */
	DRM_MODE_CONNECTOR_Composite,	/* CONNECTOR_CTV_LEGACY */
	DRM_MODE_CONNECTOR_SVIDEO,	/* CONNECTOR_STV_LEGACY */
	DRM_MODE_CONNECTOR_Unknown,	/* CONNECTOR_UNSUPPORTED_LEGACY */
};
143
144static uint16_t combios_get_table_offset(struct drm_device *dev,
145 enum radeon_combios_table_offset table)
146{
147 struct radeon_device *rdev = dev->dev_private;
148 int rev;
149 uint16_t offset = 0, check_offset;
150
151 switch (table) {
152 /* absolute offset tables */
153 case COMBIOS_ASIC_INIT_1_TABLE:
154 check_offset = RBIOS16(rdev->bios_header_start + 0xc);
155 if (check_offset)
156 offset = check_offset;
157 break;
158 case COMBIOS_BIOS_SUPPORT_TABLE:
159 check_offset = RBIOS16(rdev->bios_header_start + 0x14);
160 if (check_offset)
161 offset = check_offset;
162 break;
163 case COMBIOS_DAC_PROGRAMMING_TABLE:
164 check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
165 if (check_offset)
166 offset = check_offset;
167 break;
168 case COMBIOS_MAX_COLOR_DEPTH_TABLE:
169 check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
170 if (check_offset)
171 offset = check_offset;
172 break;
173 case COMBIOS_CRTC_INFO_TABLE:
174 check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
175 if (check_offset)
176 offset = check_offset;
177 break;
178 case COMBIOS_PLL_INFO_TABLE:
179 check_offset = RBIOS16(rdev->bios_header_start + 0x30);
180 if (check_offset)
181 offset = check_offset;
182 break;
183 case COMBIOS_TV_INFO_TABLE:
184 check_offset = RBIOS16(rdev->bios_header_start + 0x32);
185 if (check_offset)
186 offset = check_offset;
187 break;
188 case COMBIOS_DFP_INFO_TABLE:
189 check_offset = RBIOS16(rdev->bios_header_start + 0x34);
190 if (check_offset)
191 offset = check_offset;
192 break;
193 case COMBIOS_HW_CONFIG_INFO_TABLE:
194 check_offset = RBIOS16(rdev->bios_header_start + 0x36);
195 if (check_offset)
196 offset = check_offset;
197 break;
198 case COMBIOS_MULTIMEDIA_INFO_TABLE:
199 check_offset = RBIOS16(rdev->bios_header_start + 0x38);
200 if (check_offset)
201 offset = check_offset;
202 break;
203 case COMBIOS_TV_STD_PATCH_TABLE:
204 check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
205 if (check_offset)
206 offset = check_offset;
207 break;
208 case COMBIOS_LCD_INFO_TABLE:
209 check_offset = RBIOS16(rdev->bios_header_start + 0x40);
210 if (check_offset)
211 offset = check_offset;
212 break;
213 case COMBIOS_MOBILE_INFO_TABLE:
214 check_offset = RBIOS16(rdev->bios_header_start + 0x42);
215 if (check_offset)
216 offset = check_offset;
217 break;
218 case COMBIOS_PLL_INIT_TABLE:
219 check_offset = RBIOS16(rdev->bios_header_start + 0x46);
220 if (check_offset)
221 offset = check_offset;
222 break;
223 case COMBIOS_MEM_CONFIG_TABLE:
224 check_offset = RBIOS16(rdev->bios_header_start + 0x48);
225 if (check_offset)
226 offset = check_offset;
227 break;
228 case COMBIOS_SAVE_MASK_TABLE:
229 check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
230 if (check_offset)
231 offset = check_offset;
232 break;
233 case COMBIOS_HARDCODED_EDID_TABLE:
234 check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
235 if (check_offset)
236 offset = check_offset;
237 break;
238 case COMBIOS_ASIC_INIT_2_TABLE:
239 check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
240 if (check_offset)
241 offset = check_offset;
242 break;
243 case COMBIOS_CONNECTOR_INFO_TABLE:
244 check_offset = RBIOS16(rdev->bios_header_start + 0x50);
245 if (check_offset)
246 offset = check_offset;
247 break;
248 case COMBIOS_DYN_CLK_1_TABLE:
249 check_offset = RBIOS16(rdev->bios_header_start + 0x52);
250 if (check_offset)
251 offset = check_offset;
252 break;
253 case COMBIOS_RESERVED_MEM_TABLE:
254 check_offset = RBIOS16(rdev->bios_header_start + 0x54);
255 if (check_offset)
256 offset = check_offset;
257 break;
258 case COMBIOS_EXT_TMDS_INFO_TABLE:
259 check_offset = RBIOS16(rdev->bios_header_start + 0x58);
260 if (check_offset)
261 offset = check_offset;
262 break;
263 case COMBIOS_MEM_CLK_INFO_TABLE:
264 check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
265 if (check_offset)
266 offset = check_offset;
267 break;
268 case COMBIOS_EXT_DAC_INFO_TABLE:
269 check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
270 if (check_offset)
271 offset = check_offset;
272 break;
273 case COMBIOS_MISC_INFO_TABLE:
274 check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
275 if (check_offset)
276 offset = check_offset;
277 break;
278 case COMBIOS_CRT_INFO_TABLE:
279 check_offset = RBIOS16(rdev->bios_header_start + 0x60);
280 if (check_offset)
281 offset = check_offset;
282 break;
283 case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
284 check_offset = RBIOS16(rdev->bios_header_start + 0x62);
285 if (check_offset)
286 offset = check_offset;
287 break;
288 case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
289 check_offset = RBIOS16(rdev->bios_header_start + 0x64);
290 if (check_offset)
291 offset = check_offset;
292 break;
293 case COMBIOS_FAN_SPEED_INFO_TABLE:
294 check_offset = RBIOS16(rdev->bios_header_start + 0x66);
295 if (check_offset)
296 offset = check_offset;
297 break;
298 case COMBIOS_OVERDRIVE_INFO_TABLE:
299 check_offset = RBIOS16(rdev->bios_header_start + 0x68);
300 if (check_offset)
301 offset = check_offset;
302 break;
303 case COMBIOS_OEM_INFO_TABLE:
304 check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
305 if (check_offset)
306 offset = check_offset;
307 break;
308 case COMBIOS_DYN_CLK_2_TABLE:
309 check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
310 if (check_offset)
311 offset = check_offset;
312 break;
313 case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
314 check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
315 if (check_offset)
316 offset = check_offset;
317 break;
318 case COMBIOS_I2C_INFO_TABLE:
319 check_offset = RBIOS16(rdev->bios_header_start + 0x70);
320 if (check_offset)
321 offset = check_offset;
322 break;
323 /* relative offset tables */
324 case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
325 check_offset =
326 combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
327 if (check_offset) {
328 rev = RBIOS8(check_offset);
329 if (rev > 0) {
330 check_offset = RBIOS16(check_offset + 0x3);
331 if (check_offset)
332 offset = check_offset;
333 }
334 }
335 break;
336 case COMBIOS_ASIC_INIT_4_TABLE: /* offset from misc info */
337 check_offset =
338 combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
339 if (check_offset) {
340 rev = RBIOS8(check_offset);
341 if (rev > 0) {
342 check_offset = RBIOS16(check_offset + 0x5);
343 if (check_offset)
344 offset = check_offset;
345 }
346 }
347 break;
348 case COMBIOS_DETECTED_MEM_TABLE: /* offset from misc info */
349 check_offset =
350 combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
351 if (check_offset) {
352 rev = RBIOS8(check_offset);
353 if (rev > 0) {
354 check_offset = RBIOS16(check_offset + 0x7);
355 if (check_offset)
356 offset = check_offset;
357 }
358 }
359 break;
360 case COMBIOS_ASIC_INIT_5_TABLE: /* offset from misc info */
361 check_offset =
362 combios_get_table_offset(dev, COMBIOS_MISC_INFO_TABLE);
363 if (check_offset) {
364 rev = RBIOS8(check_offset);
365 if (rev == 2) {
366 check_offset = RBIOS16(check_offset + 0x9);
367 if (check_offset)
368 offset = check_offset;
369 }
370 }
371 break;
372 case COMBIOS_RAM_RESET_TABLE: /* offset from mem config */
373 check_offset =
374 combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
375 if (check_offset) {
376 while (RBIOS8(check_offset++));
377 check_offset += 2;
378 if (check_offset)
379 offset = check_offset;
380 }
381 break;
382 case COMBIOS_POWERPLAY_INFO_TABLE: /* offset from mobile info */
383 check_offset =
384 combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
385 if (check_offset) {
386 check_offset = RBIOS16(check_offset + 0x11);
387 if (check_offset)
388 offset = check_offset;
389 }
390 break;
391 case COMBIOS_GPIO_INFO_TABLE: /* offset from mobile info */
392 check_offset =
393 combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
394 if (check_offset) {
395 check_offset = RBIOS16(check_offset + 0x13);
396 if (check_offset)
397 offset = check_offset;
398 }
399 break;
400 case COMBIOS_LCD_DDC_INFO_TABLE: /* offset from mobile info */
401 check_offset =
402 combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
403 if (check_offset) {
404 check_offset = RBIOS16(check_offset + 0x15);
405 if (check_offset)
406 offset = check_offset;
407 }
408 break;
409 case COMBIOS_TMDS_POWER_TABLE: /* offset from mobile info */
410 check_offset =
411 combios_get_table_offset(dev, COMBIOS_MOBILE_INFO_TABLE);
412 if (check_offset) {
413 check_offset = RBIOS16(check_offset + 0x17);
414 if (check_offset)
415 offset = check_offset;
416 }
417 break;
418 case COMBIOS_TMDS_POWER_ON_TABLE: /* offset from tmds power */
419 check_offset =
420 combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
421 if (check_offset) {
422 check_offset = RBIOS16(check_offset + 0x2);
423 if (check_offset)
424 offset = check_offset;
425 }
426 break;
427 case COMBIOS_TMDS_POWER_OFF_TABLE: /* offset from tmds power */
428 check_offset =
429 combios_get_table_offset(dev, COMBIOS_TMDS_POWER_TABLE);
430 if (check_offset) {
431 check_offset = RBIOS16(check_offset + 0x4);
432 if (check_offset)
433 offset = check_offset;
434 }
435 break;
436 default:
437 break;
438 }
439
440 return offset;
441
442}
443
444struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line)
445{
446 struct radeon_i2c_bus_rec i2c;
447
448 i2c.mask_clk_mask = RADEON_GPIO_EN_1;
449 i2c.mask_data_mask = RADEON_GPIO_EN_0;
450 i2c.a_clk_mask = RADEON_GPIO_A_1;
451 i2c.a_data_mask = RADEON_GPIO_A_0;
452 i2c.put_clk_mask = RADEON_GPIO_EN_1;
453 i2c.put_data_mask = RADEON_GPIO_EN_0;
454 i2c.get_clk_mask = RADEON_GPIO_Y_1;
455 i2c.get_data_mask = RADEON_GPIO_Y_0;
456 if ((ddc_line == RADEON_LCD_GPIO_MASK) ||
457 (ddc_line == RADEON_MDGPIO_EN_REG)) {
458 i2c.mask_clk_reg = ddc_line;
459 i2c.mask_data_reg = ddc_line;
460 i2c.a_clk_reg = ddc_line;
461 i2c.a_data_reg = ddc_line;
462 i2c.put_clk_reg = ddc_line;
463 i2c.put_data_reg = ddc_line;
464 i2c.get_clk_reg = ddc_line + 4;
465 i2c.get_data_reg = ddc_line + 4;
466 } else {
467 i2c.mask_clk_reg = ddc_line;
468 i2c.mask_data_reg = ddc_line;
469 i2c.a_clk_reg = ddc_line;
470 i2c.a_data_reg = ddc_line;
471 i2c.put_clk_reg = ddc_line;
472 i2c.put_data_reg = ddc_line;
473 i2c.get_clk_reg = ddc_line;
474 i2c.get_data_reg = ddc_line;
475 }
476
477 if (ddc_line)
478 i2c.valid = true;
479 else
480 i2c.valid = false;
481
482 return i2c;
483}
484
485bool radeon_combios_get_clock_info(struct drm_device *dev)
486{
487 struct radeon_device *rdev = dev->dev_private;
488 uint16_t pll_info;
489 struct radeon_pll *p1pll = &rdev->clock.p1pll;
490 struct radeon_pll *p2pll = &rdev->clock.p2pll;
491 struct radeon_pll *spll = &rdev->clock.spll;
492 struct radeon_pll *mpll = &rdev->clock.mpll;
493 int8_t rev;
494 uint16_t sclk, mclk;
495
496 if (rdev->bios == NULL)
497 return NULL;
498
499 pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
500 if (pll_info) {
501 rev = RBIOS8(pll_info);
502
503 /* pixel clocks */
504 p1pll->reference_freq = RBIOS16(pll_info + 0xe);
505 p1pll->reference_div = RBIOS16(pll_info + 0x10);
506 p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
507 p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
508
509 if (rev > 9) {
510 p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
511 p1pll->pll_in_max = RBIOS32(pll_info + 0x3a);
512 } else {
513 p1pll->pll_in_min = 40;
514 p1pll->pll_in_max = 500;
515 }
516 *p2pll = *p1pll;
517
518 /* system clock */
519 spll->reference_freq = RBIOS16(pll_info + 0x1a);
520 spll->reference_div = RBIOS16(pll_info + 0x1c);
521 spll->pll_out_min = RBIOS32(pll_info + 0x1e);
522 spll->pll_out_max = RBIOS32(pll_info + 0x22);
523
524 if (rev > 10) {
525 spll->pll_in_min = RBIOS32(pll_info + 0x48);
526 spll->pll_in_max = RBIOS32(pll_info + 0x4c);
527 } else {
528 /* ??? */
529 spll->pll_in_min = 40;
530 spll->pll_in_max = 500;
531 }
532
533 /* memory clock */
534 mpll->reference_freq = RBIOS16(pll_info + 0x26);
535 mpll->reference_div = RBIOS16(pll_info + 0x28);
536 mpll->pll_out_min = RBIOS32(pll_info + 0x2a);
537 mpll->pll_out_max = RBIOS32(pll_info + 0x2e);
538
539 if (rev > 10) {
540 mpll->pll_in_min = RBIOS32(pll_info + 0x5a);
541 mpll->pll_in_max = RBIOS32(pll_info + 0x5e);
542 } else {
543 /* ??? */
544 mpll->pll_in_min = 40;
545 mpll->pll_in_max = 500;
546 }
547
548 /* default sclk/mclk */
549 sclk = RBIOS16(pll_info + 0xa);
550 mclk = RBIOS16(pll_info + 0x8);
551 if (sclk == 0)
552 sclk = 200 * 100;
553 if (mclk == 0)
554 mclk = 200 * 100;
555
556 rdev->clock.default_sclk = sclk;
557 rdev->clock.default_mclk = mclk;
558
559 return true;
560 }
561 return false;
562}
563
564struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
565 radeon_encoder
566 *encoder)
567{
568 struct drm_device *dev = encoder->base.dev;
569 struct radeon_device *rdev = dev->dev_private;
570 uint16_t dac_info;
571 uint8_t rev, bg, dac;
572 struct radeon_encoder_primary_dac *p_dac = NULL;
573
574 if (rdev->bios == NULL)
575 return NULL;
576
577 /* check CRT table */
578 dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
579 if (dac_info) {
580 p_dac =
581 kzalloc(sizeof(struct radeon_encoder_primary_dac),
582 GFP_KERNEL);
583
584 if (!p_dac)
585 return NULL;
586
587 rev = RBIOS8(dac_info) & 0x3;
588 if (rev < 2) {
589 bg = RBIOS8(dac_info + 0x2) & 0xf;
590 dac = (RBIOS8(dac_info + 0x2) >> 4) & 0xf;
591 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
592 } else {
593 bg = RBIOS8(dac_info + 0x2) & 0xf;
594 dac = RBIOS8(dac_info + 0x3) & 0xf;
595 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
596 }
597
598 }
599
600 return p_dac;
601}
602
603static enum radeon_tv_std
604radeon_combios_get_tv_info(struct radeon_encoder *encoder)
605{
606 struct drm_device *dev = encoder->base.dev;
607 struct radeon_device *rdev = dev->dev_private;
608 uint16_t tv_info;
609 enum radeon_tv_std tv_std = TV_STD_NTSC;
610
611 tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
612 if (tv_info) {
613 if (RBIOS8(tv_info + 6) == 'T') {
614 switch (RBIOS8(tv_info + 7) & 0xf) {
615 case 1:
616 tv_std = TV_STD_NTSC;
617 DRM_INFO("Default TV standard: NTSC\n");
618 break;
619 case 2:
620 tv_std = TV_STD_PAL;
621 DRM_INFO("Default TV standard: PAL\n");
622 break;
623 case 3:
624 tv_std = TV_STD_PAL_M;
625 DRM_INFO("Default TV standard: PAL-M\n");
626 break;
627 case 4:
628 tv_std = TV_STD_PAL_60;
629 DRM_INFO("Default TV standard: PAL-60\n");
630 break;
631 case 5:
632 tv_std = TV_STD_NTSC_J;
633 DRM_INFO("Default TV standard: NTSC-J\n");
634 break;
635 case 6:
636 tv_std = TV_STD_SCART_PAL;
637 DRM_INFO("Default TV standard: SCART-PAL\n");
638 break;
639 default:
640 tv_std = TV_STD_NTSC;
641 DRM_INFO
642 ("Unknown TV standard; defaulting to NTSC\n");
643 break;
644 }
645
646 switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) {
647 case 0:
648 DRM_INFO("29.498928713 MHz TV ref clk\n");
649 break;
650 case 1:
651 DRM_INFO("28.636360000 MHz TV ref clk\n");
652 break;
653 case 2:
654 DRM_INFO("14.318180000 MHz TV ref clk\n");
655 break;
656 case 3:
657 DRM_INFO("27.000000000 MHz TV ref clk\n");
658 break;
659 default:
660 break;
661 }
662 }
663 }
664 return tv_std;
665}
666
/* Per-family default TV DAC adjustment values (ps2_tvdac_adj), indexed
 * by the radeon chip family.  Used as a fallback by
 * radeon_legacy_get_tv_dac_info_from_table() when the BIOS provides no
 * TV/CRT DAC info table. */
static const uint32_t default_tvdac_adj[CHIP_LAST] = {
	0x00000000,		/* r100 */
	0x00280000,		/* rv100 */
	0x00000000,		/* rs100 */
	0x00880000,		/* rv200 */
	0x00000000,		/* rs200 */
	0x00000000,		/* r200 */
	0x00770000,		/* rv250 */
	0x00290000,		/* rs300 */
	0x00560000,		/* rv280 */
	0x00780000,		/* r300 */
	0x00770000,		/* r350 */
	0x00780000,		/* rv350 */
	0x00780000,		/* rv380 */
	0x01080000,		/* r420 */
	0x01080000,		/* r423 */
	0x01080000,		/* rv410 */
	0x00780000,		/* rs400 */
	0x00780000,		/* rs480 */
};
687
688static struct radeon_encoder_tv_dac
689 *radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev)
690{
691 struct radeon_encoder_tv_dac *tv_dac = NULL;
692
693 tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
694
695 if (!tv_dac)
696 return NULL;
697
698 tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family];
699 if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250))
700 tv_dac->ps2_tvdac_adj = 0x00880000;
701 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
702 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
703
704 return tv_dac;
705}
706
/*
 * radeon_combios_get_tv_dac_info - fetch TV DAC adjustment values.
 *
 * Tries the COMBIOS TV info table first, then the CRT info table, and
 * finally falls back to the per-family defaults table when the BIOS has
 * neither (or there is no BIOS at all).
 *
 * Returns a kzalloc'd radeon_encoder_tv_dac (caller owns/frees) or NULL
 * on allocation failure.  Field offsets below follow the COMBIOS table
 * layouts - TODO confirm against the COMBIOS table documentation.
 */
struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
							      radeon_encoder
							      *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	uint16_t dac_info;
	uint8_t rev, bg, dac;
	struct radeon_encoder_tv_dac *tv_dac = NULL;

	if (rdev->bios == NULL)
		return radeon_legacy_get_tv_dac_info_from_table(rdev);

	/* first check TV table */
	dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
	if (dac_info) {
		tv_dac =
		    kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);

		if (!tv_dac)
			return NULL;

		rev = RBIOS8(dac_info + 0x3);
		if (rev > 4) {
			/* rev 5+: separate bg/dac bytes per TV standard */
			bg = RBIOS8(dac_info + 0xc) & 0xf;
			dac = RBIOS8(dac_info + 0xd) & 0xf;
			tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);

			bg = RBIOS8(dac_info + 0xe) & 0xf;
			dac = RBIOS8(dac_info + 0xf) & 0xf;
			tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);

			bg = RBIOS8(dac_info + 0x10) & 0xf;
			dac = RBIOS8(dac_info + 0x11) & 0xf;
			tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
		} else if (rev > 1) {
			/* rev 2-4: bg/dac packed into the two nibbles of
			 * one byte per standard; rev 0/1 leave the
			 * zeroed kzalloc defaults in place */
			bg = RBIOS8(dac_info + 0xc) & 0xf;
			dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
			tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20);

			bg = RBIOS8(dac_info + 0xd) & 0xf;
			dac = (RBIOS8(dac_info + 0xd) >> 4) & 0xf;
			tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20);

			bg = RBIOS8(dac_info + 0xe) & 0xf;
			dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
			tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
		}

		tv_dac->tv_std = radeon_combios_get_tv_info(encoder);

	} else {
		/* then check CRT table */
		dac_info =
		    combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
		if (dac_info) {
			tv_dac =
			    kzalloc(sizeof(struct radeon_encoder_tv_dac),
				    GFP_KERNEL);

			if (!tv_dac)
				return NULL;

			rev = RBIOS8(dac_info) & 0x3;
			if (rev < 2) {
				/* old CRT table: bg/dac share byte 3 */
				bg = RBIOS8(dac_info + 0x3) & 0xf;
				dac = (RBIOS8(dac_info + 0x3) >> 4) & 0xf;
				tv_dac->ps2_tvdac_adj =
				    (bg << 16) | (dac << 20);
				tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
				tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
			} else {
				/* newer CRT table: separate bg/dac bytes */
				bg = RBIOS8(dac_info + 0x4) & 0xf;
				dac = RBIOS8(dac_info + 0x5) & 0xf;
				tv_dac->ps2_tvdac_adj =
				    (bg << 16) | (dac << 20);
				tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
				tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
			}
		} else {
			DRM_INFO("No TV DAC info found in BIOS\n");
			return radeon_legacy_get_tv_dac_info_from_table(rdev);
		}
	}

	return tv_dac;
}
794
/*
 * radeon_legacy_get_lvds_info_from_regs - derive LVDS panel info from
 * the registers the firmware already programmed, for boards with no
 * usable BIOS LCD table.
 *
 * Returns a kzalloc'd radeon_encoder_lvds (caller frees) or NULL on
 * allocation failure.
 */
static struct radeon_encoder_lvds *radeon_legacy_get_lvds_info_from_regs(struct
									  radeon_device
									  *rdev)
{
	struct radeon_encoder_lvds *lvds = NULL;
	uint32_t fp_vert_stretch, fp_horz_stretch;
	uint32_t ppll_div_sel, ppll_val;

	lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);

	if (!lvds)
		return NULL;

	fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH);
	fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH);

	/* panel size: prefer the stretch registers when stretching is
	 * enabled, otherwise fall back to the active CRTC timing */
	if (fp_vert_stretch & RADEON_VERT_STRETCH_ENABLE)
		lvds->native_mode.panel_yres =
		    ((fp_vert_stretch & RADEON_VERT_PANEL_SIZE) >>
		     RADEON_VERT_PANEL_SHIFT) + 1;
	else
		lvds->native_mode.panel_yres =
		    (RREG32(RADEON_CRTC_V_TOTAL_DISP) >> 16) + 1;

	/* horizontal size is stored in units of 8 pixels */
	if (fp_horz_stretch & RADEON_HORZ_STRETCH_ENABLE)
		lvds->native_mode.panel_xres =
		    (((fp_horz_stretch & RADEON_HORZ_PANEL_SIZE) >>
		      RADEON_HORZ_PANEL_SHIFT) + 1) * 8;
	else
		lvds->native_mode.panel_xres =
		    ((RREG32(RADEON_CRTC_H_TOTAL_DISP) >> 16) + 1) * 8;

	/* anything smaller than 640x480 is treated as bogus */
	if ((lvds->native_mode.panel_xres < 640) ||
	    (lvds->native_mode.panel_yres < 480)) {
		lvds->native_mode.panel_xres = 640;
		lvds->native_mode.panel_yres = 480;
	}

	/* read back the pixel PLL currently selected for the panel;
	 * 0x1bb appears to be treated as a known-bad/default divider
	 * value - TODO confirm where that constant comes from */
	ppll_div_sel = RREG8(RADEON_CLOCK_CNTL_INDEX + 1) & 0x3;
	ppll_val = RREG32_PLL(RADEON_PPLL_DIV_0 + ppll_div_sel);
	if ((ppll_val & 0x000707ff) == 0x1bb)
		lvds->use_bios_dividers = false;
	else {
		lvds->panel_ref_divider =
		    RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
		lvds->panel_post_divider = (ppll_val >> 16) & 0x7;
		lvds->panel_fb_divider = ppll_val & 0x7ff;

		/* only trust the dividers if they look sane */
		if ((lvds->panel_ref_divider != 0) &&
		    (lvds->panel_fb_divider > 3))
			lvds->use_bios_dividers = true;
	}
	/* no BIOS value available; use a conservative power-up delay */
	lvds->panel_vcc_delay = 200;

	DRM_INFO("Panel info derived from registers\n");
	DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres,
		 lvds->native_mode.panel_yres);

	return lvds;
}
855
/*
 * radeon_combios_get_lvds_info - parse LVDS panel parameters from the
 * COMBIOS LCD info table.
 *
 * Falls back to radeon_legacy_get_lvds_info_from_regs() when there is
 * no BIOS or no LCD table.  On success also copies the native mode into
 * encoder->native_mode.  Returns a kzalloc'd radeon_encoder_lvds
 * (caller owns/frees) or NULL on allocation failure.
 *
 * Field offsets follow the COMBIOS LCD info table layout - TODO confirm
 * against the table documentation.
 */
struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
							 *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	uint16_t lcd_info;
	uint32_t panel_setup;
	char stmp[30];
	int tmp, i;
	struct radeon_encoder_lvds *lvds = NULL;

	if (rdev->bios == NULL)
		return radeon_legacy_get_lvds_info_from_regs(rdev);

	lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);

	if (lcd_info) {
		lvds = kzalloc(sizeof(struct radeon_encoder_lvds), GFP_KERNEL);

		if (!lvds)
			return NULL;

		/* 24-byte panel ID string follows the table header */
		for (i = 0; i < 24; i++)
			stmp[i] = RBIOS8(lcd_info + i + 1);
		stmp[24] = 0;

		DRM_INFO("Panel ID String: %s\n", stmp);

		lvds->native_mode.panel_xres = RBIOS16(lcd_info + 0x19);
		lvds->native_mode.panel_yres = RBIOS16(lcd_info + 0x1b);

		DRM_INFO("Panel Size %dx%d\n", lvds->native_mode.panel_xres,
			 lvds->native_mode.panel_yres);

		/* clamp implausible VCC delays to 2 seconds */
		lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
		if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
			lvds->panel_vcc_delay = 2000;

		lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
		/* digon/blon delays are packed into one 16-bit field */
		lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
		lvds->panel_blon_delay = (RBIOS16(lcd_info + 0x38) >> 4) & 0xf;

		/* only trust the BIOS PLL dividers if they look sane */
		lvds->panel_ref_divider = RBIOS16(lcd_info + 0x2e);
		lvds->panel_post_divider = RBIOS8(lcd_info + 0x30);
		lvds->panel_fb_divider = RBIOS16(lcd_info + 0x31);
		if ((lvds->panel_ref_divider != 0) &&
		    (lvds->panel_fb_divider > 3))
			lvds->use_bios_dividers = true;

		/* translate the BIOS panel setup dword into
		 * LVDS_GEN_CNTL register bits */
		panel_setup = RBIOS32(lcd_info + 0x39);
		lvds->lvds_gen_cntl = 0xff00;
		if (panel_setup & 0x1)
			lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_FORMAT;

		if ((panel_setup >> 4) & 0x1)
			lvds->lvds_gen_cntl |= RADEON_LVDS_PANEL_TYPE;

		/* grey-scale / frame-modulation mode */
		switch ((panel_setup >> 8) & 0x7) {
		case 0:
			lvds->lvds_gen_cntl |= RADEON_LVDS_NO_FM;
			break;
		case 1:
			lvds->lvds_gen_cntl |= RADEON_LVDS_2_GREY;
			break;
		case 2:
			lvds->lvds_gen_cntl |= RADEON_LVDS_4_GREY;
			break;
		default:
			break;
		}

		if ((panel_setup >> 16) & 0x1)
			lvds->lvds_gen_cntl |= RADEON_LVDS_FP_POL_LOW;

		if ((panel_setup >> 17) & 0x1)
			lvds->lvds_gen_cntl |= RADEON_LVDS_LP_POL_LOW;

		if ((panel_setup >> 18) & 0x1)
			lvds->lvds_gen_cntl |= RADEON_LVDS_DTM_POL_LOW;

		if ((panel_setup >> 23) & 0x1)
			lvds->lvds_gen_cntl |= RADEON_LVDS_BL_CLK_SEL;

		/* preserve the top nibble of the setup dword as-is */
		lvds->lvds_gen_cntl |= (panel_setup & 0xf0000000);

		/* scan the (up to 32-entry) mode table for an entry
		 * matching the native panel resolution and pull its
		 * detailed timings; a zero offset terminates the list */
		for (i = 0; i < 32; i++) {
			tmp = RBIOS16(lcd_info + 64 + i * 2);
			if (tmp == 0)
				break;

			if ((RBIOS16(tmp) == lvds->native_mode.panel_xres) &&
			    (RBIOS16(tmp + 2) ==
			     lvds->native_mode.panel_yres)) {
				/* horizontal values are stored in units
				 * of 8 pixels */
				lvds->native_mode.hblank =
				    (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
				lvds->native_mode.hoverplus =
				    (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) -
				     1) * 8;
				lvds->native_mode.hsync_width =
				    RBIOS8(tmp + 23) * 8;

				lvds->native_mode.vblank = (RBIOS16(tmp + 24) -
							    RBIOS16(tmp + 26));
				lvds->native_mode.voverplus =
				    ((RBIOS16(tmp + 28) & 0x7ff) -
				     RBIOS16(tmp + 26));
				/* vsync width is in the top 5 bits */
				lvds->native_mode.vsync_width =
				    ((RBIOS16(tmp + 28) & 0xf800) >> 11);
				/* dot clock is stored in 10 kHz units */
				lvds->native_mode.dotclock =
				    RBIOS16(tmp + 9) * 10;
				lvds->native_mode.flags = 0;
			}
		}
		encoder->native_mode = lvds->native_mode;
	} else {
		DRM_INFO("No panel info found in BIOS\n");
		return radeon_legacy_get_lvds_info_from_regs(rdev);
	}
	return lvds;
}
976
/* Per-family default internal TMDS PLL settings: up to 4 {freq, value}
 * pairs per chip, where freq is the upper pixel-clock bound (in 10 kHz
 * units; 0xffffffff = catch-all) for which the PLL value applies.
 * Used by radeon_legacy_get_tmds_info_from_table() when the BIOS has
 * no DFP info table. */
static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
	{{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_R100  */
	{{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_RV100 */
	{{0, 0}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RS100 */
	{{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_RV200 */
	{{12000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_RS200 */
	{{15000, 0xa1b}, {0xffffffff, 0xa3f}, {0, 0}, {0, 0}},	/* CHIP_R200  */
	{{15500, 0x81b}, {0xffffffff, 0x83f}, {0, 0}, {0, 0}},	/* CHIP_RV250 */
	{{0, 0}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RS300 */
	{{13000, 0x400f4}, {15000, 0x400f7}, {0xffffffff, 0x40111}, {0, 0}},	/* CHIP_RV280 */
	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R300  */
	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R350  */
	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RV350 */
	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RV380 */
	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R420  */
	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R423  */
	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RV410 */
	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RS400 */
	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RS480 */
};
997
998static struct radeon_encoder_int_tmds
999 *radeon_legacy_get_tmds_info_from_table(struct radeon_device *rdev)
1000{
1001 int i;
1002 struct radeon_encoder_int_tmds *tmds = NULL;
1003
1004 tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
1005
1006 if (!tmds)
1007 return NULL;
1008
1009 for (i = 0; i < 4; i++) {
1010 tmds->tmds_pll[i].value =
1011 default_tmds_pll[rdev->family][i].value;
1012 tmds->tmds_pll[i].freq = default_tmds_pll[rdev->family][i].freq;
1013 }
1014
1015 return tmds;
1016}
1017
/*
 * radeon_combios_get_tmds_info - parse internal TMDS PLL settings from
 * the COMBIOS DFP info table (revisions 3 and 4).
 *
 * Falls back to the per-family defaults table when there is no BIOS.
 * An unrecognized table revision returns a zero-filled record; a
 * missing table returns NULL.  Caller owns/frees the result.
 */
struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct
							      radeon_encoder
							      *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	uint16_t tmds_info;
	int i, n;
	uint8_t ver;
	struct radeon_encoder_int_tmds *tmds = NULL;

	if (rdev->bios == NULL)
		return radeon_legacy_get_tmds_info_from_table(rdev);

	tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);

	if (tmds_info) {
		tmds =
		    kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);

		if (!tmds)
			return NULL;

		ver = RBIOS8(tmds_info);
		DRM_INFO("DFP table revision: %d\n", ver);
		if (ver == 3) {
			/* rev 3: fixed 10-byte stride per entry, at most
			 * 4 entries */
			n = RBIOS8(tmds_info + 5) + 1;
			if (n > 4)
				n = 4;
			for (i = 0; i < n; i++) {
				tmds->tmds_pll[i].value =
				    RBIOS32(tmds_info + i * 10 + 0x08);
				tmds->tmds_pll[i].freq =
				    RBIOS16(tmds_info + i * 10 + 0x10);
				DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
					  tmds->tmds_pll[i].freq,
					  tmds->tmds_pll[i].value);
			}
		} else if (ver == 4) {
			/* rev 4: first entry is 10 bytes, subsequent
			 * entries are packed at 6 bytes each */
			int stride = 0;
			n = RBIOS8(tmds_info + 5) + 1;
			if (n > 4)
				n = 4;
			for (i = 0; i < n; i++) {
				tmds->tmds_pll[i].value =
				    RBIOS32(tmds_info + stride + 0x08);
				tmds->tmds_pll[i].freq =
				    RBIOS16(tmds_info + stride + 0x10);
				if (i == 0)
					stride += 10;
				else
					stride += 6;
				DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
					  tmds->tmds_pll[i].freq,
					  tmds->tmds_pll[i].value);
			}
		}
	} else
		DRM_INFO("No TMDS info found in BIOS\n");
	return tmds;
}
1079
1080void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder)
1081{
1082 struct drm_device *dev = encoder->base.dev;
1083 struct radeon_device *rdev = dev->dev_private;
1084 uint16_t ext_tmds_info;
1085 uint8_t ver;
1086
1087 if (rdev->bios == NULL)
1088 return;
1089
1090 ext_tmds_info =
1091 combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
1092 if (ext_tmds_info) {
1093 ver = RBIOS8(ext_tmds_info);
1094 DRM_INFO("External TMDS Table revision: %d\n", ver);
1095 // TODO
1096 }
1097}
1098
1099bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1100{
1101 struct radeon_device *rdev = dev->dev_private;
1102 struct radeon_i2c_bus_rec ddc_i2c;
1103
1104 rdev->mode_info.connector_table = radeon_connector_table;
1105 if (rdev->mode_info.connector_table == CT_NONE) {
1106#ifdef CONFIG_PPC_PMAC
1107 if (machine_is_compatible("PowerBook3,3")) {
1108 /* powerbook with VGA */
1109 rdev->mode_info.connector_table = CT_POWERBOOK_VGA;
1110 } else if (machine_is_compatible("PowerBook3,4") ||
1111 machine_is_compatible("PowerBook3,5")) {
1112 /* powerbook with internal tmds */
1113 rdev->mode_info.connector_table = CT_POWERBOOK_INTERNAL;
1114 } else if (machine_is_compatible("PowerBook5,1") ||
1115 machine_is_compatible("PowerBook5,2") ||
1116 machine_is_compatible("PowerBook5,3") ||
1117 machine_is_compatible("PowerBook5,4") ||
1118 machine_is_compatible("PowerBook5,5")) {
1119 /* powerbook with external single link tmds (sil164) */
1120 rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
1121 } else if (machine_is_compatible("PowerBook5,6")) {
1122 /* powerbook with external dual or single link tmds */
1123 rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
1124 } else if (machine_is_compatible("PowerBook5,7") ||
1125 machine_is_compatible("PowerBook5,8") ||
1126 machine_is_compatible("PowerBook5,9")) {
1127 /* PowerBook6,2 ? */
1128 /* powerbook with external dual link tmds (sil1178?) */
1129 rdev->mode_info.connector_table = CT_POWERBOOK_EXTERNAL;
1130 } else if (machine_is_compatible("PowerBook4,1") ||
1131 machine_is_compatible("PowerBook4,2") ||
1132 machine_is_compatible("PowerBook4,3") ||
1133 machine_is_compatible("PowerBook6,3") ||
1134 machine_is_compatible("PowerBook6,5") ||
1135 machine_is_compatible("PowerBook6,7")) {
1136 /* ibook */
1137 rdev->mode_info.connector_table = CT_IBOOK;
1138 } else if (machine_is_compatible("PowerMac4,4")) {
1139 /* emac */
1140 rdev->mode_info.connector_table = CT_EMAC;
1141 } else if (machine_is_compatible("PowerMac10,1")) {
1142 /* mini with internal tmds */
1143 rdev->mode_info.connector_table = CT_MINI_INTERNAL;
1144 } else if (machine_is_compatible("PowerMac10,2")) {
1145 /* mini with external tmds */
1146 rdev->mode_info.connector_table = CT_MINI_EXTERNAL;
1147 } else if (machine_is_compatible("PowerMac12,1")) {
1148 /* PowerMac8,1 ? */
1149 /* imac g5 isight */
1150 rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
1151 } else
1152#endif /* CONFIG_PPC_PMAC */
1153 rdev->mode_info.connector_table = CT_GENERIC;
1154 }
1155
1156 switch (rdev->mode_info.connector_table) {
1157 case CT_GENERIC:
1158 DRM_INFO("Connector Table: %d (generic)\n",
1159 rdev->mode_info.connector_table);
1160 /* these are the most common settings */
1161 if (rdev->flags & RADEON_SINGLE_CRTC) {
1162 /* VGA - primary dac */
1163 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1164 radeon_add_legacy_encoder(dev,
1165 radeon_get_encoder_id(dev,
1166 ATOM_DEVICE_CRT1_SUPPORT,
1167 1),
1168 ATOM_DEVICE_CRT1_SUPPORT);
1169 radeon_add_legacy_connector(dev, 0,
1170 ATOM_DEVICE_CRT1_SUPPORT,
1171 DRM_MODE_CONNECTOR_VGA,
1172 &ddc_i2c);
1173 } else if (rdev->flags & RADEON_IS_MOBILITY) {
1174 /* LVDS */
1175 ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK);
1176 radeon_add_legacy_encoder(dev,
1177 radeon_get_encoder_id(dev,
1178 ATOM_DEVICE_LCD1_SUPPORT,
1179 0),
1180 ATOM_DEVICE_LCD1_SUPPORT);
1181 radeon_add_legacy_connector(dev, 0,
1182 ATOM_DEVICE_LCD1_SUPPORT,
1183 DRM_MODE_CONNECTOR_LVDS,
1184 &ddc_i2c);
1185
1186 /* VGA - primary dac */
1187 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1188 radeon_add_legacy_encoder(dev,
1189 radeon_get_encoder_id(dev,
1190 ATOM_DEVICE_CRT1_SUPPORT,
1191 1),
1192 ATOM_DEVICE_CRT1_SUPPORT);
1193 radeon_add_legacy_connector(dev, 1,
1194 ATOM_DEVICE_CRT1_SUPPORT,
1195 DRM_MODE_CONNECTOR_VGA,
1196 &ddc_i2c);
1197 } else {
1198 /* DVI-I - tv dac, int tmds */
1199 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1200 radeon_add_legacy_encoder(dev,
1201 radeon_get_encoder_id(dev,
1202 ATOM_DEVICE_DFP1_SUPPORT,
1203 0),
1204 ATOM_DEVICE_DFP1_SUPPORT);
1205 radeon_add_legacy_encoder(dev,
1206 radeon_get_encoder_id(dev,
1207 ATOM_DEVICE_CRT2_SUPPORT,
1208 2),
1209 ATOM_DEVICE_CRT2_SUPPORT);
1210 radeon_add_legacy_connector(dev, 0,
1211 ATOM_DEVICE_DFP1_SUPPORT |
1212 ATOM_DEVICE_CRT2_SUPPORT,
1213 DRM_MODE_CONNECTOR_DVII,
1214 &ddc_i2c);
1215
1216 /* VGA - primary dac */
1217 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1218 radeon_add_legacy_encoder(dev,
1219 radeon_get_encoder_id(dev,
1220 ATOM_DEVICE_CRT1_SUPPORT,
1221 1),
1222 ATOM_DEVICE_CRT1_SUPPORT);
1223 radeon_add_legacy_connector(dev, 1,
1224 ATOM_DEVICE_CRT1_SUPPORT,
1225 DRM_MODE_CONNECTOR_VGA,
1226 &ddc_i2c);
1227 }
1228
1229 if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
1230 /* TV - tv dac */
1231 radeon_add_legacy_encoder(dev,
1232 radeon_get_encoder_id(dev,
1233 ATOM_DEVICE_TV1_SUPPORT,
1234 2),
1235 ATOM_DEVICE_TV1_SUPPORT);
1236 radeon_add_legacy_connector(dev, 2,
1237 ATOM_DEVICE_TV1_SUPPORT,
1238 DRM_MODE_CONNECTOR_SVIDEO,
1239 &ddc_i2c);
1240 }
1241 break;
1242 case CT_IBOOK:
1243 DRM_INFO("Connector Table: %d (ibook)\n",
1244 rdev->mode_info.connector_table);
1245 /* LVDS */
1246 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1247 radeon_add_legacy_encoder(dev,
1248 radeon_get_encoder_id(dev,
1249 ATOM_DEVICE_LCD1_SUPPORT,
1250 0),
1251 ATOM_DEVICE_LCD1_SUPPORT);
1252 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1253 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
1254 /* VGA - TV DAC */
1255 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1256 radeon_add_legacy_encoder(dev,
1257 radeon_get_encoder_id(dev,
1258 ATOM_DEVICE_CRT2_SUPPORT,
1259 2),
1260 ATOM_DEVICE_CRT2_SUPPORT);
1261 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1262 DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
1263 /* TV - TV DAC */
1264 radeon_add_legacy_encoder(dev,
1265 radeon_get_encoder_id(dev,
1266 ATOM_DEVICE_TV1_SUPPORT,
1267 2),
1268 ATOM_DEVICE_TV1_SUPPORT);
1269 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1270 DRM_MODE_CONNECTOR_SVIDEO,
1271 &ddc_i2c);
1272 break;
1273 case CT_POWERBOOK_EXTERNAL:
1274 DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
1275 rdev->mode_info.connector_table);
1276 /* LVDS */
1277 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1278 radeon_add_legacy_encoder(dev,
1279 radeon_get_encoder_id(dev,
1280 ATOM_DEVICE_LCD1_SUPPORT,
1281 0),
1282 ATOM_DEVICE_LCD1_SUPPORT);
1283 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1284 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
1285 /* DVI-I - primary dac, ext tmds */
1286 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1287 radeon_add_legacy_encoder(dev,
1288 radeon_get_encoder_id(dev,
1289 ATOM_DEVICE_DFP2_SUPPORT,
1290 0),
1291 ATOM_DEVICE_DFP2_SUPPORT);
1292 radeon_add_legacy_encoder(dev,
1293 radeon_get_encoder_id(dev,
1294 ATOM_DEVICE_CRT1_SUPPORT,
1295 1),
1296 ATOM_DEVICE_CRT1_SUPPORT);
1297 radeon_add_legacy_connector(dev, 1,
1298 ATOM_DEVICE_DFP2_SUPPORT |
1299 ATOM_DEVICE_CRT1_SUPPORT,
1300 DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
1301 /* TV - TV DAC */
1302 radeon_add_legacy_encoder(dev,
1303 radeon_get_encoder_id(dev,
1304 ATOM_DEVICE_TV1_SUPPORT,
1305 2),
1306 ATOM_DEVICE_TV1_SUPPORT);
1307 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1308 DRM_MODE_CONNECTOR_SVIDEO,
1309 &ddc_i2c);
1310 break;
1311 case CT_POWERBOOK_INTERNAL:
1312 DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
1313 rdev->mode_info.connector_table);
1314 /* LVDS */
1315 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1316 radeon_add_legacy_encoder(dev,
1317 radeon_get_encoder_id(dev,
1318 ATOM_DEVICE_LCD1_SUPPORT,
1319 0),
1320 ATOM_DEVICE_LCD1_SUPPORT);
1321 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1322 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
1323 /* DVI-I - primary dac, int tmds */
1324 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1325 radeon_add_legacy_encoder(dev,
1326 radeon_get_encoder_id(dev,
1327 ATOM_DEVICE_DFP1_SUPPORT,
1328 0),
1329 ATOM_DEVICE_DFP1_SUPPORT);
1330 radeon_add_legacy_encoder(dev,
1331 radeon_get_encoder_id(dev,
1332 ATOM_DEVICE_CRT1_SUPPORT,
1333 1),
1334 ATOM_DEVICE_CRT1_SUPPORT);
1335 radeon_add_legacy_connector(dev, 1,
1336 ATOM_DEVICE_DFP1_SUPPORT |
1337 ATOM_DEVICE_CRT1_SUPPORT,
1338 DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
1339 /* TV - TV DAC */
1340 radeon_add_legacy_encoder(dev,
1341 radeon_get_encoder_id(dev,
1342 ATOM_DEVICE_TV1_SUPPORT,
1343 2),
1344 ATOM_DEVICE_TV1_SUPPORT);
1345 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1346 DRM_MODE_CONNECTOR_SVIDEO,
1347 &ddc_i2c);
1348 break;
1349 case CT_POWERBOOK_VGA:
1350 DRM_INFO("Connector Table: %d (powerbook vga)\n",
1351 rdev->mode_info.connector_table);
1352 /* LVDS */
1353 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1354 radeon_add_legacy_encoder(dev,
1355 radeon_get_encoder_id(dev,
1356 ATOM_DEVICE_LCD1_SUPPORT,
1357 0),
1358 ATOM_DEVICE_LCD1_SUPPORT);
1359 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1360 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c);
1361 /* VGA - primary dac */
1362 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1363 radeon_add_legacy_encoder(dev,
1364 radeon_get_encoder_id(dev,
1365 ATOM_DEVICE_CRT1_SUPPORT,
1366 1),
1367 ATOM_DEVICE_CRT1_SUPPORT);
1368 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
1369 DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
1370 /* TV - TV DAC */
1371 radeon_add_legacy_encoder(dev,
1372 radeon_get_encoder_id(dev,
1373 ATOM_DEVICE_TV1_SUPPORT,
1374 2),
1375 ATOM_DEVICE_TV1_SUPPORT);
1376 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1377 DRM_MODE_CONNECTOR_SVIDEO,
1378 &ddc_i2c);
1379 break;
1380 case CT_MINI_EXTERNAL:
1381 DRM_INFO("Connector Table: %d (mini external tmds)\n",
1382 rdev->mode_info.connector_table);
1383 /* DVI-I - tv dac, ext tmds */
1384 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
1385 radeon_add_legacy_encoder(dev,
1386 radeon_get_encoder_id(dev,
1387 ATOM_DEVICE_DFP2_SUPPORT,
1388 0),
1389 ATOM_DEVICE_DFP2_SUPPORT);
1390 radeon_add_legacy_encoder(dev,
1391 radeon_get_encoder_id(dev,
1392 ATOM_DEVICE_CRT2_SUPPORT,
1393 2),
1394 ATOM_DEVICE_CRT2_SUPPORT);
1395 radeon_add_legacy_connector(dev, 0,
1396 ATOM_DEVICE_DFP2_SUPPORT |
1397 ATOM_DEVICE_CRT2_SUPPORT,
1398 DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
1399 /* TV - TV DAC */
1400 radeon_add_legacy_encoder(dev,
1401 radeon_get_encoder_id(dev,
1402 ATOM_DEVICE_TV1_SUPPORT,
1403 2),
1404 ATOM_DEVICE_TV1_SUPPORT);
1405 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
1406 DRM_MODE_CONNECTOR_SVIDEO,
1407 &ddc_i2c);
1408 break;
1409 case CT_MINI_INTERNAL:
1410 DRM_INFO("Connector Table: %d (mini internal tmds)\n",
1411 rdev->mode_info.connector_table);
1412 /* DVI-I - tv dac, int tmds */
1413 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
1414 radeon_add_legacy_encoder(dev,
1415 radeon_get_encoder_id(dev,
1416 ATOM_DEVICE_DFP1_SUPPORT,
1417 0),
1418 ATOM_DEVICE_DFP1_SUPPORT);
1419 radeon_add_legacy_encoder(dev,
1420 radeon_get_encoder_id(dev,
1421 ATOM_DEVICE_CRT2_SUPPORT,
1422 2),
1423 ATOM_DEVICE_CRT2_SUPPORT);
1424 radeon_add_legacy_connector(dev, 0,
1425 ATOM_DEVICE_DFP1_SUPPORT |
1426 ATOM_DEVICE_CRT2_SUPPORT,
1427 DRM_MODE_CONNECTOR_DVII, &ddc_i2c);
1428 /* TV - TV DAC */
1429 radeon_add_legacy_encoder(dev,
1430 radeon_get_encoder_id(dev,
1431 ATOM_DEVICE_TV1_SUPPORT,
1432 2),
1433 ATOM_DEVICE_TV1_SUPPORT);
1434 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
1435 DRM_MODE_CONNECTOR_SVIDEO,
1436 &ddc_i2c);
1437 break;
1438 case CT_IMAC_G5_ISIGHT:
1439 DRM_INFO("Connector Table: %d (imac g5 isight)\n",
1440 rdev->mode_info.connector_table);
1441 /* DVI-D - int tmds */
1442 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
1443 radeon_add_legacy_encoder(dev,
1444 radeon_get_encoder_id(dev,
1445 ATOM_DEVICE_DFP1_SUPPORT,
1446 0),
1447 ATOM_DEVICE_DFP1_SUPPORT);
1448 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
1449 DRM_MODE_CONNECTOR_DVID, &ddc_i2c);
1450 /* VGA - tv dac */
1451 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1452 radeon_add_legacy_encoder(dev,
1453 radeon_get_encoder_id(dev,
1454 ATOM_DEVICE_CRT2_SUPPORT,
1455 2),
1456 ATOM_DEVICE_CRT2_SUPPORT);
1457 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1458 DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
1459 /* TV - TV DAC */
1460 radeon_add_legacy_encoder(dev,
1461 radeon_get_encoder_id(dev,
1462 ATOM_DEVICE_TV1_SUPPORT,
1463 2),
1464 ATOM_DEVICE_TV1_SUPPORT);
1465 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1466 DRM_MODE_CONNECTOR_SVIDEO,
1467 &ddc_i2c);
1468 break;
1469 case CT_EMAC:
1470 DRM_INFO("Connector Table: %d (emac)\n",
1471 rdev->mode_info.connector_table);
1472 /* VGA - primary dac */
1473 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1474 radeon_add_legacy_encoder(dev,
1475 radeon_get_encoder_id(dev,
1476 ATOM_DEVICE_CRT1_SUPPORT,
1477 1),
1478 ATOM_DEVICE_CRT1_SUPPORT);
1479 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
1480 DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
1481 /* VGA - tv dac */
1482 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
1483 radeon_add_legacy_encoder(dev,
1484 radeon_get_encoder_id(dev,
1485 ATOM_DEVICE_CRT2_SUPPORT,
1486 2),
1487 ATOM_DEVICE_CRT2_SUPPORT);
1488 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1489 DRM_MODE_CONNECTOR_VGA, &ddc_i2c);
1490 /* TV - TV DAC */
1491 radeon_add_legacy_encoder(dev,
1492 radeon_get_encoder_id(dev,
1493 ATOM_DEVICE_TV1_SUPPORT,
1494 2),
1495 ATOM_DEVICE_TV1_SUPPORT);
1496 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1497 DRM_MODE_CONNECTOR_SVIDEO,
1498 &ddc_i2c);
1499 break;
1500 default:
1501 DRM_INFO("Connector table: %d (invalid)\n",
1502 rdev->mode_info.connector_table);
1503 return false;
1504 }
1505
1506 radeon_link_encoder_connector(dev);
1507
1508 return true;
1509}
1510
1511static bool radeon_apply_legacy_quirks(struct drm_device *dev,
1512 int bios_index,
1513 enum radeon_combios_connector
1514 *legacy_connector,
1515 struct radeon_i2c_bus_rec *ddc_i2c)
1516{
1517 struct radeon_device *rdev = dev->dev_private;
1518
1519 /* XPRESS DDC quirks */
1520 if ((rdev->family == CHIP_RS400 ||
1521 rdev->family == CHIP_RS480) &&
1522 ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
1523 *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
1524 else if ((rdev->family == CHIP_RS400 ||
1525 rdev->family == CHIP_RS480) &&
1526 ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
1527 ddc_i2c->valid = true;
1528 ddc_i2c->mask_clk_mask = (0x20 << 8);
1529 ddc_i2c->mask_data_mask = 0x80;
1530 ddc_i2c->a_clk_mask = (0x20 << 8);
1531 ddc_i2c->a_data_mask = 0x80;
1532 ddc_i2c->put_clk_mask = (0x20 << 8);
1533 ddc_i2c->put_data_mask = 0x80;
1534 ddc_i2c->get_clk_mask = (0x20 << 8);
1535 ddc_i2c->get_data_mask = 0x80;
1536 ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK;
1537 ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK;
1538 ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A;
1539 ddc_i2c->a_data_reg = RADEON_GPIOPAD_A;
1540 ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN;
1541 ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN;
1542 ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG;
1543 ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG;
1544 }
1545
1546 /* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
1547 one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
1548 if (dev->pdev->device == 0x515e &&
1549 dev->pdev->subsystem_vendor == 0x1014) {
1550 if (*legacy_connector == CONNECTOR_CRT_LEGACY &&
1551 ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
1552 return false;
1553 }
1554
1555 /* Some RV100 cards with 2 VGA ports show up with DVI+VGA */
1556 if (dev->pdev->device == 0x5159 &&
1557 dev->pdev->subsystem_vendor == 0x1002 &&
1558 dev->pdev->subsystem_device == 0x013a) {
1559 if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
1560 *legacy_connector = CONNECTOR_CRT_LEGACY;
1561
1562 }
1563
1564 /* X300 card with extra non-existent DVI port */
1565 if (dev->pdev->device == 0x5B60 &&
1566 dev->pdev->subsystem_vendor == 0x17af &&
1567 dev->pdev->subsystem_device == 0x201e && bios_index == 2) {
1568 if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
1569 return false;
1570 }
1571
1572 return true;
1573}
1574
1575bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1576{
1577 struct radeon_device *rdev = dev->dev_private;
1578 uint32_t conn_info, entry, devices;
1579 uint16_t tmp;
1580 enum radeon_combios_ddc ddc_type;
1581 enum radeon_combios_connector connector;
1582 int i = 0;
1583 struct radeon_i2c_bus_rec ddc_i2c;
1584
1585 if (rdev->bios == NULL)
1586 return false;
1587
1588 conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
1589 if (conn_info) {
1590 for (i = 0; i < 4; i++) {
1591 entry = conn_info + 2 + i * 2;
1592
1593 if (!RBIOS16(entry))
1594 break;
1595
1596 tmp = RBIOS16(entry);
1597
1598 connector = (tmp >> 12) & 0xf;
1599
1600 ddc_type = (tmp >> 8) & 0xf;
1601 switch (ddc_type) {
1602 case DDC_MONID:
1603 ddc_i2c =
1604 combios_setup_i2c_bus(RADEON_GPIO_MONID);
1605 break;
1606 case DDC_DVI:
1607 ddc_i2c =
1608 combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1609 break;
1610 case DDC_VGA:
1611 ddc_i2c =
1612 combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
1613 break;
1614 case DDC_CRT2:
1615 ddc_i2c =
1616 combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
1617 break;
1618 default:
1619 break;
1620 }
1621
1622 radeon_apply_legacy_quirks(dev, i, &connector,
1623 &ddc_i2c);
1624
1625 switch (connector) {
1626 case CONNECTOR_PROPRIETARY_LEGACY:
1627 if ((tmp >> 4) & 0x1)
1628 devices = ATOM_DEVICE_DFP2_SUPPORT;
1629 else
1630 devices = ATOM_DEVICE_DFP1_SUPPORT;
1631 radeon_add_legacy_encoder(dev,
1632 radeon_get_encoder_id
1633 (dev, devices, 0),
1634 devices);
1635 radeon_add_legacy_connector(dev, i, devices,
1636 legacy_connector_convert
1637 [connector],
1638 &ddc_i2c);
1639 break;
1640 case CONNECTOR_CRT_LEGACY:
1641 if (tmp & 0x1) {
1642 devices = ATOM_DEVICE_CRT2_SUPPORT;
1643 radeon_add_legacy_encoder(dev,
1644 radeon_get_encoder_id
1645 (dev,
1646 ATOM_DEVICE_CRT2_SUPPORT,
1647 2),
1648 ATOM_DEVICE_CRT2_SUPPORT);
1649 } else {
1650 devices = ATOM_DEVICE_CRT1_SUPPORT;
1651 radeon_add_legacy_encoder(dev,
1652 radeon_get_encoder_id
1653 (dev,
1654 ATOM_DEVICE_CRT1_SUPPORT,
1655 1),
1656 ATOM_DEVICE_CRT1_SUPPORT);
1657 }
1658 radeon_add_legacy_connector(dev,
1659 i,
1660 devices,
1661 legacy_connector_convert
1662 [connector],
1663 &ddc_i2c);
1664 break;
1665 case CONNECTOR_DVI_I_LEGACY:
1666 devices = 0;
1667 if (tmp & 0x1) {
1668 devices |= ATOM_DEVICE_CRT2_SUPPORT;
1669 radeon_add_legacy_encoder(dev,
1670 radeon_get_encoder_id
1671 (dev,
1672 ATOM_DEVICE_CRT2_SUPPORT,
1673 2),
1674 ATOM_DEVICE_CRT2_SUPPORT);
1675 } else {
1676 devices |= ATOM_DEVICE_CRT1_SUPPORT;
1677 radeon_add_legacy_encoder(dev,
1678 radeon_get_encoder_id
1679 (dev,
1680 ATOM_DEVICE_CRT1_SUPPORT,
1681 1),
1682 ATOM_DEVICE_CRT1_SUPPORT);
1683 }
1684 if ((tmp >> 4) & 0x1) {
1685 devices |= ATOM_DEVICE_DFP2_SUPPORT;
1686 radeon_add_legacy_encoder(dev,
1687 radeon_get_encoder_id
1688 (dev,
1689 ATOM_DEVICE_DFP2_SUPPORT,
1690 0),
1691 ATOM_DEVICE_DFP2_SUPPORT);
1692 } else {
1693 devices |= ATOM_DEVICE_DFP1_SUPPORT;
1694 radeon_add_legacy_encoder(dev,
1695 radeon_get_encoder_id
1696 (dev,
1697 ATOM_DEVICE_DFP1_SUPPORT,
1698 0),
1699 ATOM_DEVICE_DFP1_SUPPORT);
1700 }
1701 radeon_add_legacy_connector(dev,
1702 i,
1703 devices,
1704 legacy_connector_convert
1705 [connector],
1706 &ddc_i2c);
1707 break;
1708 case CONNECTOR_DVI_D_LEGACY:
1709 if ((tmp >> 4) & 0x1)
1710 devices = ATOM_DEVICE_DFP2_SUPPORT;
1711 else
1712 devices = ATOM_DEVICE_DFP1_SUPPORT;
1713 radeon_add_legacy_encoder(dev,
1714 radeon_get_encoder_id
1715 (dev, devices, 0),
1716 devices);
1717 radeon_add_legacy_connector(dev, i, devices,
1718 legacy_connector_convert
1719 [connector],
1720 &ddc_i2c);
1721 break;
1722 case CONNECTOR_CTV_LEGACY:
1723 case CONNECTOR_STV_LEGACY:
1724 radeon_add_legacy_encoder(dev,
1725 radeon_get_encoder_id
1726 (dev,
1727 ATOM_DEVICE_TV1_SUPPORT,
1728 2),
1729 ATOM_DEVICE_TV1_SUPPORT);
1730 radeon_add_legacy_connector(dev, i,
1731 ATOM_DEVICE_TV1_SUPPORT,
1732 legacy_connector_convert
1733 [connector],
1734 &ddc_i2c);
1735 break;
1736 default:
1737 DRM_ERROR("Unknown connector type: %d\n",
1738 connector);
1739 continue;
1740 }
1741
1742 }
1743 } else {
1744 uint16_t tmds_info =
1745 combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
1746 if (tmds_info) {
1747 DRM_DEBUG("Found DFP table, assuming DVI connector\n");
1748
1749 radeon_add_legacy_encoder(dev,
1750 radeon_get_encoder_id(dev,
1751 ATOM_DEVICE_CRT1_SUPPORT,
1752 1),
1753 ATOM_DEVICE_CRT1_SUPPORT);
1754 radeon_add_legacy_encoder(dev,
1755 radeon_get_encoder_id(dev,
1756 ATOM_DEVICE_DFP1_SUPPORT,
1757 0),
1758 ATOM_DEVICE_DFP1_SUPPORT);
1759
1760 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
1761 radeon_add_legacy_connector(dev,
1762 0,
1763 ATOM_DEVICE_CRT1_SUPPORT |
1764 ATOM_DEVICE_DFP1_SUPPORT,
1765 DRM_MODE_CONNECTOR_DVII,
1766 &ddc_i2c);
1767 } else {
1768 DRM_DEBUG("No connector info found\n");
1769 return false;
1770 }
1771 }
1772
1773 if (rdev->flags & RADEON_IS_MOBILITY || rdev->flags & RADEON_IS_IGP) {
1774 uint16_t lcd_info =
1775 combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
1776 if (lcd_info) {
1777 uint16_t lcd_ddc_info =
1778 combios_get_table_offset(dev,
1779 COMBIOS_LCD_DDC_INFO_TABLE);
1780
1781 radeon_add_legacy_encoder(dev,
1782 radeon_get_encoder_id(dev,
1783 ATOM_DEVICE_LCD1_SUPPORT,
1784 0),
1785 ATOM_DEVICE_LCD1_SUPPORT);
1786
1787 if (lcd_ddc_info) {
1788 ddc_type = RBIOS8(lcd_ddc_info + 2);
1789 switch (ddc_type) {
1790 case DDC_MONID:
1791 ddc_i2c =
1792 combios_setup_i2c_bus
1793 (RADEON_GPIO_MONID);
1794 break;
1795 case DDC_DVI:
1796 ddc_i2c =
1797 combios_setup_i2c_bus
1798 (RADEON_GPIO_DVI_DDC);
1799 break;
1800 case DDC_VGA:
1801 ddc_i2c =
1802 combios_setup_i2c_bus
1803 (RADEON_GPIO_VGA_DDC);
1804 break;
1805 case DDC_CRT2:
1806 ddc_i2c =
1807 combios_setup_i2c_bus
1808 (RADEON_GPIO_CRT2_DDC);
1809 break;
1810 case DDC_LCD:
1811 ddc_i2c =
1812 combios_setup_i2c_bus
1813 (RADEON_LCD_GPIO_MASK);
1814 ddc_i2c.mask_clk_mask =
1815 RBIOS32(lcd_ddc_info + 3);
1816 ddc_i2c.mask_data_mask =
1817 RBIOS32(lcd_ddc_info + 7);
1818 ddc_i2c.a_clk_mask =
1819 RBIOS32(lcd_ddc_info + 3);
1820 ddc_i2c.a_data_mask =
1821 RBIOS32(lcd_ddc_info + 7);
1822 ddc_i2c.put_clk_mask =
1823 RBIOS32(lcd_ddc_info + 3);
1824 ddc_i2c.put_data_mask =
1825 RBIOS32(lcd_ddc_info + 7);
1826 ddc_i2c.get_clk_mask =
1827 RBIOS32(lcd_ddc_info + 3);
1828 ddc_i2c.get_data_mask =
1829 RBIOS32(lcd_ddc_info + 7);
1830 break;
1831 case DDC_GPIO:
1832 ddc_i2c =
1833 combios_setup_i2c_bus
1834 (RADEON_MDGPIO_EN_REG);
1835 ddc_i2c.mask_clk_mask =
1836 RBIOS32(lcd_ddc_info + 3);
1837 ddc_i2c.mask_data_mask =
1838 RBIOS32(lcd_ddc_info + 7);
1839 ddc_i2c.a_clk_mask =
1840 RBIOS32(lcd_ddc_info + 3);
1841 ddc_i2c.a_data_mask =
1842 RBIOS32(lcd_ddc_info + 7);
1843 ddc_i2c.put_clk_mask =
1844 RBIOS32(lcd_ddc_info + 3);
1845 ddc_i2c.put_data_mask =
1846 RBIOS32(lcd_ddc_info + 7);
1847 ddc_i2c.get_clk_mask =
1848 RBIOS32(lcd_ddc_info + 3);
1849 ddc_i2c.get_data_mask =
1850 RBIOS32(lcd_ddc_info + 7);
1851 break;
1852 default:
1853 ddc_i2c.valid = false;
1854 break;
1855 }
1856 DRM_DEBUG("LCD DDC Info Table found!\n");
1857 } else
1858 ddc_i2c.valid = false;
1859
1860 radeon_add_legacy_connector(dev,
1861 5,
1862 ATOM_DEVICE_LCD1_SUPPORT,
1863 DRM_MODE_CONNECTOR_LVDS,
1864 &ddc_i2c);
1865 }
1866 }
1867
1868 /* check TV table */
1869 if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
1870 uint32_t tv_info =
1871 combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
1872 if (tv_info) {
1873 if (RBIOS8(tv_info + 6) == 'T') {
1874 radeon_add_legacy_encoder(dev,
1875 radeon_get_encoder_id
1876 (dev,
1877 ATOM_DEVICE_TV1_SUPPORT,
1878 2),
1879 ATOM_DEVICE_TV1_SUPPORT);
1880 radeon_add_legacy_connector(dev, 6,
1881 ATOM_DEVICE_TV1_SUPPORT,
1882 DRM_MODE_CONNECTOR_SVIDEO,
1883 &ddc_i2c);
1884 }
1885 }
1886 }
1887
1888 radeon_link_encoder_connector(dev);
1889
1890 return true;
1891}
1892
/*
 * Execute a COMBIOS MMIO command table.
 *
 * Each operation starts with a 16-bit word: bits 15:13 are the command
 * opcode, bits 12:0 the target register offset.  A zero word terminates
 * the table.  Operand bytes follow each command word, as decoded below.
 */
static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
{
	struct radeon_device *rdev = dev->dev_private;

	if (offset) {
		while (RBIOS16(offset)) {
			uint16_t cmd = ((RBIOS16(offset) & 0xe000) >> 13);
			uint32_t addr = (RBIOS16(offset) & 0x1fff);
			uint32_t val, and_mask, or_mask;
			uint32_t tmp;

			offset += 2;
			switch (cmd) {
			case 0:
				/* write a 32-bit immediate to the register */
				val = RBIOS32(offset);
				offset += 4;
				WREG32(addr, val);
				break;
			case 1:
				/* handled identically to case 0 here */
				val = RBIOS32(offset);
				offset += 4;
				WREG32(addr, val);
				break;
			case 2:
				/* read-modify-write: AND mask then OR mask */
				and_mask = RBIOS32(offset);
				offset += 4;
				or_mask = RBIOS32(offset);
				offset += 4;
				tmp = RREG32(addr);
				tmp &= and_mask;
				tmp |= or_mask;
				WREG32(addr, tmp);
				break;
			case 3:
				/* handled identically to case 2 here */
				and_mask = RBIOS32(offset);
				offset += 4;
				or_mask = RBIOS32(offset);
				offset += 4;
				tmp = RREG32(addr);
				tmp &= and_mask;
				tmp |= or_mask;
				WREG32(addr, tmp);
				break;
			case 4:
				/* delay for the given number of microseconds */
				val = RBIOS16(offset);
				offset += 2;
				udelay(val);
				break;
			case 5:
				/* poll operation; the "addr" field selects
				 * what to wait for, val bounds the polling */
				val = RBIOS16(offset);
				offset += 2;
				switch (addr) {
				case 8:
					/* wait until the memory controller
					 * is no longer busy (or val polls
					 * have elapsed) */
					while (val--) {
						if (!
						    (RREG32_PLL
						     (RADEON_CLK_PWRMGT_CNTL) &
						     RADEON_MC_BUSY))
							break;
					}
					break;
				case 9:
					/* wait for the memory controller to
					 * report idle */
					while (val--) {
						if ((RREG32(RADEON_MC_STATUS) &
						     RADEON_MC_IDLE))
							break;
					}
					break;
				default:
					break;
				}
				break;
			default:
				break;
			}
		}
	}
}
1971
/*
 * Execute a COMBIOS PLL command table.
 *
 * Each operation starts with one byte: bits 7:6 are the command opcode,
 * bits 5:0 the PLL register index (or, for the delay/wait opcodes, a
 * sub-command selector).  A zero byte terminates the table.
 */
static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
{
	struct radeon_device *rdev = dev->dev_private;

	if (offset) {
		while (RBIOS8(offset)) {
			uint8_t cmd = ((RBIOS8(offset) & 0xc0) >> 6);
			uint8_t addr = (RBIOS8(offset) & 0x3f);
			uint32_t val, shift, tmp;
			uint32_t and_mask, or_mask;

			offset++;
			switch (cmd) {
			case 0:
				/* write a 32-bit immediate to the PLL reg */
				val = RBIOS32(offset);
				offset += 4;
				WREG32_PLL(addr, val);
				break;
			case 1:
				/* byte-lane read-modify-write: the first
				 * operand byte selects which byte of the
				 * register to touch; bits outside that lane
				 * are preserved by the AND mask */
				shift = RBIOS8(offset) * 8;
				offset++;
				and_mask = RBIOS8(offset) << shift;
				and_mask |= ~(0xff << shift);
				offset++;
				or_mask = RBIOS8(offset) << shift;
				offset++;
				tmp = RREG32_PLL(addr);
				tmp &= and_mask;
				tmp |= or_mask;
				WREG32_PLL(addr, tmp);
				break;
			case 2:
			case 3:
				/* delay/wait; "addr" selects the variant,
				 * polls are bounded to 1000 iterations */
				tmp = 1000;
				switch (addr) {
				case 1:
					udelay(150);
					break;
				case 2:
					udelay(1000);
					break;
				case 3:
					/* wait for the memory controller to
					 * go non-busy */
					while (tmp--) {
						if (!
						    (RREG32_PLL
						     (RADEON_CLK_PWRMGT_CNTL) &
						     RADEON_MC_BUSY))
							break;
					}
					break;
				case 4:
					/* wait for the DLL to lock */
					while (tmp--) {
						if (RREG32_PLL
						    (RADEON_CLK_PWRMGT_CNTL) &
						    RADEON_DLL_READY)
							break;
					}
					break;
				case 5:
					/* clear the CG_NO1_DEBUG_0 clock-gating
					 * debug bit if it is set, then settle */
					tmp =
					    RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
					if (tmp & RADEON_CG_NO1_DEBUG_0) {
#if 0
						uint32_t mclk_cntl =
						    RREG32_PLL
						    (RADEON_MCLK_CNTL);
						mclk_cntl &= 0xffff0000;
						/*mclk_cntl |= 0x00001111;*//* ??? */
						WREG32_PLL(RADEON_MCLK_CNTL,
							   mclk_cntl);
						udelay(10000);
#endif
						WREG32_PLL
						    (RADEON_CLK_PWRMGT_CNTL,
						     tmp &
						     ~RADEON_CG_NO1_DEBUG_0);
						udelay(10000);
					}
					break;
				default:
					break;
				}
				break;
			default:
				break;
			}
		}
	}
}
2061
/*
 * Execute a COMBIOS RAM reset table.
 *
 * Entries are read until a 0xff terminator byte.  Entry 0x0f is a wait
 * for memory channel power-up; any other value programs the SDRAM mode
 * register (16-bit mode bits from the table, reset field from the entry
 * byte itself).
 */
static void combios_parse_ram_reset_table(struct drm_device *dev,
					  uint16_t offset)
{
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	if (offset) {
		uint8_t val = RBIOS8(offset);
		while (val != 0xff) {
			offset++;

			if (val == 0x0f) {
				/* wait for all memory channels to report
				 * power-up complete; R3xx parts use a wider
				 * completion mask.  Bounded to 20000 polls. */
				uint32_t channel_complete_mask;

				if (ASIC_IS_R300(rdev))
					channel_complete_mask =
					    R300_MEM_PWRUP_COMPLETE;
				else
					channel_complete_mask =
					    RADEON_MEM_PWRUP_COMPLETE;
				tmp = 20000;
				while (tmp--) {
					if ((RREG32(RADEON_MEM_STR_CNTL) &
					     channel_complete_mask) ==
					    channel_complete_mask)
						break;
				}
			} else {
				/* program the SDRAM mode bits from the table */
				uint32_t or_mask = RBIOS16(offset);
				offset += 2;

				tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
				tmp &= RADEON_SDRAM_MODE_MASK;
				tmp |= or_mask;
				WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);

				/* then pulse the bank reset field using the
				 * entry byte in the top bits */
				or_mask = val << 24;
				tmp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
				tmp &= RADEON_B3MEM_RESET_MASK;
				tmp |= or_mask;
				WREG32(RADEON_MEM_SDRAM_MODE_REG, tmp);
			}
			val = RBIOS8(offset);
		}
	}
}
2108
2109static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
2110 int mem_addr_mapping)
2111{
2112 struct radeon_device *rdev = dev->dev_private;
2113 uint32_t mem_cntl;
2114 uint32_t mem_size;
2115 uint32_t addr = 0;
2116
2117 mem_cntl = RREG32(RADEON_MEM_CNTL);
2118 if (mem_cntl & RV100_HALF_MODE)
2119 ram /= 2;
2120 mem_size = ram;
2121 mem_cntl &= ~(0xff << 8);
2122 mem_cntl |= (mem_addr_mapping & 0xff) << 8;
2123 WREG32(RADEON_MEM_CNTL, mem_cntl);
2124 RREG32(RADEON_MEM_CNTL);
2125
2126 /* sdram reset ? */
2127
2128 /* something like this???? */
2129 while (ram--) {
2130 addr = ram * 1024 * 1024;
2131 /* write to each page */
2132 WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
2133 WREG32(RADEON_MM_DATA, 0xdeadbeef);
2134 /* read back and verify */
2135 WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
2136 if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
2137 return 0;
2138 }
2139
2140 return mem_size;
2141}
2142
/*
 * Determine the board's memory size from the BIOS tables and program it
 * into CONFIG_MEMSIZE.
 *
 * Tries the detected-memory table first (size in MB at +5, MEM_CNTL
 * value at +1); falls back to probing via the memory-config table.
 * Skipped entirely on IGP parts.
 */
static void combios_write_ram_size(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	uint8_t rev;
	uint16_t offset;
	uint32_t mem_size = 0;
	uint32_t mem_cntl = 0;

	/* should do something smarter here I guess... */
	if (rdev->flags & RADEON_IS_IGP)
		return;

	/* first check detected mem table */
	offset = combios_get_table_offset(dev, COMBIOS_DETECTED_MEM_TABLE);
	if (offset) {
		rev = RBIOS8(offset);
		if (rev < 3) {
			mem_cntl = RBIOS32(offset + 1);
			mem_size = RBIOS16(offset + 5);
			/* only program MEM_CNTL on pre-R200 parts, and never
			 * on the RN50 variants (0x515e/0x5969) */
			if (((rdev->flags & RADEON_FAMILY_MASK) < CHIP_R200) &&
			    ((dev->pdev->device != 0x515e)
			     && (dev->pdev->device != 0x5969)))
				WREG32(RADEON_MEM_CNTL, mem_cntl);
		}
	}

	if (!mem_size) {
		/* fall back to the memory-config table.
		 * NOTE(review): the revision byte is read at offset - 1
		 * here, unlike the table above - confirm against the
		 * COMBIOS layout */
		offset =
		    combios_get_table_offset(dev, COMBIOS_MEM_CONFIG_TABLE);
		if (offset) {
			rev = RBIOS8(offset - 1);
			if (rev < 1) {
				if (((rdev->flags & RADEON_FAMILY_MASK) <
				     CHIP_R200)
				    && ((dev->pdev->device != 0x515e)
					&& (dev->pdev->device != 0x5969))) {
					int ram = 0;
					int mem_addr_mapping = 0;

					/* table holds (size, mapping) byte
					 * pairs; probe each candidate until
					 * one verifies */
					while (RBIOS8(offset)) {
						ram = RBIOS8(offset);
						mem_addr_mapping =
						    RBIOS8(offset + 1);
						if (mem_addr_mapping != 0x25)
							ram *= 2;
						mem_size =
						    combios_detect_ram(dev, ram,
								       mem_addr_mapping);
						if (mem_size)
							break;
						offset += 2;
					}
				} else
					mem_size = RBIOS8(offset);
			} else {
				mem_size = RBIOS8(offset);
				mem_size *= 2;	/* convert to MB */
			}
		}
	}

	mem_size *= (1024 * 1024);	/* convert to bytes */
	WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
}
2207
2208void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable)
2209{
2210 uint16_t dyn_clk_info =
2211 combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
2212
2213 if (dyn_clk_info)
2214 combios_parse_pll_table(dev, dyn_clk_info);
2215}
2216
/*
 * Run the BIOS ASIC initialization sequence for legacy (COMBIOS) boards.
 *
 * Executes the BIOS command tables in a fixed order: ASIC_INIT_1, PLL
 * init, ASIC_INIT_2; on discrete (non-IGP) parts additionally
 * ASIC_INIT_4, RAM reset, ASIC_INIT_3 and the memory-size programming;
 * and finally the DYN_CLK_1 table.  The ordering mirrors what the BIOS
 * itself does - do not reorder.
 */
void radeon_combios_asic_init(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	uint16_t table;

	/* port hardcoded mac stuff from radeonfb */
	if (rdev->bios == NULL)
		return;

	/* ASIC INIT 1 */
	table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_1_TABLE);
	if (table)
		combios_parse_mmio_table(dev, table);

	/* PLL INIT */
	table = combios_get_table_offset(dev, COMBIOS_PLL_INIT_TABLE);
	if (table)
		combios_parse_pll_table(dev, table);

	/* ASIC INIT 2 */
	table = combios_get_table_offset(dev, COMBIOS_ASIC_INIT_2_TABLE);
	if (table)
		combios_parse_mmio_table(dev, table);

	if (!(rdev->flags & RADEON_IS_IGP)) {
		/* ASIC INIT 4 */
		table =
		    combios_get_table_offset(dev, COMBIOS_ASIC_INIT_4_TABLE);
		if (table)
			combios_parse_mmio_table(dev, table);

		/* RAM RESET */
		table = combios_get_table_offset(dev, COMBIOS_RAM_RESET_TABLE);
		if (table)
			combios_parse_ram_reset_table(dev, table);

		/* ASIC INIT 3 */
		table =
		    combios_get_table_offset(dev, COMBIOS_ASIC_INIT_3_TABLE);
		if (table)
			combios_parse_mmio_table(dev, table);

		/* write CONFIG_MEMSIZE */
		combios_write_ram_size(dev);
	}

	/* DYN CLK 1 */
	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
	if (table)
		combios_parse_pll_table(dev, table);

}
2269
2270void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev)
2271{
2272 struct radeon_device *rdev = dev->dev_private;
2273 uint32_t bios_0_scratch, bios_6_scratch, bios_7_scratch;
2274
2275 bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
2276 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
2277 bios_7_scratch = RREG32(RADEON_BIOS_7_SCRATCH);
2278
2279 /* let the bios control the backlight */
2280 bios_0_scratch &= ~RADEON_DRIVER_BRIGHTNESS_EN;
2281
2282 /* tell the bios not to handle mode switching */
2283 bios_6_scratch |= (RADEON_DISPLAY_SWITCHING_DIS |
2284 RADEON_ACC_MODE_CHANGE);
2285
2286 /* tell the bios a driver is loaded */
2287 bios_7_scratch |= RADEON_DRV_LOADED;
2288
2289 WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch);
2290 WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
2291 WREG32(RADEON_BIOS_7_SCRATCH, bios_7_scratch);
2292}
2293
2294void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock)
2295{
2296 struct drm_device *dev = encoder->dev;
2297 struct radeon_device *rdev = dev->dev_private;
2298 uint32_t bios_6_scratch;
2299
2300 bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
2301
2302 if (lock)
2303 bios_6_scratch |= RADEON_DRIVER_CRITICAL;
2304 else
2305 bios_6_scratch &= ~RADEON_DRIVER_CRITICAL;
2306
2307 WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
2308}
2309
/*
 * Update the BIOS scratch registers to reflect a connect/disconnect
 * event.
 *
 * For each output type supported by both the encoder and the connector,
 * sets/clears the "attached" bit in BIOS scratch 4 and the "on" and
 * "acc req" bits in BIOS scratch 5.
 */
void
radeon_combios_connected_scratch_regs(struct drm_connector *connector,
				      struct drm_encoder *encoder,
				      bool connected)
{
	struct drm_device *dev = connector->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector =
	    to_radeon_connector(connector);
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	uint32_t bios_4_scratch = RREG32(RADEON_BIOS_4_SCRATCH);
	uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);

	if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
		if (connected) {
			DRM_DEBUG("TV1 connected\n");
			/* fix me - always reports S-video even for composite.
			 * NOTE(review): the composite variant below is
			 * commented out */
			bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO;
			/*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */
			bios_5_scratch |= RADEON_TV1_ON;
			bios_5_scratch |= RADEON_ACC_REQ_TV1;
		} else {
			DRM_DEBUG("TV1 disconnected\n");
			bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK;
			bios_5_scratch &= ~RADEON_TV1_ON;
			bios_5_scratch &= ~RADEON_ACC_REQ_TV1;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
		if (connected) {
			DRM_DEBUG("LCD1 connected\n");
			bios_4_scratch |= RADEON_LCD1_ATTACHED;
			bios_5_scratch |= RADEON_LCD1_ON;
			bios_5_scratch |= RADEON_ACC_REQ_LCD1;
		} else {
			DRM_DEBUG("LCD1 disconnected\n");
			bios_4_scratch &= ~RADEON_LCD1_ATTACHED;
			bios_5_scratch &= ~RADEON_LCD1_ON;
			bios_5_scratch &= ~RADEON_ACC_REQ_LCD1;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
		if (connected) {
			DRM_DEBUG("CRT1 connected\n");
			bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR;
			bios_5_scratch |= RADEON_CRT1_ON;
			bios_5_scratch |= RADEON_ACC_REQ_CRT1;
		} else {
			DRM_DEBUG("CRT1 disconnected\n");
			bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK;
			bios_5_scratch &= ~RADEON_CRT1_ON;
			bios_5_scratch &= ~RADEON_ACC_REQ_CRT1;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
		if (connected) {
			DRM_DEBUG("CRT2 connected\n");
			bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR;
			bios_5_scratch |= RADEON_CRT2_ON;
			bios_5_scratch |= RADEON_ACC_REQ_CRT2;
		} else {
			DRM_DEBUG("CRT2 disconnected\n");
			bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK;
			bios_5_scratch &= ~RADEON_CRT2_ON;
			bios_5_scratch &= ~RADEON_ACC_REQ_CRT2;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
		if (connected) {
			DRM_DEBUG("DFP1 connected\n");
			bios_4_scratch |= RADEON_DFP1_ATTACHED;
			bios_5_scratch |= RADEON_DFP1_ON;
			bios_5_scratch |= RADEON_ACC_REQ_DFP1;
		} else {
			DRM_DEBUG("DFP1 disconnected\n");
			bios_4_scratch &= ~RADEON_DFP1_ATTACHED;
			bios_5_scratch &= ~RADEON_DFP1_ON;
			bios_5_scratch &= ~RADEON_ACC_REQ_DFP1;
		}
	}
	if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
	    (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
		if (connected) {
			DRM_DEBUG("DFP2 connected\n");
			bios_4_scratch |= RADEON_DFP2_ATTACHED;
			bios_5_scratch |= RADEON_DFP2_ON;
			bios_5_scratch |= RADEON_ACC_REQ_DFP2;
		} else {
			DRM_DEBUG("DFP2 disconnected\n");
			bios_4_scratch &= ~RADEON_DFP2_ATTACHED;
			bios_5_scratch &= ~RADEON_DFP2_ON;
			bios_5_scratch &= ~RADEON_ACC_REQ_DFP2;
		}
	}
	WREG32(RADEON_BIOS_4_SCRATCH, bios_4_scratch);
	WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
}
2412
2413void
2414radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
2415{
2416 struct drm_device *dev = encoder->dev;
2417 struct radeon_device *rdev = dev->dev_private;
2418 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2419 uint32_t bios_5_scratch = RREG32(RADEON_BIOS_5_SCRATCH);
2420
2421 if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
2422 bios_5_scratch &= ~RADEON_TV1_CRTC_MASK;
2423 bios_5_scratch |= (crtc << RADEON_TV1_CRTC_SHIFT);
2424 }
2425 if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
2426 bios_5_scratch &= ~RADEON_CRT1_CRTC_MASK;
2427 bios_5_scratch |= (crtc << RADEON_CRT1_CRTC_SHIFT);
2428 }
2429 if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
2430 bios_5_scratch &= ~RADEON_CRT2_CRTC_MASK;
2431 bios_5_scratch |= (crtc << RADEON_CRT2_CRTC_SHIFT);
2432 }
2433 if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
2434 bios_5_scratch &= ~RADEON_LCD1_CRTC_MASK;
2435 bios_5_scratch |= (crtc << RADEON_LCD1_CRTC_SHIFT);
2436 }
2437 if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) {
2438 bios_5_scratch &= ~RADEON_DFP1_CRTC_MASK;
2439 bios_5_scratch |= (crtc << RADEON_DFP1_CRTC_SHIFT);
2440 }
2441 if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) {
2442 bios_5_scratch &= ~RADEON_DFP2_CRTC_MASK;
2443 bios_5_scratch |= (crtc << RADEON_DFP2_CRTC_SHIFT);
2444 }
2445 WREG32(RADEON_BIOS_5_SCRATCH, bios_5_scratch);
2446}
2447
2448void
2449radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
2450{
2451 struct drm_device *dev = encoder->dev;
2452 struct radeon_device *rdev = dev->dev_private;
2453 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2454 uint32_t bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
2455
2456 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
2457 if (on)
2458 bios_6_scratch |= RADEON_TV_DPMS_ON;
2459 else
2460 bios_6_scratch &= ~RADEON_TV_DPMS_ON;
2461 }
2462 if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
2463 if (on)
2464 bios_6_scratch |= RADEON_CRT_DPMS_ON;
2465 else
2466 bios_6_scratch &= ~RADEON_CRT_DPMS_ON;
2467 }
2468 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
2469 if (on)
2470 bios_6_scratch |= RADEON_LCD_DPMS_ON;
2471 else
2472 bios_6_scratch &= ~RADEON_LCD_DPMS_ON;
2473 }
2474 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
2475 if (on)
2476 bios_6_scratch |= RADEON_DFP_DPMS_ON;
2477 else
2478 bios_6_scratch &= ~RADEON_DFP_DPMS_ON;
2479 }
2480 WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch);
2481}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
new file mode 100644
index 000000000000..70ede6a52d4e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -0,0 +1,603 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "drm_edid.h"
28#include "drm_crtc_helper.h"
29#include "radeon_drm.h"
30#include "radeon.h"
31
32extern void
33radeon_combios_connected_scratch_regs(struct drm_connector *connector,
34 struct drm_encoder *encoder,
35 bool connected);
36extern void
37radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
38 struct drm_encoder *encoder,
39 bool connected);
40
41static void
42radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
43{
44 struct drm_device *dev = connector->dev;
45 struct radeon_device *rdev = dev->dev_private;
46 struct drm_encoder *best_encoder = NULL;
47 struct drm_encoder *encoder = NULL;
48 struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
49 struct drm_mode_object *obj;
50 bool connected;
51 int i;
52
53 best_encoder = connector_funcs->best_encoder(connector);
54
55 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
56 if (connector->encoder_ids[i] == 0)
57 break;
58
59 obj = drm_mode_object_find(connector->dev,
60 connector->encoder_ids[i],
61 DRM_MODE_OBJECT_ENCODER);
62 if (!obj)
63 continue;
64
65 encoder = obj_to_encoder(obj);
66
67 if ((encoder == best_encoder) && (status == connector_status_connected))
68 connected = true;
69 else
70 connected = false;
71
72 if (rdev->is_atom_bios)
73 radeon_atombios_connected_scratch_regs(connector, encoder, connected);
74 else
75 radeon_combios_connected_scratch_regs(connector, encoder, connected);
76
77 }
78}
79
80struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
81{
82 int enc_id = connector->encoder_ids[0];
83 struct drm_mode_object *obj;
84 struct drm_encoder *encoder;
85
86 /* pick the encoder ids */
87 if (enc_id) {
88 obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
89 if (!obj)
90 return NULL;
91 encoder = obj_to_encoder(obj);
92 return encoder;
93 }
94 return NULL;
95}
96
97static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder)
98{
99 struct drm_device *dev = encoder->dev;
100 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
101 struct drm_display_mode *mode = NULL;
102 struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
103
104 if (native_mode->panel_xres != 0 &&
105 native_mode->panel_yres != 0 &&
106 native_mode->dotclock != 0) {
107 mode = drm_mode_create(dev);
108
109 mode->hdisplay = native_mode->panel_xres;
110 mode->vdisplay = native_mode->panel_yres;
111
112 mode->htotal = mode->hdisplay + native_mode->hblank;
113 mode->hsync_start = mode->hdisplay + native_mode->hoverplus;
114 mode->hsync_end = mode->hsync_start + native_mode->hsync_width;
115 mode->vtotal = mode->vdisplay + native_mode->vblank;
116 mode->vsync_start = mode->vdisplay + native_mode->voverplus;
117 mode->vsync_end = mode->vsync_start + native_mode->vsync_width;
118 mode->clock = native_mode->dotclock;
119 mode->flags = 0;
120
121 mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
122 drm_mode_set_name(mode);
123
124 DRM_DEBUG("Adding native panel mode %s\n", mode->name);
125 }
126 return mode;
127}
128
129int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
130 uint64_t val)
131{
132 return 0;
133}
134
135
136static int radeon_lvds_get_modes(struct drm_connector *connector)
137{
138 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
139 struct drm_encoder *encoder;
140 int ret = 0;
141 struct drm_display_mode *mode;
142
143 if (radeon_connector->ddc_bus) {
144 ret = radeon_ddc_get_modes(radeon_connector);
145 if (ret > 0) {
146 return ret;
147 }
148 }
149
150 encoder = radeon_best_single_encoder(connector);
151 if (!encoder)
152 return 0;
153
154 /* we have no EDID modes */
155 mode = radeon_fp_native_mode(encoder);
156 if (mode) {
157 ret = 1;
158 drm_mode_probed_add(connector, mode);
159 }
160 return ret;
161}
162
163static int radeon_lvds_mode_valid(struct drm_connector *connector,
164 struct drm_display_mode *mode)
165{
166 return MODE_OK;
167}
168
169static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector)
170{
171 enum drm_connector_status ret = connector_status_connected;
172 /* check acpi lid status ??? */
173 radeon_connector_update_scratch_regs(connector, ret);
174 return ret;
175}
176
177static void radeon_connector_destroy(struct drm_connector *connector)
178{
179 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
180
181 if (radeon_connector->ddc_bus)
182 radeon_i2c_destroy(radeon_connector->ddc_bus);
183 kfree(radeon_connector->con_priv);
184 drm_sysfs_connector_remove(connector);
185 drm_connector_cleanup(connector);
186 kfree(connector);
187}
188
189struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
190 .get_modes = radeon_lvds_get_modes,
191 .mode_valid = radeon_lvds_mode_valid,
192 .best_encoder = radeon_best_single_encoder,
193};
194
195struct drm_connector_funcs radeon_lvds_connector_funcs = {
196 .dpms = drm_helper_connector_dpms,
197 .detect = radeon_lvds_detect,
198 .fill_modes = drm_helper_probe_single_connector_modes,
199 .destroy = radeon_connector_destroy,
200 .set_property = radeon_connector_set_property,
201};
202
/* VGA mode probing relies entirely on EDID over DDC. */
static int radeon_vga_get_modes(struct drm_connector *connector)
{
	return radeon_ddc_get_modes(to_radeon_connector(connector));
}
212
213static int radeon_vga_mode_valid(struct drm_connector *connector,
214 struct drm_display_mode *mode)
215{
216
217 return MODE_OK;
218}
219
220static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector)
221{
222 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
223 struct drm_encoder *encoder;
224 struct drm_encoder_helper_funcs *encoder_funcs;
225 bool dret;
226 enum drm_connector_status ret = connector_status_disconnected;
227
228 radeon_i2c_do_lock(radeon_connector, 1);
229 dret = radeon_ddc_probe(radeon_connector);
230 radeon_i2c_do_lock(radeon_connector, 0);
231 if (dret)
232 ret = connector_status_connected;
233 else {
234 /* if EDID fails to a load detect */
235 encoder = radeon_best_single_encoder(connector);
236 if (!encoder)
237 ret = connector_status_disconnected;
238 else {
239 encoder_funcs = encoder->helper_private;
240 ret = encoder_funcs->detect(encoder, connector);
241 }
242 }
243
244 radeon_connector_update_scratch_regs(connector, ret);
245 return ret;
246}
247
248struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
249 .get_modes = radeon_vga_get_modes,
250 .mode_valid = radeon_vga_mode_valid,
251 .best_encoder = radeon_best_single_encoder,
252};
253
254struct drm_connector_funcs radeon_vga_connector_funcs = {
255 .dpms = drm_helper_connector_dpms,
256 .detect = radeon_vga_detect,
257 .fill_modes = drm_helper_probe_single_connector_modes,
258 .destroy = radeon_connector_destroy,
259 .set_property = radeon_connector_set_property,
260};
261
262static int radeon_dvi_get_modes(struct drm_connector *connector)
263{
264 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
265 int ret;
266
267 ret = radeon_ddc_get_modes(radeon_connector);
268 /* reset scratch regs here since radeon_dvi_detect doesn't check digital bit */
269 radeon_connector_update_scratch_regs(connector, connector_status_connected);
270 return ret;
271}
272
273static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector)
274{
275 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
276 struct drm_encoder *encoder;
277 struct drm_encoder_helper_funcs *encoder_funcs;
278 struct drm_mode_object *obj;
279 int i;
280 enum drm_connector_status ret = connector_status_disconnected;
281 bool dret;
282
283 radeon_i2c_do_lock(radeon_connector, 1);
284 dret = radeon_ddc_probe(radeon_connector);
285 radeon_i2c_do_lock(radeon_connector, 0);
286 if (dret)
287 ret = connector_status_connected;
288 else {
289 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
290 if (connector->encoder_ids[i] == 0)
291 break;
292
293 obj = drm_mode_object_find(connector->dev,
294 connector->encoder_ids[i],
295 DRM_MODE_OBJECT_ENCODER);
296 if (!obj)
297 continue;
298
299 encoder = obj_to_encoder(obj);
300
301 encoder_funcs = encoder->helper_private;
302 if (encoder_funcs->detect) {
303 ret = encoder_funcs->detect(encoder, connector);
304 if (ret == connector_status_connected) {
305 radeon_connector->use_digital = 0;
306 break;
307 }
308 }
309 }
310 }
311
312 /* updated in get modes as well since we need to know if it's analog or digital */
313 radeon_connector_update_scratch_regs(connector, ret);
314 return ret;
315}
316
317/* okay need to be smart in here about which encoder to pick */
318struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
319{
320 int enc_id = connector->encoder_ids[0];
321 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
322 struct drm_mode_object *obj;
323 struct drm_encoder *encoder;
324 int i;
325 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
326 if (connector->encoder_ids[i] == 0)
327 break;
328
329 obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
330 if (!obj)
331 continue;
332
333 encoder = obj_to_encoder(obj);
334
335 if (radeon_connector->use_digital) {
336 if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
337 return encoder;
338 } else {
339 if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
340 encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
341 return encoder;
342 }
343 }
344
345 /* see if we have a default encoder TODO */
346
347 /* then check use digitial */
348 /* pick the first one */
349 if (enc_id) {
350 obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
351 if (!obj)
352 return NULL;
353 encoder = obj_to_encoder(obj);
354 return encoder;
355 }
356 return NULL;
357}
358
359struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
360 .get_modes = radeon_dvi_get_modes,
361 .mode_valid = radeon_vga_mode_valid,
362 .best_encoder = radeon_dvi_encoder,
363};
364
365struct drm_connector_funcs radeon_dvi_connector_funcs = {
366 .dpms = drm_helper_connector_dpms,
367 .detect = radeon_dvi_detect,
368 .fill_modes = drm_helper_probe_single_connector_modes,
369 .set_property = radeon_connector_set_property,
370 .destroy = radeon_connector_destroy,
371};
372
373void
374radeon_add_atom_connector(struct drm_device *dev,
375 uint32_t connector_id,
376 uint32_t supported_device,
377 int connector_type,
378 struct radeon_i2c_bus_rec *i2c_bus,
379 bool linkb,
380 uint32_t igp_lane_info)
381{
382 struct drm_connector *connector;
383 struct radeon_connector *radeon_connector;
384 struct radeon_connector_atom_dig *radeon_dig_connector;
385 uint32_t subpixel_order = SubPixelNone;
386
387 /* fixme - tv/cv/din */
388 if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
389 (connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
390 (connector_type == DRM_MODE_CONNECTOR_Composite) ||
391 (connector_type == DRM_MODE_CONNECTOR_9PinDIN))
392 return;
393
394 /* see if we already added it */
395 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
396 radeon_connector = to_radeon_connector(connector);
397 if (radeon_connector->connector_id == connector_id) {
398 radeon_connector->devices |= supported_device;
399 return;
400 }
401 }
402
403 radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
404 if (!radeon_connector)
405 return;
406
407 connector = &radeon_connector->base;
408
409 radeon_connector->connector_id = connector_id;
410 radeon_connector->devices = supported_device;
411 switch (connector_type) {
412 case DRM_MODE_CONNECTOR_VGA:
413 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
414 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
415 if (i2c_bus->valid) {
416 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
417 if (!radeon_connector->ddc_bus)
418 goto failed;
419 }
420 break;
421 case DRM_MODE_CONNECTOR_DVIA:
422 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
423 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
424 if (i2c_bus->valid) {
425 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
426 if (!radeon_connector->ddc_bus)
427 goto failed;
428 }
429 break;
430 case DRM_MODE_CONNECTOR_DVII:
431 case DRM_MODE_CONNECTOR_DVID:
432 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
433 if (!radeon_dig_connector)
434 goto failed;
435 radeon_dig_connector->linkb = linkb;
436 radeon_dig_connector->igp_lane_info = igp_lane_info;
437 radeon_connector->con_priv = radeon_dig_connector;
438 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
439 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
440 if (i2c_bus->valid) {
441 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
442 if (!radeon_connector->ddc_bus)
443 goto failed;
444 }
445 subpixel_order = SubPixelHorizontalRGB;
446 break;
447 case DRM_MODE_CONNECTOR_HDMIA:
448 case DRM_MODE_CONNECTOR_HDMIB:
449 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
450 if (!radeon_dig_connector)
451 goto failed;
452 radeon_dig_connector->linkb = linkb;
453 radeon_dig_connector->igp_lane_info = igp_lane_info;
454 radeon_connector->con_priv = radeon_dig_connector;
455 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
456 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
457 if (i2c_bus->valid) {
458 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
459 if (!radeon_connector->ddc_bus)
460 goto failed;
461 }
462 subpixel_order = SubPixelHorizontalRGB;
463 break;
464 case DRM_MODE_CONNECTOR_DisplayPort:
465 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
466 if (!radeon_dig_connector)
467 goto failed;
468 radeon_dig_connector->linkb = linkb;
469 radeon_dig_connector->igp_lane_info = igp_lane_info;
470 radeon_connector->con_priv = radeon_dig_connector;
471 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
472 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
473 if (i2c_bus->valid) {
474 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
475 if (!radeon_connector->ddc_bus)
476 goto failed;
477 }
478 subpixel_order = SubPixelHorizontalRGB;
479 break;
480 case DRM_MODE_CONNECTOR_SVIDEO:
481 case DRM_MODE_CONNECTOR_Composite:
482 case DRM_MODE_CONNECTOR_9PinDIN:
483 break;
484 case DRM_MODE_CONNECTOR_LVDS:
485 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
486 if (!radeon_dig_connector)
487 goto failed;
488 radeon_dig_connector->linkb = linkb;
489 radeon_dig_connector->igp_lane_info = igp_lane_info;
490 radeon_connector->con_priv = radeon_dig_connector;
491 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
492 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
493 if (i2c_bus->valid) {
494 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
495 if (!radeon_connector->ddc_bus)
496 goto failed;
497 }
498 subpixel_order = SubPixelHorizontalRGB;
499 break;
500 }
501
502 connector->display_info.subpixel_order = subpixel_order;
503 drm_sysfs_connector_add(connector);
504 return;
505
506failed:
507 if (radeon_connector->ddc_bus)
508 radeon_i2c_destroy(radeon_connector->ddc_bus);
509 drm_connector_cleanup(connector);
510 kfree(connector);
511}
512
513void
514radeon_add_legacy_connector(struct drm_device *dev,
515 uint32_t connector_id,
516 uint32_t supported_device,
517 int connector_type,
518 struct radeon_i2c_bus_rec *i2c_bus)
519{
520 struct drm_connector *connector;
521 struct radeon_connector *radeon_connector;
522 uint32_t subpixel_order = SubPixelNone;
523
524 /* fixme - tv/cv/din */
525 if ((connector_type == DRM_MODE_CONNECTOR_Unknown) ||
526 (connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
527 (connector_type == DRM_MODE_CONNECTOR_Composite) ||
528 (connector_type == DRM_MODE_CONNECTOR_9PinDIN))
529 return;
530
531 /* see if we already added it */
532 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
533 radeon_connector = to_radeon_connector(connector);
534 if (radeon_connector->connector_id == connector_id) {
535 radeon_connector->devices |= supported_device;
536 return;
537 }
538 }
539
540 radeon_connector = kzalloc(sizeof(struct radeon_connector), GFP_KERNEL);
541 if (!radeon_connector)
542 return;
543
544 connector = &radeon_connector->base;
545
546 radeon_connector->connector_id = connector_id;
547 radeon_connector->devices = supported_device;
548 switch (connector_type) {
549 case DRM_MODE_CONNECTOR_VGA:
550 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
551 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
552 if (i2c_bus->valid) {
553 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
554 if (!radeon_connector->ddc_bus)
555 goto failed;
556 }
557 break;
558 case DRM_MODE_CONNECTOR_DVIA:
559 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
560 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
561 if (i2c_bus->valid) {
562 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
563 if (!radeon_connector->ddc_bus)
564 goto failed;
565 }
566 break;
567 case DRM_MODE_CONNECTOR_DVII:
568 case DRM_MODE_CONNECTOR_DVID:
569 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
570 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
571 if (i2c_bus->valid) {
572 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
573 if (!radeon_connector->ddc_bus)
574 goto failed;
575 }
576 subpixel_order = SubPixelHorizontalRGB;
577 break;
578 case DRM_MODE_CONNECTOR_SVIDEO:
579 case DRM_MODE_CONNECTOR_Composite:
580 case DRM_MODE_CONNECTOR_9PinDIN:
581 break;
582 case DRM_MODE_CONNECTOR_LVDS:
583 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
584 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
585 if (i2c_bus->valid) {
586 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
587 if (!radeon_connector->ddc_bus)
588 goto failed;
589 }
590 subpixel_order = SubPixelHorizontalRGB;
591 break;
592 }
593
594 connector->display_info.subpixel_order = subpixel_order;
595 drm_sysfs_connector_add(connector);
596 return;
597
598failed:
599 if (radeon_connector->ddc_bus)
600 radeon_i2c_destroy(radeon_connector->ddc_bus);
601 drm_connector_cleanup(connector);
602 kfree(connector);
603}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
new file mode 100644
index 000000000000..b843f9bdfb14
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -0,0 +1,249 @@
1/*
2 * Copyright 2008 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Jerome Glisse <glisse@freedesktop.org>
26 */
27#include "drmP.h"
28#include "radeon_drm.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32void r100_cs_dump_packet(struct radeon_cs_parser *p,
33 struct radeon_cs_packet *pkt);
34
35int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
36{
37 struct drm_device *ddev = p->rdev->ddev;
38 struct radeon_cs_chunk *chunk;
39 unsigned i, j;
40 bool duplicate;
41
42 if (p->chunk_relocs_idx == -1) {
43 return 0;
44 }
45 chunk = &p->chunks[p->chunk_relocs_idx];
46 /* FIXME: we assume that each relocs use 4 dwords */
47 p->nrelocs = chunk->length_dw / 4;
48 p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
49 if (p->relocs_ptr == NULL) {
50 return -ENOMEM;
51 }
52 p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
53 if (p->relocs == NULL) {
54 return -ENOMEM;
55 }
56 for (i = 0; i < p->nrelocs; i++) {
57 struct drm_radeon_cs_reloc *r;
58
59 duplicate = false;
60 r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
61 for (j = 0; j < p->nrelocs; j++) {
62 if (r->handle == p->relocs[j].handle) {
63 p->relocs_ptr[i] = &p->relocs[j];
64 duplicate = true;
65 break;
66 }
67 }
68 if (!duplicate) {
69 p->relocs[i].gobj = drm_gem_object_lookup(ddev,
70 p->filp,
71 r->handle);
72 if (p->relocs[i].gobj == NULL) {
73 DRM_ERROR("gem object lookup failed 0x%x\n",
74 r->handle);
75 return -EINVAL;
76 }
77 p->relocs_ptr[i] = &p->relocs[i];
78 p->relocs[i].robj = p->relocs[i].gobj->driver_private;
79 p->relocs[i].lobj.robj = p->relocs[i].robj;
80 p->relocs[i].lobj.rdomain = r->read_domains;
81 p->relocs[i].lobj.wdomain = r->write_domain;
82 p->relocs[i].handle = r->handle;
83 p->relocs[i].flags = r->flags;
84 INIT_LIST_HEAD(&p->relocs[i].lobj.list);
85 radeon_object_list_add_object(&p->relocs[i].lobj,
86 &p->validated);
87 }
88 }
89 return radeon_object_list_validate(&p->validated, p->ib->fence);
90}
91
92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
93{
94 struct drm_radeon_cs *cs = data;
95 uint64_t *chunk_array_ptr;
96 unsigned size, i;
97
98 if (!cs->num_chunks) {
99 return 0;
100 }
101 /* get chunks */
102 INIT_LIST_HEAD(&p->validated);
103 p->idx = 0;
104 p->chunk_ib_idx = -1;
105 p->chunk_relocs_idx = -1;
106 p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
107 if (p->chunks_array == NULL) {
108 return -ENOMEM;
109 }
110 chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
111 if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
112 sizeof(uint64_t)*cs->num_chunks)) {
113 return -EFAULT;
114 }
115 p->nchunks = cs->num_chunks;
116 p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
117 if (p->chunks == NULL) {
118 return -ENOMEM;
119 }
120 for (i = 0; i < p->nchunks; i++) {
121 struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
122 struct drm_radeon_cs_chunk user_chunk;
123 uint32_t __user *cdata;
124
125 chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
126 if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
127 sizeof(struct drm_radeon_cs_chunk))) {
128 return -EFAULT;
129 }
130 p->chunks[i].chunk_id = user_chunk.chunk_id;
131 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
132 p->chunk_relocs_idx = i;
133 }
134 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
135 p->chunk_ib_idx = i;
136 }
137 p->chunks[i].length_dw = user_chunk.length_dw;
138 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
139
140 p->chunks[i].kdata = NULL;
141 size = p->chunks[i].length_dw * sizeof(uint32_t);
142 p->chunks[i].kdata = kzalloc(size, GFP_KERNEL);
143 if (p->chunks[i].kdata == NULL) {
144 return -ENOMEM;
145 }
146 if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) {
147 return -EFAULT;
148 }
149 }
150 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
151 DRM_ERROR("cs IB too big: %d\n",
152 p->chunks[p->chunk_ib_idx].length_dw);
153 return -EINVAL;
154 }
155 return 0;
156}
157
158/**
159 * cs_parser_fini() - clean parser states
160 * @parser: parser structure holding parsing context.
161 * @error: error number
162 *
163 * If error is set than unvalidate buffer, otherwise just free memory
164 * used by parsing context.
165 **/
166static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
167{
168 unsigned i;
169
170 if (error) {
171 radeon_object_list_unvalidate(&parser->validated);
172 } else {
173 radeon_object_list_clean(&parser->validated);
174 }
175 for (i = 0; i < parser->nrelocs; i++) {
176 if (parser->relocs[i].gobj) {
177 mutex_lock(&parser->rdev->ddev->struct_mutex);
178 drm_gem_object_unreference(parser->relocs[i].gobj);
179 mutex_unlock(&parser->rdev->ddev->struct_mutex);
180 }
181 }
182 kfree(parser->relocs);
183 kfree(parser->relocs_ptr);
184 for (i = 0; i < parser->nchunks; i++) {
185 kfree(parser->chunks[i].kdata);
186 }
187 kfree(parser->chunks);
188 kfree(parser->chunks_array);
189 radeon_ib_free(parser->rdev, &parser->ib);
190}
191
192int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
193{
194 struct radeon_device *rdev = dev->dev_private;
195 struct radeon_cs_parser parser;
196 struct radeon_cs_chunk *ib_chunk;
197 int r;
198
199 mutex_lock(&rdev->cs_mutex);
200 if (rdev->gpu_lockup) {
201 mutex_unlock(&rdev->cs_mutex);
202 return -EINVAL;
203 }
204 /* initialize parser */
205 memset(&parser, 0, sizeof(struct radeon_cs_parser));
206 parser.filp = filp;
207 parser.rdev = rdev;
208 r = radeon_cs_parser_init(&parser, data);
209 if (r) {
210 DRM_ERROR("Failed to initialize parser !\n");
211 radeon_cs_parser_fini(&parser, r);
212 mutex_unlock(&rdev->cs_mutex);
213 return r;
214 }
215 r = radeon_ib_get(rdev, &parser.ib);
216 if (r) {
217 DRM_ERROR("Failed to get ib !\n");
218 radeon_cs_parser_fini(&parser, r);
219 mutex_unlock(&rdev->cs_mutex);
220 return r;
221 }
222 r = radeon_cs_parser_relocs(&parser);
223 if (r) {
224 DRM_ERROR("Failed to parse relocation !\n");
225 radeon_cs_parser_fini(&parser, r);
226 mutex_unlock(&rdev->cs_mutex);
227 return r;
228 }
229 /* Copy the packet into the IB, the parser will read from the
230 * input memory (cached) and write to the IB (which can be
231 * uncached). */
232 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
233 parser.ib->length_dw = ib_chunk->length_dw;
234 memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
235 r = radeon_cs_parse(&parser);
236 if (r) {
237 DRM_ERROR("Invalid command stream !\n");
238 radeon_cs_parser_fini(&parser, r);
239 mutex_unlock(&rdev->cs_mutex);
240 return r;
241 }
242 r = radeon_ib_schedule(rdev, parser.ib);
243 if (r) {
244 DRM_ERROR("Faild to schedule IB !\n");
245 }
246 radeon_cs_parser_fini(&parser, r);
247 mutex_unlock(&rdev->cs_mutex);
248 return r;
249}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
new file mode 100644
index 000000000000..5232441f119b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -0,0 +1,252 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "radeon_drm.h"
28#include "radeon.h"
29
30#define CURSOR_WIDTH 64
31#define CURSOR_HEIGHT 64
32
33static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
34{
35 struct radeon_device *rdev = crtc->dev->dev_private;
36 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
37 uint32_t cur_lock;
38
39 if (ASIC_IS_AVIVO(rdev)) {
40 cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
41 if (lock)
42 cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
43 else
44 cur_lock &= ~AVIVO_D1CURSOR_UPDATE_LOCK;
45 WREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
46 } else {
47 cur_lock = RREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset);
48 if (lock)
49 cur_lock |= RADEON_CUR_LOCK;
50 else
51 cur_lock &= ~RADEON_CUR_LOCK;
52 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, cur_lock);
53 }
54}
55
56static void radeon_hide_cursor(struct drm_crtc *crtc)
57{
58 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
59 struct radeon_device *rdev = crtc->dev->dev_private;
60
61 if (ASIC_IS_AVIVO(rdev)) {
62 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
63 WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
64 } else {
65 switch (radeon_crtc->crtc_id) {
66 case 0:
67 WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
68 break;
69 case 1:
70 WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
71 break;
72 default:
73 return;
74 }
75 WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
76 }
77}
78
79static void radeon_show_cursor(struct drm_crtc *crtc)
80{
81 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
82 struct radeon_device *rdev = crtc->dev->dev_private;
83
84 if (ASIC_IS_AVIVO(rdev)) {
85 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
86 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
87 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
88 } else {
89 switch (radeon_crtc->crtc_id) {
90 case 0:
91 WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
92 break;
93 case 1:
94 WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
95 break;
96 default:
97 return;
98 }
99
100 WREG32_P(RADEON_MM_DATA, (RADEON_CRTC_CUR_EN |
101 (RADEON_CRTC_CUR_MODE_24BPP << RADEON_CRTC_CUR_MODE_SHIFT)),
102 ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_CUR_MODE_MASK));
103 }
104}
105
106static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
107 uint32_t gpu_addr)
108{
109 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
110 struct radeon_device *rdev = crtc->dev->dev_private;
111
112 if (ASIC_IS_AVIVO(rdev))
113 WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
114 else
115 /* offset is from DISP(2)_BASE_ADDRESS */
116 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, gpu_addr);
117}
118
119int radeon_crtc_cursor_set(struct drm_crtc *crtc,
120 struct drm_file *file_priv,
121 uint32_t handle,
122 uint32_t width,
123 uint32_t height)
124{
125 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
126 struct drm_gem_object *obj;
127 uint64_t gpu_addr;
128 int ret;
129
130 if (!handle) {
131 /* turn off cursor */
132 radeon_hide_cursor(crtc);
133 obj = NULL;
134 goto unpin;
135 }
136
137 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
138 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
139 return -EINVAL;
140 }
141
142 radeon_crtc->cursor_width = width;
143 radeon_crtc->cursor_height = height;
144
145 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
146 if (!obj) {
147 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
148 return -EINVAL;
149 }
150
151 ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
152 if (ret)
153 goto fail;
154
155 radeon_lock_cursor(crtc, true);
156 /* XXX only 27 bit offset for legacy cursor */
157 radeon_set_cursor(crtc, obj, gpu_addr);
158 radeon_show_cursor(crtc);
159 radeon_lock_cursor(crtc, false);
160
161unpin:
162 if (radeon_crtc->cursor_bo) {
163 radeon_gem_object_unpin(radeon_crtc->cursor_bo);
164 mutex_lock(&crtc->dev->struct_mutex);
165 drm_gem_object_unreference(radeon_crtc->cursor_bo);
166 mutex_unlock(&crtc->dev->struct_mutex);
167 }
168
169 radeon_crtc->cursor_bo = obj;
170 return 0;
171fail:
172 mutex_lock(&crtc->dev->struct_mutex);
173 drm_gem_object_unreference(obj);
174 mutex_unlock(&crtc->dev->struct_mutex);
175
176 return 0;
177}
178
179int radeon_crtc_cursor_move(struct drm_crtc *crtc,
180 int x, int y)
181{
182 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
183 struct radeon_device *rdev = crtc->dev->dev_private;
184 int xorigin = 0, yorigin = 0;
185
186 if (x < 0)
187 xorigin = -x + 1;
188 if (y < 0)
189 yorigin = -y + 1;
190 if (xorigin >= CURSOR_WIDTH)
191 xorigin = CURSOR_WIDTH - 1;
192 if (yorigin >= CURSOR_HEIGHT)
193 yorigin = CURSOR_HEIGHT - 1;
194
195 radeon_lock_cursor(crtc, true);
196 if (ASIC_IS_AVIVO(rdev)) {
197 int w = radeon_crtc->cursor_width;
198 int i = 0;
199 struct drm_crtc *crtc_p;
200
201 /* avivo cursor are offset into the total surface */
202 x += crtc->x;
203 y += crtc->y;
204 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
205
206 /* avivo cursor image can't end on 128 pixel boundry or
207 * go past the end of the frame if both crtcs are enabled
208 */
209 list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
210 if (crtc_p->enabled)
211 i++;
212 }
213 if (i > 1) {
214 int cursor_end, frame_end;
215
216 cursor_end = x - xorigin + w;
217 frame_end = crtc->x + crtc->mode.crtc_hdisplay;
218 if (cursor_end >= frame_end) {
219 w = w - (cursor_end - frame_end);
220 if (!(frame_end & 0x7f))
221 w--;
222 } else {
223 if (!(cursor_end & 0x7f))
224 w--;
225 }
226 if (w <= 0)
227 w = 1;
228 }
229
230 WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset,
231 ((xorigin ? 0 : x) << 16) |
232 (yorigin ? 0 : y));
233 WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
234 WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
235 ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
236 } else {
237 if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
238 y *= 2;
239
240 WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
241 (RADEON_CUR_LOCK
242 | (xorigin << 16)
243 | yorigin));
244 WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
245 (RADEON_CUR_LOCK
246 | ((xorigin ? 0 : x) << 16)
247 | (yorigin ? 0 : y)));
248 }
249 radeon_lock_cursor(crtc, false);
250
251 return 0;
252}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
new file mode 100644
index 000000000000..5fd2b639bf66
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -0,0 +1,813 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
29#include <drm/drmP.h>
30#include <drm/drm_crtc_helper.h>
31#include <drm/radeon_drm.h>
32#include "radeon_reg.h"
33#include "radeon.h"
34#include "radeon_asic.h"
35#include "atom.h"
36
37/*
38 * GPU scratch registers helpers function.
39 */
40static void radeon_scratch_init(struct radeon_device *rdev)
41{
42 int i;
43
44 /* FIXME: check this out */
45 if (rdev->family < CHIP_R300) {
46 rdev->scratch.num_reg = 5;
47 } else {
48 rdev->scratch.num_reg = 7;
49 }
50 for (i = 0; i < rdev->scratch.num_reg; i++) {
51 rdev->scratch.free[i] = true;
52 rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
53 }
54}
55
56int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
57{
58 int i;
59
60 for (i = 0; i < rdev->scratch.num_reg; i++) {
61 if (rdev->scratch.free[i]) {
62 rdev->scratch.free[i] = false;
63 *reg = rdev->scratch.reg[i];
64 return 0;
65 }
66 }
67 return -EINVAL;
68}
69
70void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
71{
72 int i;
73
74 for (i = 0; i < rdev->scratch.num_reg; i++) {
75 if (rdev->scratch.reg[i] == reg) {
76 rdev->scratch.free[i] = true;
77 return;
78 }
79 }
80}
81
82/*
83 * MC common functions
84 */
85int radeon_mc_setup(struct radeon_device *rdev)
86{
87 uint32_t tmp;
88
89 /* Some chips have an "issue" with the memory controller, the
90 * location must be aligned to the size. We just align it down,
91 * too bad if we walk over the top of system memory, we don't
92 * use DMA without a remapped anyway.
93 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
94 */
95 /* FGLRX seems to setup like this, VRAM a 0, then GART.
96 */
97 /*
98 * Note: from R6xx the address space is 40bits but here we only
99 * use 32bits (still have to see a card which would exhaust 4G
100 * address space).
101 */
102 if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
103 /* vram location was already setup try to put gtt after
104 * if it fits */
105 tmp = rdev->mc.vram_location + rdev->mc.vram_size;
106 tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
107 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
108 rdev->mc.gtt_location = tmp;
109 } else {
110 if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
111 printk(KERN_ERR "[drm] GTT too big to fit "
112 "before or after vram location.\n");
113 return -EINVAL;
114 }
115 rdev->mc.gtt_location = 0;
116 }
117 } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
118 /* gtt location was already setup try to put vram before
119 * if it fits */
120 if (rdev->mc.vram_size < rdev->mc.gtt_location) {
121 rdev->mc.vram_location = 0;
122 } else {
123 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
124 tmp += (rdev->mc.vram_size - 1);
125 tmp &= ~(rdev->mc.vram_size - 1);
126 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) {
127 rdev->mc.vram_location = tmp;
128 } else {
129 printk(KERN_ERR "[drm] vram too big to fit "
130 "before or after GTT location.\n");
131 return -EINVAL;
132 }
133 }
134 } else {
135 rdev->mc.vram_location = 0;
136 rdev->mc.gtt_location = rdev->mc.vram_size;
137 }
138 DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20);
139 DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
140 rdev->mc.vram_location,
141 rdev->mc.vram_location + rdev->mc.vram_size - 1);
142 DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
143 DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
144 rdev->mc.gtt_location,
145 rdev->mc.gtt_location + rdev->mc.gtt_size - 1);
146 return 0;
147}
148
149
150/*
151 * GPU helpers function.
152 */
153static bool radeon_card_posted(struct radeon_device *rdev)
154{
155 uint32_t reg;
156
157 /* first check CRTCs */
158 if (ASIC_IS_AVIVO(rdev)) {
159 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
160 RREG32(AVIVO_D2CRTC_CONTROL);
161 if (reg & AVIVO_CRTC_EN) {
162 return true;
163 }
164 } else {
165 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
166 RREG32(RADEON_CRTC2_GEN_CNTL);
167 if (reg & RADEON_CRTC_EN) {
168 return true;
169 }
170 }
171
172 /* then check MEM_SIZE, in case the crtcs are off */
173 if (rdev->family >= CHIP_R600)
174 reg = RREG32(R600_CONFIG_MEMSIZE);
175 else
176 reg = RREG32(RADEON_CONFIG_MEMSIZE);
177
178 if (reg)
179 return true;
180
181 return false;
182
183}
184
185
186/*
187 * Registers accessors functions.
188 */
189uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
190{
191 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
192 BUG_ON(1);
193 return 0;
194}
195
196void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
197{
198 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
199 reg, v);
200 BUG_ON(1);
201}
202
203void radeon_register_accessor_init(struct radeon_device *rdev)
204{
205 rdev->mm_rreg = &r100_mm_rreg;
206 rdev->mm_wreg = &r100_mm_wreg;
207 rdev->mc_rreg = &radeon_invalid_rreg;
208 rdev->mc_wreg = &radeon_invalid_wreg;
209 rdev->pll_rreg = &radeon_invalid_rreg;
210 rdev->pll_wreg = &radeon_invalid_wreg;
211 rdev->pcie_rreg = &radeon_invalid_rreg;
212 rdev->pcie_wreg = &radeon_invalid_wreg;
213 rdev->pciep_rreg = &radeon_invalid_rreg;
214 rdev->pciep_wreg = &radeon_invalid_wreg;
215
216 /* Don't change order as we are overridding accessor. */
217 if (rdev->family < CHIP_RV515) {
218 rdev->pcie_rreg = &rv370_pcie_rreg;
219 rdev->pcie_wreg = &rv370_pcie_wreg;
220 }
221 if (rdev->family >= CHIP_RV515) {
222 rdev->pcie_rreg = &rv515_pcie_rreg;
223 rdev->pcie_wreg = &rv515_pcie_wreg;
224 }
225 /* FIXME: not sure here */
226 if (rdev->family <= CHIP_R580) {
227 rdev->pll_rreg = &r100_pll_rreg;
228 rdev->pll_wreg = &r100_pll_wreg;
229 }
230 if (rdev->family >= CHIP_RV515) {
231 rdev->mc_rreg = &rv515_mc_rreg;
232 rdev->mc_wreg = &rv515_mc_wreg;
233 }
234 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
235 rdev->mc_rreg = &rs400_mc_rreg;
236 rdev->mc_wreg = &rs400_mc_wreg;
237 }
238 if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
239 rdev->mc_rreg = &rs690_mc_rreg;
240 rdev->mc_wreg = &rs690_mc_wreg;
241 }
242 if (rdev->family == CHIP_RS600) {
243 rdev->mc_rreg = &rs600_mc_rreg;
244 rdev->mc_wreg = &rs600_mc_wreg;
245 }
246 if (rdev->family >= CHIP_R600) {
247 rdev->pciep_rreg = &r600_pciep_rreg;
248 rdev->pciep_wreg = &r600_pciep_wreg;
249 }
250}
251
252
253/*
254 * ASIC
255 */
256int radeon_asic_init(struct radeon_device *rdev)
257{
258 radeon_register_accessor_init(rdev);
259 switch (rdev->family) {
260 case CHIP_R100:
261 case CHIP_RV100:
262 case CHIP_RS100:
263 case CHIP_RV200:
264 case CHIP_RS200:
265 case CHIP_R200:
266 case CHIP_RV250:
267 case CHIP_RS300:
268 case CHIP_RV280:
269 rdev->asic = &r100_asic;
270 break;
271 case CHIP_R300:
272 case CHIP_R350:
273 case CHIP_RV350:
274 case CHIP_RV380:
275 rdev->asic = &r300_asic;
276 break;
277 case CHIP_R420:
278 case CHIP_R423:
279 case CHIP_RV410:
280 rdev->asic = &r420_asic;
281 break;
282 case CHIP_RS400:
283 case CHIP_RS480:
284 rdev->asic = &rs400_asic;
285 break;
286 case CHIP_RS600:
287 rdev->asic = &rs600_asic;
288 break;
289 case CHIP_RS690:
290 case CHIP_RS740:
291 rdev->asic = &rs690_asic;
292 break;
293 case CHIP_RV515:
294 rdev->asic = &rv515_asic;
295 break;
296 case CHIP_R520:
297 case CHIP_RV530:
298 case CHIP_RV560:
299 case CHIP_RV570:
300 case CHIP_R580:
301 rdev->asic = &r520_asic;
302 break;
303 case CHIP_R600:
304 case CHIP_RV610:
305 case CHIP_RV630:
306 case CHIP_RV620:
307 case CHIP_RV635:
308 case CHIP_RV670:
309 case CHIP_RS780:
310 case CHIP_RV770:
311 case CHIP_RV730:
312 case CHIP_RV710:
313 default:
314 /* FIXME: not supported yet */
315 return -EINVAL;
316 }
317 return 0;
318}
319
320
321/*
322 * Wrapper around modesetting bits.
323 */
324int radeon_clocks_init(struct radeon_device *rdev)
325{
326 int r;
327
328 radeon_get_clock_info(rdev->ddev);
329 r = radeon_static_clocks_init(rdev->ddev);
330 if (r) {
331 return r;
332 }
333 DRM_INFO("Clocks initialized !\n");
334 return 0;
335}
336
/* Clock teardown counterpart of radeon_clocks_init(); nothing to undo. */
void radeon_clocks_fini(struct radeon_device *rdev)
{
}
340
341/* ATOM accessor methods */
342static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
343{
344 struct radeon_device *rdev = info->dev->dev_private;
345 uint32_t r;
346
347 r = rdev->pll_rreg(rdev, reg);
348 return r;
349}
350
351static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
352{
353 struct radeon_device *rdev = info->dev->dev_private;
354
355 rdev->pll_wreg(rdev, reg, val);
356}
357
358static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
359{
360 struct radeon_device *rdev = info->dev->dev_private;
361 uint32_t r;
362
363 r = rdev->mc_rreg(rdev, reg);
364 return r;
365}
366
367static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
368{
369 struct radeon_device *rdev = info->dev->dev_private;
370
371 rdev->mc_wreg(rdev, reg, val);
372}
373
374static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
375{
376 struct radeon_device *rdev = info->dev->dev_private;
377
378 WREG32(reg*4, val);
379}
380
381static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
382{
383 struct radeon_device *rdev = info->dev->dev_private;
384 uint32_t r;
385
386 r = RREG32(reg*4);
387 return r;
388}
389
390static struct card_info atom_card_info = {
391 .dev = NULL,
392 .reg_read = cail_reg_read,
393 .reg_write = cail_reg_write,
394 .mc_read = cail_mc_read,
395 .mc_write = cail_mc_write,
396 .pll_read = cail_pll_read,
397 .pll_write = cail_pll_write,
398};
399
400int radeon_atombios_init(struct radeon_device *rdev)
401{
402 atom_card_info.dev = rdev->ddev;
403 rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
404 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
405 return 0;
406}
407
408void radeon_atombios_fini(struct radeon_device *rdev)
409{
410 kfree(rdev->mode_info.atom_context);
411}
412
413int radeon_combios_init(struct radeon_device *rdev)
414{
415 radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
416 return 0;
417}
418
/* COM BIOS teardown; nothing was allocated, so nothing to free. */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
422
423int radeon_modeset_init(struct radeon_device *rdev);
424void radeon_modeset_fini(struct radeon_device *rdev);
425
426
427/*
428 * Radeon device.
429 */
430int radeon_device_init(struct radeon_device *rdev,
431 struct drm_device *ddev,
432 struct pci_dev *pdev,
433 uint32_t flags)
434{
435 int r, ret;
436
437 DRM_INFO("radeon: Initializing kernel modesetting.\n");
438 rdev->shutdown = false;
439 rdev->ddev = ddev;
440 rdev->pdev = pdev;
441 rdev->flags = flags;
442 rdev->family = flags & RADEON_FAMILY_MASK;
443 rdev->is_atom_bios = false;
444 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
445 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
446 rdev->gpu_lockup = false;
447 /* mutex initialization are all done here so we
448 * can recall function without having locking issues */
449 mutex_init(&rdev->cs_mutex);
450 mutex_init(&rdev->ib_pool.mutex);
451 mutex_init(&rdev->cp.mutex);
452 rwlock_init(&rdev->fence_drv.lock);
453
454 if (radeon_agpmode == -1) {
455 rdev->flags &= ~RADEON_IS_AGP;
456 if (rdev->family > CHIP_RV515 ||
457 rdev->family == CHIP_RV380 ||
458 rdev->family == CHIP_RV410 ||
459 rdev->family == CHIP_R423) {
460 DRM_INFO("Forcing AGP to PCIE mode\n");
461 rdev->flags |= RADEON_IS_PCIE;
462 } else {
463 DRM_INFO("Forcing AGP to PCI mode\n");
464 rdev->flags |= RADEON_IS_PCI;
465 }
466 }
467
468 /* Set asic functions */
469 r = radeon_asic_init(rdev);
470 if (r) {
471 return r;
472 }
473
474 /* Report DMA addressing limitation */
475 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
476 if (r) {
477 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
478 }
479
480 /* Registers mapping */
481 /* TODO: block userspace mapping of io register */
482 rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
483 rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
484 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
485 if (rdev->rmmio == NULL) {
486 return -ENOMEM;
487 }
488 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
489 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
490
491 /* Setup errata flags */
492 radeon_errata(rdev);
493 /* Initialize scratch registers */
494 radeon_scratch_init(rdev);
495
496 /* TODO: disable VGA need to use VGA request */
497 /* BIOS*/
498 if (!radeon_get_bios(rdev)) {
499 if (ASIC_IS_AVIVO(rdev))
500 return -EINVAL;
501 }
502 if (rdev->is_atom_bios) {
503 r = radeon_atombios_init(rdev);
504 if (r) {
505 return r;
506 }
507 } else {
508 r = radeon_combios_init(rdev);
509 if (r) {
510 return r;
511 }
512 }
513 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
514 if (radeon_gpu_reset(rdev)) {
515 /* FIXME: what do we want to do here ? */
516 }
517 /* check if cards are posted or not */
518 if (!radeon_card_posted(rdev) && rdev->bios) {
519 DRM_INFO("GPU not posted. posting now...\n");
520 if (rdev->is_atom_bios) {
521 atom_asic_init(rdev->mode_info.atom_context);
522 } else {
523 radeon_combios_asic_init(rdev->ddev);
524 }
525 }
526 /* Get vram informations */
527 radeon_vram_info(rdev);
528 /* Device is severly broken if aper size > vram size.
529 * for RN50/M6/M7 - Novell bug 204882 ?
530 */
531 if (rdev->mc.vram_size < rdev->mc.aper_size) {
532 rdev->mc.aper_size = rdev->mc.vram_size;
533 }
534 /* Add an MTRR for the VRAM */
535 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
536 MTRR_TYPE_WRCOMB, 1);
537 DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
538 rdev->mc.vram_size >> 20,
539 (unsigned)rdev->mc.aper_size >> 20);
540 DRM_INFO("RAM width %dbits %cDR\n",
541 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
542 /* Initialize clocks */
543 r = radeon_clocks_init(rdev);
544 if (r) {
545 return r;
546 }
547 /* Initialize memory controller (also test AGP) */
548 r = radeon_mc_init(rdev);
549 if (r) {
550 return r;
551 }
552 /* Fence driver */
553 r = radeon_fence_driver_init(rdev);
554 if (r) {
555 return r;
556 }
557 r = radeon_irq_kms_init(rdev);
558 if (r) {
559 return r;
560 }
561 /* Memory manager */
562 r = radeon_object_init(rdev);
563 if (r) {
564 return r;
565 }
566 /* Initialize GART (initialize after TTM so we can allocate
567 * memory through TTM but finalize after TTM) */
568 r = radeon_gart_enable(rdev);
569 if (!r) {
570 r = radeon_gem_init(rdev);
571 }
572
573 /* 1M ring buffer */
574 if (!r) {
575 r = radeon_cp_init(rdev, 1024 * 1024);
576 }
577 if (!r) {
578 r = radeon_wb_init(rdev);
579 if (r) {
580 DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
581 return r;
582 }
583 }
584 if (!r) {
585 r = radeon_ib_pool_init(rdev);
586 if (r) {
587 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
588 return r;
589 }
590 }
591 if (!r) {
592 r = radeon_ib_test(rdev);
593 if (r) {
594 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
595 return r;
596 }
597 }
598 ret = r;
599 r = radeon_modeset_init(rdev);
600 if (r) {
601 return r;
602 }
603 if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) {
604 rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private;
605 }
606 if (!ret) {
607 DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
608 }
609 if (radeon_benchmarking) {
610 radeon_benchmark(rdev);
611 }
612 return ret;
613}
614
615void radeon_device_fini(struct radeon_device *rdev)
616{
617 if (rdev == NULL || rdev->rmmio == NULL) {
618 return;
619 }
620 DRM_INFO("radeon: finishing device.\n");
621 rdev->shutdown = true;
622 /* Order matter so becarefull if you rearrange anythings */
623 radeon_modeset_fini(rdev);
624 radeon_ib_pool_fini(rdev);
625 radeon_cp_fini(rdev);
626 radeon_wb_fini(rdev);
627 radeon_gem_fini(rdev);
628 radeon_object_fini(rdev);
629 /* mc_fini must be after object_fini */
630 radeon_mc_fini(rdev);
631#if __OS_HAS_AGP
632 radeon_agp_fini(rdev);
633#endif
634 radeon_irq_kms_fini(rdev);
635 radeon_fence_driver_fini(rdev);
636 radeon_clocks_fini(rdev);
637 if (rdev->is_atom_bios) {
638 radeon_atombios_fini(rdev);
639 } else {
640 radeon_combios_fini(rdev);
641 }
642 kfree(rdev->bios);
643 rdev->bios = NULL;
644 iounmap(rdev->rmmio);
645 rdev->rmmio = NULL;
646}
647
648
649/*
650 * Suspend & resume.
651 */
652int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
653{
654 struct radeon_device *rdev = dev->dev_private;
655 struct drm_crtc *crtc;
656
657 if (dev == NULL || rdev == NULL) {
658 return -ENODEV;
659 }
660 if (state.event == PM_EVENT_PRETHAW) {
661 return 0;
662 }
663 /* unpin the front buffers */
664 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
665 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
666 struct radeon_object *robj;
667
668 if (rfb == NULL || rfb->obj == NULL) {
669 continue;
670 }
671 robj = rfb->obj->driver_private;
672 if (robj != rdev->fbdev_robj) {
673 radeon_object_unpin(robj);
674 }
675 }
676 /* evict vram memory */
677 radeon_object_evict_vram(rdev);
678 /* wait for gpu to finish processing current batch */
679 radeon_fence_wait_last(rdev);
680
681 radeon_cp_disable(rdev);
682 radeon_gart_disable(rdev);
683
684 /* evict remaining vram memory */
685 radeon_object_evict_vram(rdev);
686
687 rdev->irq.sw_int = false;
688 radeon_irq_set(rdev);
689
690 pci_save_state(dev->pdev);
691 if (state.event == PM_EVENT_SUSPEND) {
692 /* Shut down the device */
693 pci_disable_device(dev->pdev);
694 pci_set_power_state(dev->pdev, PCI_D3hot);
695 }
696 acquire_console_sem();
697 fb_set_suspend(rdev->fbdev_info, 1);
698 release_console_sem();
699 return 0;
700}
701
702int radeon_resume_kms(struct drm_device *dev)
703{
704 struct radeon_device *rdev = dev->dev_private;
705 int r;
706
707 acquire_console_sem();
708 pci_set_power_state(dev->pdev, PCI_D0);
709 pci_restore_state(dev->pdev);
710 if (pci_enable_device(dev->pdev)) {
711 release_console_sem();
712 return -1;
713 }
714 pci_set_master(dev->pdev);
715 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
716 if (radeon_gpu_reset(rdev)) {
717 /* FIXME: what do we want to do here ? */
718 }
719 /* post card */
720 if (rdev->is_atom_bios) {
721 atom_asic_init(rdev->mode_info.atom_context);
722 } else {
723 radeon_combios_asic_init(rdev->ddev);
724 }
725 /* Initialize clocks */
726 r = radeon_clocks_init(rdev);
727 if (r) {
728 release_console_sem();
729 return r;
730 }
731 /* Enable IRQ */
732 rdev->irq.sw_int = true;
733 radeon_irq_set(rdev);
734 /* Initialize GPU Memory Controller */
735 r = radeon_mc_init(rdev);
736 if (r) {
737 goto out;
738 }
739 r = radeon_gart_enable(rdev);
740 if (r) {
741 goto out;
742 }
743 r = radeon_cp_init(rdev, rdev->cp.ring_size);
744 if (r) {
745 goto out;
746 }
747out:
748 fb_set_suspend(rdev->fbdev_info, 0);
749 release_console_sem();
750
751 /* blat the mode back in */
752 drm_helper_resume_force_mode(dev);
753 return 0;
754}
755
756
757/*
758 * Debugfs
759 */
760struct radeon_debugfs {
761 struct drm_info_list *files;
762 unsigned num_files;
763};
764static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
765static unsigned _radeon_debugfs_count = 0;
766
767int radeon_debugfs_add_files(struct radeon_device *rdev,
768 struct drm_info_list *files,
769 unsigned nfiles)
770{
771 unsigned i;
772
773 for (i = 0; i < _radeon_debugfs_count; i++) {
774 if (_radeon_debugfs[i].files == files) {
775 /* Already registered */
776 return 0;
777 }
778 }
779 if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
780 DRM_ERROR("Reached maximum number of debugfs files.\n");
781 DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
782 return -EINVAL;
783 }
784 _radeon_debugfs[_radeon_debugfs_count].files = files;
785 _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
786 _radeon_debugfs_count++;
787#if defined(CONFIG_DEBUG_FS)
788 drm_debugfs_create_files(files, nfiles,
789 rdev->ddev->control->debugfs_root,
790 rdev->ddev->control);
791 drm_debugfs_create_files(files, nfiles,
792 rdev->ddev->primary->debugfs_root,
793 rdev->ddev->primary);
794#endif
795 return 0;
796}
797
798#if defined(CONFIG_DEBUG_FS)
/* Per-minor debugfs init hook; files are registered lazily via
 * radeon_debugfs_add_files(), so there is nothing to do here. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}
803
804void radeon_debugfs_cleanup(struct drm_minor *minor)
805{
806 unsigned i;
807
808 for (i = 0; i < _radeon_debugfs_count; i++) {
809 drm_debugfs_remove_files(_radeon_debugfs[i].files,
810 _radeon_debugfs[i].num_files, minor);
811 }
812}
813#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
new file mode 100644
index 000000000000..5452bb9d925e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -0,0 +1,692 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "radeon_drm.h"
28#include "radeon.h"
29
30#include "atom.h"
31#include <asm/div64.h>
32
33#include "drm_crtc_helper.h"
34#include "drm_edid.h"
35
36static int radeon_ddc_dump(struct drm_connector *connector);
37
/*
 * Program the 256-entry hardware LUT of an AVIVO display controller from
 * the gamma ramp cached in radeon_crtc->lut_{r,g,b}.
 */
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);

	/* black offset (floor) for each channel */
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
	WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);

	/* white offset (ceiling) for each channel */
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
	WREG32(AVIVO_DC_LUTA_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);

	/* select this CRTC's LUT for the write sequence below */
	WREG32(AVIVO_DC_LUT_RW_SELECT, radeon_crtc->crtc_id);
	WREG32(AVIVO_DC_LUT_RW_MODE, 0);
	WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);

	WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
	/* one packed word per entry: R in [29:20], G in [19:10], B in [9:0] */
	for (i = 0; i < 256; i++) {
		WREG32(AVIVO_DC_LUT_30_COLOR,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}

	WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
}
70
/*
 * Program the 256-entry palette of a pre-AVIVO CRTC from the gamma ramp
 * cached in radeon_crtc->lut_{r,g,b}.
 */
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;
	uint32_t dac2_cntl;

	/* steer palette accesses to this CRTC's palette via DAC_CNTL2 */
	dac2_cntl = RREG32(RADEON_DAC_CNTL2);
	if (radeon_crtc->crtc_id == 0)
		dac2_cntl &= (uint32_t)~RADEON_DAC2_PALETTE_ACC_CTL;
	else
		dac2_cntl |= RADEON_DAC2_PALETTE_ACC_CTL;
	WREG32(RADEON_DAC_CNTL2, dac2_cntl);

	WREG8(RADEON_PALETTE_INDEX, 0);
	/* one packed word per entry: R in [29:20], G in [19:10], B in [9:0] */
	for (i = 0; i < 256; i++) {
		WREG32(RADEON_PALETTE_30_DATA,
		       (radeon_crtc->lut_r[i] << 20) |
		       (radeon_crtc->lut_g[i] << 10) |
		       (radeon_crtc->lut_b[i] << 0));
	}
}
94
95void radeon_crtc_load_lut(struct drm_crtc *crtc)
96{
97 struct drm_device *dev = crtc->dev;
98 struct radeon_device *rdev = dev->dev_private;
99
100 if (!crtc->enabled)
101 return;
102
103 if (ASIC_IS_AVIVO(rdev))
104 avivo_crtc_load_lut(crtc);
105 else
106 legacy_crtc_load_lut(crtc);
107}
108
109/** Sets the color ramps on behalf of RandR */
110void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
111 u16 blue, int regno)
112{
113 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
114
115 if (regno == 0)
116 DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id);
117 radeon_crtc->lut_r[regno] = red >> 6;
118 radeon_crtc->lut_g[regno] = green >> 6;
119 radeon_crtc->lut_b[regno] = blue >> 6;
120}
121
/*
 * drm gamma_set hook: copy the 256-entry user ramp into the CRTC's cached
 * LUT (truncated from 16 to 10 bits per component) and load it into the
 * hardware.
 */
static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				  u16 *blue, uint32_t size)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	int i, j;

	/* gamma size was registered as 256 in radeon_crtc_init(); ignore
	 * anything else */
	if (size != 256) {
		return;
	}
	if (crtc->fb == NULL) {
		return;
	}

	if (crtc->fb->depth == 16) {
		/* 16 bpp: only 32 red/blue and 64 green levels are distinct
		 * (565-style layout), so replicate each level across the full
		 * 256-entry LUT: x8 for red/blue, x4 for green */
		for (i = 0; i < 64; i++) {
			if (i <= 31) {
				for (j = 0; j < 8; j++) {
					radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6;
					radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6;
				}
			}
			for (j = 0; j < 4; j++)
				radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6;
		}
	} else {
		/* direct 1:1 copy */
		for (i = 0; i < 256; i++) {
			radeon_crtc->lut_r[i] = red[i] >> 6;
			radeon_crtc->lut_g[i] = green[i] >> 6;
			radeon_crtc->lut_b[i] = blue[i] >> 6;
		}
	}

	radeon_crtc_load_lut(crtc);
}
156
157static void radeon_crtc_destroy(struct drm_crtc *crtc)
158{
159 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
160
161 if (radeon_crtc->mode_set.mode) {
162 drm_mode_destroy(crtc->dev, radeon_crtc->mode_set.mode);
163 }
164 drm_crtc_cleanup(crtc);
165 kfree(radeon_crtc);
166}
167
/* CRTC ops shared by all radeon CRTCs (both AVIVO and legacy paths). */
static const struct drm_crtc_funcs radeon_crtc_funcs = {
	.cursor_set = radeon_crtc_cursor_set,
	.cursor_move = radeon_crtc_cursor_move,
	.gamma_set = radeon_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = radeon_crtc_destroy,
};
175
/*
 * Allocate and register CRTC number 'index' with the DRM core, seed an
 * identity gamma ramp, and run the asic-specific (ATOM or legacy) init.
 * Allocation failure is silently ignored and leaves the CRTC missing.
 */
static void radeon_crtc_init(struct drm_device *dev, int index)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc;
	int i;

	/* single allocation: the connector pointer array used by mode_set
	 * lives directly after the radeon_crtc struct */
	radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (radeon_crtc == NULL)
		return;

	drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
	radeon_crtc->crtc_id = index;

	radeon_crtc->mode_set.crtc = &radeon_crtc->base;
	/* point at the trailing connector slots allocated above */
	radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
	radeon_crtc->mode_set.num_connectors = 0;

	/* identity ramp, widened from 8-bit index to 10-bit LUT entry */
	for (i = 0; i < 256; i++) {
		radeon_crtc->lut_r[i] = i << 2;
		radeon_crtc->lut_g[i] = i << 2;
		radeon_crtc->lut_b[i] = i << 2;
	}

	/* ATOM CRTC init needs an ATOM BIOS plus either an AVIVO asic or the
	 * r4xx_atom module-parameter override */
	if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom))
		radeon_atombios_init_crtc(dev, radeon_crtc);
	else
		radeon_legacy_init_crtc(dev, radeon_crtc);
}
206
/* Human-readable names indexed by the encoder object id reported in
 * radeon_encoder->encoder_id (ENCODER_OBJECT_ID_* values). */
static const char *encoder_names[34] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
};
243
/* Human-readable names indexed by drm_connector->connector_type
 * (DRM_MODE_CONNECTOR_* values). */
static const char *connector_names[13] = {
	"Unknown",
	"VGA",
	"DVI-I",
	"DVI-D",
	"DVI-A",
	"Composite",
	"S-video",
	"LVDS",
	"Component",
	"DIN",
	"DisplayPort",
	"HDMI-A",
	"HDMI-B",
};
259
260static void radeon_print_display_setup(struct drm_device *dev)
261{
262 struct drm_connector *connector;
263 struct radeon_connector *radeon_connector;
264 struct drm_encoder *encoder;
265 struct radeon_encoder *radeon_encoder;
266 uint32_t devices;
267 int i = 0;
268
269 DRM_INFO("Radeon Display Connectors\n");
270 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
271 radeon_connector = to_radeon_connector(connector);
272 DRM_INFO("Connector %d:\n", i);
273 DRM_INFO(" %s\n", connector_names[connector->connector_type]);
274 if (radeon_connector->ddc_bus)
275 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
276 radeon_connector->ddc_bus->rec.mask_clk_reg,
277 radeon_connector->ddc_bus->rec.mask_data_reg,
278 radeon_connector->ddc_bus->rec.a_clk_reg,
279 radeon_connector->ddc_bus->rec.a_data_reg,
280 radeon_connector->ddc_bus->rec.put_clk_reg,
281 radeon_connector->ddc_bus->rec.put_data_reg,
282 radeon_connector->ddc_bus->rec.get_clk_reg,
283 radeon_connector->ddc_bus->rec.get_data_reg);
284 DRM_INFO(" Encoders:\n");
285 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
286 radeon_encoder = to_radeon_encoder(encoder);
287 devices = radeon_encoder->devices & radeon_connector->devices;
288 if (devices) {
289 if (devices & ATOM_DEVICE_CRT1_SUPPORT)
290 DRM_INFO(" CRT1: %s\n", encoder_names[radeon_encoder->encoder_id]);
291 if (devices & ATOM_DEVICE_CRT2_SUPPORT)
292 DRM_INFO(" CRT2: %s\n", encoder_names[radeon_encoder->encoder_id]);
293 if (devices & ATOM_DEVICE_LCD1_SUPPORT)
294 DRM_INFO(" LCD1: %s\n", encoder_names[radeon_encoder->encoder_id]);
295 if (devices & ATOM_DEVICE_DFP1_SUPPORT)
296 DRM_INFO(" DFP1: %s\n", encoder_names[radeon_encoder->encoder_id]);
297 if (devices & ATOM_DEVICE_DFP2_SUPPORT)
298 DRM_INFO(" DFP2: %s\n", encoder_names[radeon_encoder->encoder_id]);
299 if (devices & ATOM_DEVICE_DFP3_SUPPORT)
300 DRM_INFO(" DFP3: %s\n", encoder_names[radeon_encoder->encoder_id]);
301 if (devices & ATOM_DEVICE_DFP4_SUPPORT)
302 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
303 if (devices & ATOM_DEVICE_DFP5_SUPPORT)
304 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
305 if (devices & ATOM_DEVICE_TV1_SUPPORT)
306 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
307 if (devices & ATOM_DEVICE_CV_SUPPORT)
308 DRM_INFO(" CV: %s\n", encoder_names[radeon_encoder->encoder_id]);
309 }
310 }
311 i++;
312 }
313}
314
315bool radeon_setup_enc_conn(struct drm_device *dev)
316{
317 struct radeon_device *rdev = dev->dev_private;
318 struct drm_connector *drm_connector;
319 bool ret = false;
320
321 if (rdev->bios) {
322 if (rdev->is_atom_bios) {
323 if (rdev->family >= CHIP_R600)
324 ret = radeon_get_atom_connector_info_from_object_table(dev);
325 else
326 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
327 } else
328 ret = radeon_get_legacy_connector_info_from_bios(dev);
329 } else {
330 if (!ASIC_IS_AVIVO(rdev))
331 ret = radeon_get_legacy_connector_info_from_table(dev);
332 }
333 if (ret) {
334 radeon_print_display_setup(dev);
335 list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
336 radeon_ddc_dump(drm_connector);
337 }
338
339 return ret;
340}
341
342int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
343{
344 struct edid *edid;
345 int ret = 0;
346
347 if (!radeon_connector->ddc_bus)
348 return -1;
349 radeon_i2c_do_lock(radeon_connector, 1);
350 edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
351 radeon_i2c_do_lock(radeon_connector, 0);
352 if (edid) {
353 /* update digital bits here */
354 if (edid->digital)
355 radeon_connector->use_digital = 1;
356 else
357 radeon_connector->use_digital = 0;
358 drm_mode_connector_update_edid_property(&radeon_connector->base, edid);
359 ret = drm_add_edid_modes(&radeon_connector->base, edid);
360 kfree(edid);
361 return ret;
362 }
363 drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
364 return -1;
365}
366
367static int radeon_ddc_dump(struct drm_connector *connector)
368{
369 struct edid *edid;
370 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
371 int ret = 0;
372
373 if (!radeon_connector->ddc_bus)
374 return -1;
375 radeon_i2c_do_lock(radeon_connector, 1);
376 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
377 radeon_i2c_do_lock(radeon_connector, 0);
378 if (edid) {
379 kfree(edid);
380 }
381 return ret;
382}
383
384static inline uint32_t radeon_div(uint64_t n, uint32_t d)
385{
386 uint64_t mod;
387
388 n += d / 2;
389
390 mod = do_div(n, d);
391 return n;
392}
393
/*
 * Search for PLL divider settings whose output clock best approximates
 * 'freq', subject to the constraints in 'pll' (reference/post/feedback
 * divider ranges, VCO range) and the RADEON_PLL_* behavior flags.
 *
 * NOTE(review): freq appears to be in 10 kHz units -- it is scaled by
 * 1000 below and compared against reference_freq * 10000 -- confirm
 * against the callers.
 *
 * Outputs (never NULL): the achieved dot clock and the chosen feedback,
 * fractional-feedback, reference and post dividers.
 */
void radeon_compute_pll(struct radeon_pll *pll,
			uint64_t freq,
			uint32_t *dot_clock_p,
			uint32_t *fb_div_p,
			uint32_t *frac_fb_div_p,
			uint32_t *ref_div_p,
			uint32_t *post_div_p,
			int flags)
{
	uint32_t min_ref_div = pll->min_ref_div;
	uint32_t max_ref_div = pll->max_ref_div;
	uint32_t min_fractional_feed_div = 0;
	uint32_t max_fractional_feed_div = 0;
	uint32_t best_vco = pll->best_vco;
	uint32_t best_post_div = 1;
	uint32_t best_ref_div = 1;
	uint32_t best_feedback_div = 1;
	uint32_t best_frac_feedback_div = 0;
	uint32_t best_freq = -1;	/* sentinel: no exact match seen yet */
	uint32_t best_error = 0xffffffff;
	uint32_t best_vco_diff = 1;
	uint32_t post_div;

	DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
	freq = freq * 1000;

	if (flags & RADEON_PLL_USE_REF_DIV)
		min_ref_div = max_ref_div = pll->reference_div;
	else {
		/* binary-search the ref div window down to values that keep
		 * the PLL input frequency inside [pll_in_min, pll_in_max] */
		while (min_ref_div < max_ref_div-1) {
			uint32_t mid = (min_ref_div + max_ref_div) / 2;
			uint32_t pll_in = pll->reference_freq / mid;
			if (pll_in < pll->pll_in_min)
				max_ref_div = mid;
			else if (pll_in > pll->pll_in_max)
				min_ref_div = mid;
			else
				break;
		}
	}

	if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
		min_fractional_feed_div = pll->min_frac_feedback_div;
		max_fractional_feed_div = pll->max_frac_feedback_div;
	}

	/* exhaustive over post divider ... */
	for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
		uint32_t ref_div;

		if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
			continue;

		/* legacy radeons only have a few post_divs */
		if (flags & RADEON_PLL_LEGACY) {
			if ((post_div == 5) ||
			    (post_div == 7) ||
			    (post_div == 9) ||
			    (post_div == 10) ||
			    (post_div == 11) ||
			    (post_div == 13) ||
			    (post_div == 14) ||
			    (post_div == 15))
				continue;
		}

		/* ... and reference divider ... */
		for (ref_div = min_ref_div; ref_div <= max_ref_div; ++ref_div) {
			uint32_t feedback_div, current_freq = 0, error, vco_diff;
			uint32_t pll_in = pll->reference_freq / ref_div;
			uint32_t min_feed_div = pll->min_feedback_div;
			uint32_t max_feed_div = pll->max_feedback_div + 1;

			if (pll_in < pll->pll_in_min || pll_in > pll->pll_in_max)
				continue;

			/* ... binary search on the feedback divider, keeping
			 * the VCO inside [pll_out_min, pll_out_max] */
			while (min_feed_div < max_feed_div) {
				uint32_t vco;
				uint32_t min_frac_feed_div = min_fractional_feed_div;
				uint32_t max_frac_feed_div = max_fractional_feed_div + 1;
				uint32_t frac_feedback_div;
				uint64_t tmp;

				feedback_div = (min_feed_div + max_feed_div) / 2;

				tmp = (uint64_t)pll->reference_freq * feedback_div;
				vco = radeon_div(tmp, ref_div);

				if (vco < pll->pll_out_min) {
					min_feed_div = feedback_div + 1;
					continue;
				} else if (vco > pll->pll_out_max) {
					max_feed_div = feedback_div;
					continue;
				}

				/* inner binary search on the fractional part
				 * (degenerates to one iteration of 0 when
				 * fractional dividers are disabled) */
				while (min_frac_feed_div < max_frac_feed_div) {
					frac_feedback_div = (min_frac_feed_div + max_frac_feed_div) / 2;
					tmp = (uint64_t)pll->reference_freq * 10000 * feedback_div;
					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
					current_freq = radeon_div(tmp, ref_div * post_div);

					error = abs(current_freq - freq);
					vco_diff = abs(vco - best_vco);

					/* better by >100 Hz-units, or roughly
					 * equal error but closer to the
					 * preferred VCO */
					if ((best_vco == 0 && error < best_error) ||
					    (best_vco != 0 &&
					     (error < best_error - 100 ||
					      (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
						best_post_div = post_div;
						best_ref_div = ref_div;
						best_feedback_div = feedback_div;
						best_frac_feedback_div = frac_feedback_div;
						best_freq = current_freq;
						best_error = error;
						best_vco_diff = vco_diff;
					} else if (current_freq == freq) {
						/* exact hit: first one wins,
						 * later ones only replace it
						 * per the PREFER_* flags */
						if (best_freq == -1) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						} else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
							   ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
							   ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
							   ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
							   ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
							   ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
							best_post_div = post_div;
							best_ref_div = ref_div;
							best_feedback_div = feedback_div;
							best_frac_feedback_div = frac_feedback_div;
							best_freq = current_freq;
							best_error = error;
							best_vco_diff = vco_diff;
						}
					}
					if (current_freq < freq)
						min_frac_feed_div = frac_feedback_div + 1;
					else
						max_frac_feed_div = frac_feedback_div;
				}
				if (current_freq < freq)
					min_feed_div = feedback_div + 1;
				else
					max_feed_div = feedback_div;
			}
		}
	}

	*dot_clock_p = best_freq / 10000;
	*fb_div_p = best_feedback_div;
	*frac_fb_div_p = best_frac_feedback_div;
	*ref_div_p = best_ref_div;
	*post_div_p = best_post_div;
}
551
552static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
553{
554 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
555 struct drm_device *dev = fb->dev;
556
557 if (fb->fbdev)
558 radeonfb_remove(dev, fb);
559
560 if (radeon_fb->obj) {
561 radeon_gem_object_unpin(radeon_fb->obj);
562 mutex_lock(&dev->struct_mutex);
563 drm_gem_object_unreference(radeon_fb->obj);
564 mutex_unlock(&dev->struct_mutex);
565 }
566 drm_framebuffer_cleanup(fb);
567 kfree(radeon_fb);
568}
569
/* drm create_handle hook: give userspace a GEM handle for the fb's
 * backing object. */
static int radeon_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);

	return drm_gem_handle_create(file_priv, radeon_fb->obj, handle);
}
578
/* Framebuffer ops for all radeon framebuffers. */
static const struct drm_framebuffer_funcs radeon_fb_funcs = {
	.destroy = radeon_user_framebuffer_destroy,
	.create_handle = radeon_user_framebuffer_create_handle,
};
583
584struct drm_framebuffer *
585radeon_framebuffer_create(struct drm_device *dev,
586 struct drm_mode_fb_cmd *mode_cmd,
587 struct drm_gem_object *obj)
588{
589 struct radeon_framebuffer *radeon_fb;
590
591 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
592 if (radeon_fb == NULL) {
593 return NULL;
594 }
595 drm_framebuffer_init(dev, &radeon_fb->base, &radeon_fb_funcs);
596 drm_helper_mode_fill_fb_struct(&radeon_fb->base, mode_cmd);
597 radeon_fb->obj = obj;
598 return &radeon_fb->base;
599}
600
601static struct drm_framebuffer *
602radeon_user_framebuffer_create(struct drm_device *dev,
603 struct drm_file *file_priv,
604 struct drm_mode_fb_cmd *mode_cmd)
605{
606 struct drm_gem_object *obj;
607
608 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
609
610 return radeon_framebuffer_create(dev, mode_cmd, obj);
611}
612
/* Top-level mode config ops: user fb creation and fbdev change hook. */
static const struct drm_mode_config_funcs radeon_mode_funcs = {
	.fb_create = radeon_user_framebuffer_create,
	.fb_changed = radeonfb_probe,
};
617
618int radeon_modeset_init(struct radeon_device *rdev)
619{
620 int num_crtc = 2, i;
621 int ret;
622
623 drm_mode_config_init(rdev->ddev);
624 rdev->mode_info.mode_config_initialized = true;
625
626 rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
627
628 if (ASIC_IS_AVIVO(rdev)) {
629 rdev->ddev->mode_config.max_width = 8192;
630 rdev->ddev->mode_config.max_height = 8192;
631 } else {
632 rdev->ddev->mode_config.max_width = 4096;
633 rdev->ddev->mode_config.max_height = 4096;
634 }
635
636 rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
637
638 /* allocate crtcs - TODO single crtc */
639 for (i = 0; i < num_crtc; i++) {
640 radeon_crtc_init(rdev->ddev, i);
641 }
642
643 /* okay we should have all the bios connectors */
644 ret = radeon_setup_enc_conn(rdev->ddev);
645 if (!ret) {
646 return ret;
647 }
648 drm_helper_initial_config(rdev->ddev);
649 return 0;
650}
651
652void radeon_modeset_fini(struct radeon_device *rdev)
653{
654 if (rdev->mode_info.mode_config_initialized) {
655 drm_mode_config_cleanup(rdev->ddev);
656 rdev->mode_info.mode_config_initialized = false;
657 }
658}
659
660void radeon_init_disp_bandwidth(struct drm_device *dev)
661{
662 struct radeon_device *rdev = dev->dev_private;
663 struct drm_display_mode *modes[2];
664 int pixel_bytes[2];
665 struct drm_crtc *crtc;
666
667 pixel_bytes[0] = pixel_bytes[1] = 0;
668 modes[0] = modes[1] = NULL;
669
670 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
671 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
672
673 if (crtc->enabled && crtc->fb) {
674 modes[radeon_crtc->crtc_id] = &crtc->mode;
675 pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8;
676 }
677 }
678
679 if (ASIC_IS_AVIVO(rdev)) {
680 radeon_init_disp_bw_avivo(dev,
681 modes[0],
682 pixel_bytes[0],
683 modes[1],
684 pixel_bytes[1]);
685 } else {
686 radeon_init_disp_bw_legacy(dev,
687 modes[0],
688 pixel_bytes[0],
689 modes[1],
690 pixel_bytes[1]);
691 }
692}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 13a60f4d4227..c815a2cbf7b3 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -35,12 +35,92 @@
35#include "radeon_drv.h" 35#include "radeon_drv.h"
36 36
37#include "drm_pciids.h" 37#include "drm_pciids.h"
38#include <linux/console.h>
39
40
41#if defined(CONFIG_DRM_RADEON_KMS)
42/*
43 * KMS wrapper.
44 */
45#define KMS_DRIVER_MAJOR 2
46#define KMS_DRIVER_MINOR 0
47#define KMS_DRIVER_PATCHLEVEL 0
48int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
49int radeon_driver_unload_kms(struct drm_device *dev);
50int radeon_driver_firstopen_kms(struct drm_device *dev);
51void radeon_driver_lastclose_kms(struct drm_device *dev);
52int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
53void radeon_driver_postclose_kms(struct drm_device *dev,
54 struct drm_file *file_priv);
55void radeon_driver_preclose_kms(struct drm_device *dev,
56 struct drm_file *file_priv);
57int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
58int radeon_resume_kms(struct drm_device *dev);
59u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
60int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
61void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
62void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
63int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
64void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
65irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
66int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master);
67void radeon_master_destroy_kms(struct drm_device *dev,
68 struct drm_master *master);
69int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
70 struct drm_file *file_priv);
71int radeon_gem_object_init(struct drm_gem_object *obj);
72void radeon_gem_object_free(struct drm_gem_object *obj);
73extern struct drm_ioctl_desc radeon_ioctls_kms[];
74extern int radeon_max_kms_ioctl;
75int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
76#if defined(CONFIG_DEBUG_FS)
77int radeon_debugfs_init(struct drm_minor *minor);
78void radeon_debugfs_cleanup(struct drm_minor *minor);
79#endif
80#endif
81
38 82
39int radeon_no_wb; 83int radeon_no_wb;
84#if defined(CONFIG_DRM_RADEON_KMS)
85int radeon_modeset = -1;
86int radeon_dynclks = -1;
87int radeon_r4xx_atom = 0;
88int radeon_agpmode = 0;
89int radeon_vram_limit = 0;
90int radeon_gart_size = 512; /* default gart size */
91int radeon_benchmarking = 0;
92int radeon_connector_table = 0;
93#endif
40 94
41MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 95MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
42module_param_named(no_wb, radeon_no_wb, int, 0444); 96module_param_named(no_wb, radeon_no_wb, int, 0444);
43 97
98#if defined(CONFIG_DRM_RADEON_KMS)
99MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
100module_param_named(modeset, radeon_modeset, int, 0400);
101
102MODULE_PARM_DESC(dynclks, "Disable/Enable dynamic clocks");
103module_param_named(dynclks, radeon_dynclks, int, 0444);
104
105MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx");
106module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444);
107
108MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing");
109module_param_named(vramlimit, radeon_vram_limit, int, 0600);
110
111MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
112module_param_named(agpmode, radeon_agpmode, int, 0444);
113
114MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32,64, etc)\n");
115module_param_named(gartsize, radeon_gart_size, int, 0600);
116
117MODULE_PARM_DESC(benchmark, "Run benchmark");
118module_param_named(benchmark, radeon_benchmarking, int, 0444);
119
120MODULE_PARM_DESC(connector_table, "Force connector table");
121module_param_named(connector_table, radeon_connector_table, int, 0444);
122#endif
123
44static int radeon_suspend(struct drm_device *dev, pm_message_t state) 124static int radeon_suspend(struct drm_device *dev, pm_message_t state)
45{ 125{
46 drm_radeon_private_t *dev_priv = dev->dev_private; 126 drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -73,7 +153,11 @@ static struct pci_device_id pciidlist[] = {
73 radeon_PCI_IDS 153 radeon_PCI_IDS
74}; 154};
75 155
76static struct drm_driver driver = { 156#if defined(CONFIG_DRM_RADEON_KMS)
157MODULE_DEVICE_TABLE(pci, pciidlist);
158#endif
159
160static struct drm_driver driver_old = {
77 .driver_features = 161 .driver_features =
78 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 162 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
79 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED, 163 DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
@@ -127,18 +211,141 @@ static struct drm_driver driver = {
127 .patchlevel = DRIVER_PATCHLEVEL, 211 .patchlevel = DRIVER_PATCHLEVEL,
128}; 212};
129 213
214#if defined(CONFIG_DRM_RADEON_KMS)
215static struct drm_driver kms_driver;
216
/* PCI probe: hand the device to the DRM core bound to the KMS driver. */
static int __devinit
radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_dev(pdev, ent, &kms_driver);
}
222
/* PCI remove: tear down the DRM device attached to this PCI device. */
static void
radeon_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}
230
/* PCI PM suspend: forward to the KMS suspend entry point. */
static int
radeon_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	return radeon_suspend_kms(dev, state);
}
237
/* PCI PM resume: forward to the KMS resume entry point. */
static int
radeon_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	return radeon_resume_kms(dev);
}
244
/*
 * DRM driver ops used when kernel modesetting is enabled (selected in
 * radeon_init() when radeon_modeset == 1); the legacy UMS path keeps
 * using driver_old.
 */
static struct drm_driver kms_driver = {
	.driver_features =
	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM,
	.dev_priv_size = 0,
	.load = radeon_driver_load_kms,
	.firstopen = radeon_driver_firstopen_kms,
	.open = radeon_driver_open_kms,
	.preclose = radeon_driver_preclose_kms,
	.postclose = radeon_driver_postclose_kms,
	.lastclose = radeon_driver_lastclose_kms,
	.unload = radeon_driver_unload_kms,
	.suspend = radeon_suspend_kms,
	.resume = radeon_resume_kms,
	.get_vblank_counter = radeon_get_vblank_counter_kms,
	.enable_vblank = radeon_enable_vblank_kms,
	.disable_vblank = radeon_disable_vblank_kms,
	.master_create = radeon_master_create_kms,
	.master_destroy = radeon_master_destroy_kms,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = radeon_debugfs_init,
	.debugfs_cleanup = radeon_debugfs_cleanup,
#endif
	.irq_preinstall = radeon_driver_irq_preinstall_kms,
	.irq_postinstall = radeon_driver_irq_postinstall_kms,
	.irq_uninstall = radeon_driver_irq_uninstall_kms,
	.irq_handler = radeon_driver_irq_handler_kms,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = radeon_ioctls_kms,
	.gem_init_object = radeon_gem_object_init,
	.gem_free_object = radeon_gem_object_free,
	.dma_ioctl = radeon_dma_ioctl_kms,
	/* char-device file ops; mmap goes through the radeon TTM path */
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .ioctl = drm_ioctl,
		 .mmap = radeon_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
		 /* NOTE(review): no 32-bit compat ioctl handler is wired up
		  * yet -- 32-bit userspace on a 64-bit kernel will get
		  * -ENOTTY; confirm this is intentional */
		 .compat_ioctl = NULL,
#endif
	},

	.pci_driver = {
		 .name = DRIVER_NAME,
		 .id_table = pciidlist,
		 .probe = radeon_pci_probe,
		 .remove = radeon_pci_remove,
		 .suspend = radeon_pci_suspend,
		 .resume = radeon_pci_resume,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = KMS_DRIVER_MAJOR,
	.minor = KMS_DRIVER_MINOR,
	.patchlevel = KMS_DRIVER_PATCHLEVEL,
};
308#endif
309
310static struct drm_driver *driver;
311
130static int __init radeon_init(void) 312static int __init radeon_init(void)
131{ 313{
132 driver.num_ioctls = radeon_max_ioctl; 314 driver = &driver_old;
133 return drm_init(&driver); 315 driver->num_ioctls = radeon_max_ioctl;
316#if defined(CONFIG_DRM_RADEON_KMS) && defined(CONFIG_X86)
317 /* if enabled by default */
318 if (radeon_modeset == -1) {
319 DRM_INFO("radeon default to kernel modesetting.\n");
320 radeon_modeset = 1;
321 }
322 if (radeon_modeset == 1) {
323 DRM_INFO("radeon kernel modesetting enabled.\n");
324 driver = &kms_driver;
325 driver->driver_features |= DRIVER_MODESET;
326 driver->num_ioctls = radeon_max_kms_ioctl;
327 }
328
329 /* if the vga console setting is enabled still
330 * let modprobe override it */
331#ifdef CONFIG_VGA_CONSOLE
332 if (vgacon_text_force() && radeon_modeset == -1) {
333 DRM_INFO("VGACON disable radeon kernel modesetting.\n");
334 driver = &driver_old;
335 driver->driver_features &= ~DRIVER_MODESET;
336 radeon_modeset = 0;
337 }
338#endif
339#endif
340 return drm_init(driver);
134} 341}
135 342
136static void __exit radeon_exit(void) 343static void __exit radeon_exit(void)
137{ 344{
138 drm_exit(&driver); 345 drm_exit(driver);
139} 346}
140 347
141module_init(radeon_init); 348late_initcall(radeon_init);
142module_exit(radeon_exit); 349module_exit(radeon_exit);
143 350
144MODULE_AUTHOR(DRIVER_AUTHOR); 351MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
new file mode 100644
index 000000000000..c8ef0d14ffab
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -0,0 +1,1708 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "drm_crtc_helper.h"
28#include "radeon_drm.h"
29#include "radeon.h"
30#include "atom.h"
31
32extern int atom_debug;
33
34uint32_t
35radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
36{
37 struct radeon_device *rdev = dev->dev_private;
38 uint32_t ret = 0;
39
40 switch (supported_device) {
41 case ATOM_DEVICE_CRT1_SUPPORT:
42 case ATOM_DEVICE_TV1_SUPPORT:
43 case ATOM_DEVICE_TV2_SUPPORT:
44 case ATOM_DEVICE_CRT2_SUPPORT:
45 case ATOM_DEVICE_CV_SUPPORT:
46 switch (dac) {
47 case 1: /* dac a */
48 if ((rdev->family == CHIP_RS300) ||
49 (rdev->family == CHIP_RS400) ||
50 (rdev->family == CHIP_RS480))
51 ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
52 else if (ASIC_IS_AVIVO(rdev))
53 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
54 else
55 ret = ENCODER_OBJECT_ID_INTERNAL_DAC1;
56 break;
57 case 2: /* dac b */
58 if (ASIC_IS_AVIVO(rdev))
59 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
60 else {
61 /*if (rdev->family == CHIP_R200)
62 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
63 else*/
64 ret = ENCODER_OBJECT_ID_INTERNAL_DAC2;
65 }
66 break;
67 case 3: /* external dac */
68 if (ASIC_IS_AVIVO(rdev))
69 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
70 else
71 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
72 break;
73 }
74 break;
75 case ATOM_DEVICE_LCD1_SUPPORT:
76 if (ASIC_IS_AVIVO(rdev))
77 ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
78 else
79 ret = ENCODER_OBJECT_ID_INTERNAL_LVDS;
80 break;
81 case ATOM_DEVICE_DFP1_SUPPORT:
82 if ((rdev->family == CHIP_RS300) ||
83 (rdev->family == CHIP_RS400) ||
84 (rdev->family == CHIP_RS480))
85 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
86 else if (ASIC_IS_AVIVO(rdev))
87 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
88 else
89 ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1;
90 break;
91 case ATOM_DEVICE_LCD2_SUPPORT:
92 case ATOM_DEVICE_DFP2_SUPPORT:
93 if ((rdev->family == CHIP_RS600) ||
94 (rdev->family == CHIP_RS690) ||
95 (rdev->family == CHIP_RS740))
96 ret = ENCODER_OBJECT_ID_INTERNAL_DDI;
97 else if (ASIC_IS_AVIVO(rdev))
98 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1;
99 else
100 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1;
101 break;
102 case ATOM_DEVICE_DFP3_SUPPORT:
103 ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1;
104 break;
105 }
106
107 return ret;
108}
109
110void
111radeon_link_encoder_connector(struct drm_device *dev)
112{
113 struct drm_connector *connector;
114 struct radeon_connector *radeon_connector;
115 struct drm_encoder *encoder;
116 struct radeon_encoder *radeon_encoder;
117
118 /* walk the list and link encoders to connectors */
119 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
120 radeon_connector = to_radeon_connector(connector);
121 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
122 radeon_encoder = to_radeon_encoder(encoder);
123 if (radeon_encoder->devices & radeon_connector->devices)
124 drm_mode_connector_attach_encoder(connector, encoder);
125 }
126 }
127}
128
129static struct drm_connector *
130radeon_get_connector_for_encoder(struct drm_encoder *encoder)
131{
132 struct drm_device *dev = encoder->dev;
133 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
134 struct drm_connector *connector;
135 struct radeon_connector *radeon_connector;
136
137 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
138 radeon_connector = to_radeon_connector(connector);
139 if (radeon_encoder->devices & radeon_connector->devices)
140 return connector;
141 }
142 return NULL;
143}
144
145/* used for both atom and legacy */
146void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
147 struct drm_display_mode *mode,
148 struct drm_display_mode *adjusted_mode)
149{
150 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
151 struct drm_device *dev = encoder->dev;
152 struct radeon_device *rdev = dev->dev_private;
153 struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;
154
155 if (mode->hdisplay < native_mode->panel_xres ||
156 mode->vdisplay < native_mode->panel_yres) {
157 radeon_encoder->flags |= RADEON_USE_RMX;
158 if (ASIC_IS_AVIVO(rdev)) {
159 adjusted_mode->hdisplay = native_mode->panel_xres;
160 adjusted_mode->vdisplay = native_mode->panel_yres;
161 adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
162 adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
163 adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
164 adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
165 adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
166 adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
167 /* update crtc values */
168 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
169 /* adjust crtc values */
170 adjusted_mode->crtc_hdisplay = native_mode->panel_xres;
171 adjusted_mode->crtc_vdisplay = native_mode->panel_yres;
172 adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
173 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
174 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
175 adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
176 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
177 adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
178 } else {
179 adjusted_mode->htotal = native_mode->panel_xres + native_mode->hblank;
180 adjusted_mode->hsync_start = native_mode->panel_xres + native_mode->hoverplus;
181 adjusted_mode->hsync_end = adjusted_mode->hsync_start + native_mode->hsync_width;
182 adjusted_mode->vtotal = native_mode->panel_yres + native_mode->vblank;
183 adjusted_mode->vsync_start = native_mode->panel_yres + native_mode->voverplus;
184 adjusted_mode->vsync_end = adjusted_mode->vsync_start + native_mode->vsync_width;
185 /* update crtc values */
186 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
187 /* adjust crtc values */
188 adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + native_mode->hblank;
189 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + native_mode->hoverplus;
190 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + native_mode->hsync_width;
191 adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + native_mode->vblank;
192 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + native_mode->voverplus;
193 adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + native_mode->vsync_width;
194 }
195 adjusted_mode->flags = native_mode->flags;
196 adjusted_mode->clock = native_mode->dotclock;
197 }
198}
199
200static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
201 struct drm_display_mode *mode,
202 struct drm_display_mode *adjusted_mode)
203{
204
205 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
206
207 radeon_encoder->flags &= ~RADEON_USE_RMX;
208
209 drm_mode_set_crtcinfo(adjusted_mode, 0);
210
211 if (radeon_encoder->rmx_type != RMX_OFF)
212 radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
213
214 /* hw bug */
215 if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
216 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
217 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
218
219 return true;
220}
221
222static void
223atombios_dac_setup(struct drm_encoder *encoder, int action)
224{
225 struct drm_device *dev = encoder->dev;
226 struct radeon_device *rdev = dev->dev_private;
227 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
228 DAC_ENCODER_CONTROL_PS_ALLOCATION args;
229 int index = 0, num = 0;
230 /* fixme - fill in enc_priv for atom dac */
231 enum radeon_tv_std tv_std = TV_STD_NTSC;
232
233 memset(&args, 0, sizeof(args));
234
235 switch (radeon_encoder->encoder_id) {
236 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
237 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
238 index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
239 num = 1;
240 break;
241 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
242 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
243 index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
244 num = 2;
245 break;
246 }
247
248 args.ucAction = action;
249
250 if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
251 args.ucDacStandard = ATOM_DAC1_PS2;
252 else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
253 args.ucDacStandard = ATOM_DAC1_CV;
254 else {
255 switch (tv_std) {
256 case TV_STD_PAL:
257 case TV_STD_PAL_M:
258 case TV_STD_SCART_PAL:
259 case TV_STD_SECAM:
260 case TV_STD_PAL_CN:
261 args.ucDacStandard = ATOM_DAC1_PAL;
262 break;
263 case TV_STD_NTSC:
264 case TV_STD_NTSC_J:
265 case TV_STD_PAL_60:
266 default:
267 args.ucDacStandard = ATOM_DAC1_NTSC;
268 break;
269 }
270 }
271 args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
272
273 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
274
275}
276
277static void
278atombios_tv_setup(struct drm_encoder *encoder, int action)
279{
280 struct drm_device *dev = encoder->dev;
281 struct radeon_device *rdev = dev->dev_private;
282 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
283 TV_ENCODER_CONTROL_PS_ALLOCATION args;
284 int index = 0;
285 /* fixme - fill in enc_priv for atom dac */
286 enum radeon_tv_std tv_std = TV_STD_NTSC;
287
288 memset(&args, 0, sizeof(args));
289
290 index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
291
292 args.sTVEncoder.ucAction = action;
293
294 if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
295 args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
296 else {
297 switch (tv_std) {
298 case TV_STD_NTSC:
299 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
300 break;
301 case TV_STD_PAL:
302 args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
303 break;
304 case TV_STD_PAL_M:
305 args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
306 break;
307 case TV_STD_PAL_60:
308 args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
309 break;
310 case TV_STD_NTSC_J:
311 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
312 break;
313 case TV_STD_SCART_PAL:
314 args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
315 break;
316 case TV_STD_SECAM:
317 args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
318 break;
319 case TV_STD_PAL_CN:
320 args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
321 break;
322 default:
323 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
324 break;
325 }
326 }
327
328 args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
329
330 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
331
332}
333
334void
335atombios_external_tmds_setup(struct drm_encoder *encoder, int action)
336{
337 struct drm_device *dev = encoder->dev;
338 struct radeon_device *rdev = dev->dev_private;
339 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
340 ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args;
341 int index = 0;
342
343 memset(&args, 0, sizeof(args));
344
345 index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
346
347 args.sXTmdsEncoder.ucEnable = action;
348
349 if (radeon_encoder->pixel_clock > 165000)
350 args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL;
351
352 /*if (pScrn->rgbBits == 8)*/
353 args.sXTmdsEncoder.ucMisc |= (1 << 1);
354
355 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
356
357}
358
359static void
360atombios_ddia_setup(struct drm_encoder *encoder, int action)
361{
362 struct drm_device *dev = encoder->dev;
363 struct radeon_device *rdev = dev->dev_private;
364 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
365 DVO_ENCODER_CONTROL_PS_ALLOCATION args;
366 int index = 0;
367
368 memset(&args, 0, sizeof(args));
369
370 index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
371
372 args.sDVOEncoder.ucAction = action;
373 args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
374
375 if (radeon_encoder->pixel_clock > 165000)
376 args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL;
377
378 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
379
380}
381
382union lvds_encoder_control {
383 LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
384 LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
385};
386
387static void
388atombios_digital_setup(struct drm_encoder *encoder, int action)
389{
390 struct drm_device *dev = encoder->dev;
391 struct radeon_device *rdev = dev->dev_private;
392 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
393 union lvds_encoder_control args;
394 int index = 0;
395 uint8_t frev, crev;
396 struct radeon_encoder_atom_dig *dig;
397 struct drm_connector *connector;
398 struct radeon_connector *radeon_connector;
399 struct radeon_connector_atom_dig *dig_connector;
400
401 connector = radeon_get_connector_for_encoder(encoder);
402 if (!connector)
403 return;
404
405 radeon_connector = to_radeon_connector(connector);
406
407 if (!radeon_encoder->enc_priv)
408 return;
409
410 dig = radeon_encoder->enc_priv;
411
412 if (!radeon_connector->con_priv)
413 return;
414
415 dig_connector = radeon_connector->con_priv;
416
417 memset(&args, 0, sizeof(args));
418
419 switch (radeon_encoder->encoder_id) {
420 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
421 index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
422 break;
423 case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
424 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
425 index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
426 break;
427 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
428 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
429 index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
430 else
431 index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
432 break;
433 }
434
435 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
436
437 switch (frev) {
438 case 1:
439 case 2:
440 switch (crev) {
441 case 1:
442 args.v1.ucMisc = 0;
443 args.v1.ucAction = action;
444 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
445 args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
446 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
447 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
448 if (dig->lvds_misc & (1 << 0))
449 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
450 if (dig->lvds_misc & (1 << 1))
451 args.v1.ucMisc |= (1 << 1);
452 } else {
453 if (dig_connector->linkb)
454 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
455 if (radeon_encoder->pixel_clock > 165000)
456 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
457 /*if (pScrn->rgbBits == 8) */
458 args.v1.ucMisc |= (1 << 1);
459 }
460 break;
461 case 2:
462 case 3:
463 args.v2.ucMisc = 0;
464 args.v2.ucAction = action;
465 if (crev == 3) {
466 if (dig->coherent_mode)
467 args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
468 }
469 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
470 args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
471 args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
472 args.v2.ucTruncate = 0;
473 args.v2.ucSpatial = 0;
474 args.v2.ucTemporal = 0;
475 args.v2.ucFRC = 0;
476 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
477 if (dig->lvds_misc & (1 << 0))
478 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
479 if (dig->lvds_misc & (1 << 5)) {
480 args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
481 if (dig->lvds_misc & (1 << 1))
482 args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
483 }
484 if (dig->lvds_misc & (1 << 6)) {
485 args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
486 if (dig->lvds_misc & (1 << 1))
487 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
488 if (((dig->lvds_misc >> 2) & 0x3) == 2)
489 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
490 }
491 } else {
492 if (dig_connector->linkb)
493 args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
494 if (radeon_encoder->pixel_clock > 165000)
495 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
496 }
497 break;
498 default:
499 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
500 break;
501 }
502 break;
503 default:
504 DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
505 break;
506 }
507
508 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
509
510}
511
512int
513atombios_get_encoder_mode(struct drm_encoder *encoder)
514{
515 struct drm_connector *connector;
516 struct radeon_connector *radeon_connector;
517
518 connector = radeon_get_connector_for_encoder(encoder);
519 if (!connector)
520 return 0;
521
522 radeon_connector = to_radeon_connector(connector);
523
524 switch (connector->connector_type) {
525 case DRM_MODE_CONNECTOR_DVII:
526 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
527 return ATOM_ENCODER_MODE_HDMI;
528 else if (radeon_connector->use_digital)
529 return ATOM_ENCODER_MODE_DVI;
530 else
531 return ATOM_ENCODER_MODE_CRT;
532 break;
533 case DRM_MODE_CONNECTOR_DVID:
534 case DRM_MODE_CONNECTOR_HDMIA:
535 case DRM_MODE_CONNECTOR_HDMIB:
536 default:
537 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
538 return ATOM_ENCODER_MODE_HDMI;
539 else
540 return ATOM_ENCODER_MODE_DVI;
541 break;
542 case DRM_MODE_CONNECTOR_LVDS:
543 return ATOM_ENCODER_MODE_LVDS;
544 break;
545 case DRM_MODE_CONNECTOR_DisplayPort:
546 /*if (radeon_output->MonType == MT_DP)
547 return ATOM_ENCODER_MODE_DP;
548 else*/
549 if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr))
550 return ATOM_ENCODER_MODE_HDMI;
551 else
552 return ATOM_ENCODER_MODE_DVI;
553 break;
554 case CONNECTOR_DVI_A:
555 case CONNECTOR_VGA:
556 return ATOM_ENCODER_MODE_CRT;
557 break;
558 case CONNECTOR_STV:
559 case CONNECTOR_CTV:
560 case CONNECTOR_DIN:
561 /* fix me */
562 return ATOM_ENCODER_MODE_TV;
563 /*return ATOM_ENCODER_MODE_CV;*/
564 break;
565 }
566}
567
568static void
569atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
570{
571 struct drm_device *dev = encoder->dev;
572 struct radeon_device *rdev = dev->dev_private;
573 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
574 DIG_ENCODER_CONTROL_PS_ALLOCATION args;
575 int index = 0, num = 0;
576 uint8_t frev, crev;
577 struct radeon_encoder_atom_dig *dig;
578 struct drm_connector *connector;
579 struct radeon_connector *radeon_connector;
580 struct radeon_connector_atom_dig *dig_connector;
581
582 connector = radeon_get_connector_for_encoder(encoder);
583 if (!connector)
584 return;
585
586 radeon_connector = to_radeon_connector(connector);
587
588 if (!radeon_connector->con_priv)
589 return;
590
591 dig_connector = radeon_connector->con_priv;
592
593 if (!radeon_encoder->enc_priv)
594 return;
595
596 dig = radeon_encoder->enc_priv;
597
598 memset(&args, 0, sizeof(args));
599
600 if (ASIC_IS_DCE32(rdev)) {
601 if (dig->dig_block)
602 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
603 else
604 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
605 num = dig->dig_block + 1;
606 } else {
607 switch (radeon_encoder->encoder_id) {
608 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
609 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
610 num = 1;
611 break;
612 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
613 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
614 num = 2;
615 break;
616 }
617 }
618
619 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
620
621 args.ucAction = action;
622 args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
623
624 if (ASIC_IS_DCE32(rdev)) {
625 switch (radeon_encoder->encoder_id) {
626 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
627 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
628 break;
629 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
630 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
631 break;
632 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
633 args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
634 break;
635 }
636 } else {
637 switch (radeon_encoder->encoder_id) {
638 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
639 args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1;
640 break;
641 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
642 args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2;
643 break;
644 }
645 }
646
647 if (radeon_encoder->pixel_clock > 165000) {
648 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B;
649 args.ucLaneNum = 8;
650 } else {
651 if (dig_connector->linkb)
652 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
653 else
654 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
655 args.ucLaneNum = 4;
656 }
657
658 args.ucEncoderMode = atombios_get_encoder_mode(encoder);
659
660 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
661
662}
663
664union dig_transmitter_control {
665 DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
666 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
667};
668
669static void
670atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
671{
672 struct drm_device *dev = encoder->dev;
673 struct radeon_device *rdev = dev->dev_private;
674 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
675 union dig_transmitter_control args;
676 int index = 0, num = 0;
677 uint8_t frev, crev;
678 struct radeon_encoder_atom_dig *dig;
679 struct drm_connector *connector;
680 struct radeon_connector *radeon_connector;
681 struct radeon_connector_atom_dig *dig_connector;
682
683 connector = radeon_get_connector_for_encoder(encoder);
684 if (!connector)
685 return;
686
687 radeon_connector = to_radeon_connector(connector);
688
689 if (!radeon_encoder->enc_priv)
690 return;
691
692 dig = radeon_encoder->enc_priv;
693
694 if (!radeon_connector->con_priv)
695 return;
696
697 dig_connector = radeon_connector->con_priv;
698
699 memset(&args, 0, sizeof(args));
700
701 if (ASIC_IS_DCE32(rdev))
702 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
703 else {
704 switch (radeon_encoder->encoder_id) {
705 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
706 index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl);
707 break;
708 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
709 index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
710 break;
711 }
712 }
713
714 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
715
716 args.v1.ucAction = action;
717
718 if (ASIC_IS_DCE32(rdev)) {
719 if (radeon_encoder->pixel_clock > 165000) {
720 args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 2) / 100);
721 args.v2.acConfig.fDualLinkConnector = 1;
722 } else {
723 args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock * 10 * 4) / 100);
724 }
725 if (dig->dig_block)
726 args.v2.acConfig.ucEncoderSel = 1;
727
728 switch (radeon_encoder->encoder_id) {
729 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
730 args.v2.acConfig.ucTransmitterSel = 0;
731 num = 0;
732 break;
733 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
734 args.v2.acConfig.ucTransmitterSel = 1;
735 num = 1;
736 break;
737 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
738 args.v2.acConfig.ucTransmitterSel = 2;
739 num = 2;
740 break;
741 }
742
743 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
744 if (dig->coherent_mode)
745 args.v2.acConfig.fCoherentMode = 1;
746 }
747 } else {
748 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
749 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock) / 10);
750
751 switch (radeon_encoder->encoder_id) {
752 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
753 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
754 if (rdev->flags & RADEON_IS_IGP) {
755 if (radeon_encoder->pixel_clock > 165000) {
756 args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
757 ATOM_TRANSMITTER_CONFIG_LINKA_B);
758 if (dig_connector->igp_lane_info & 0x3)
759 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
760 else if (dig_connector->igp_lane_info & 0xc)
761 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
762 } else {
763 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
764 if (dig_connector->igp_lane_info & 0x1)
765 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
766 else if (dig_connector->igp_lane_info & 0x2)
767 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
768 else if (dig_connector->igp_lane_info & 0x4)
769 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
770 else if (dig_connector->igp_lane_info & 0x8)
771 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
772 }
773 } else {
774 if (radeon_encoder->pixel_clock > 165000)
775 args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
776 ATOM_TRANSMITTER_CONFIG_LINKA_B |
777 ATOM_TRANSMITTER_CONFIG_LANE_0_7);
778 else {
779 if (dig_connector->linkb)
780 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
781 else
782 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
783 }
784 }
785 break;
786 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
787 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
788 if (radeon_encoder->pixel_clock > 165000)
789 args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
790 ATOM_TRANSMITTER_CONFIG_LINKA_B |
791 ATOM_TRANSMITTER_CONFIG_LANE_0_7);
792 else {
793 if (dig_connector->linkb)
794 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
795 else
796 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
797 }
798 break;
799 }
800
801 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
802 if (dig->coherent_mode)
803 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
804 }
805 }
806
807 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
808
809}
810
811static void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
812{
813
814 WREG32(0x659C, 0x0);
815 WREG32(0x6594, 0x705);
816 WREG32(0x65A4, 0x10001);
817 WREG32(0x65D8, 0x0);
818 WREG32(0x65B0, 0x0);
819 WREG32(0x65C0, 0x0);
820 WREG32(0x65D4, 0x0);
821 WREG32(0x6578, 0x0);
822 WREG32(0x657C, 0x841880A8);
823 WREG32(0x6578, 0x1);
824 WREG32(0x657C, 0x84208680);
825 WREG32(0x6578, 0x2);
826 WREG32(0x657C, 0xBFF880B0);
827 WREG32(0x6578, 0x100);
828 WREG32(0x657C, 0x83D88088);
829 WREG32(0x6578, 0x101);
830 WREG32(0x657C, 0x84608680);
831 WREG32(0x6578, 0x102);
832 WREG32(0x657C, 0xBFF080D0);
833 WREG32(0x6578, 0x200);
834 WREG32(0x657C, 0x83988068);
835 WREG32(0x6578, 0x201);
836 WREG32(0x657C, 0x84A08680);
837 WREG32(0x6578, 0x202);
838 WREG32(0x657C, 0xBFF080F8);
839 WREG32(0x6578, 0x300);
840 WREG32(0x657C, 0x83588058);
841 WREG32(0x6578, 0x301);
842 WREG32(0x657C, 0x84E08660);
843 WREG32(0x6578, 0x302);
844 WREG32(0x657C, 0xBFF88120);
845 WREG32(0x6578, 0x400);
846 WREG32(0x657C, 0x83188040);
847 WREG32(0x6578, 0x401);
848 WREG32(0x657C, 0x85008660);
849 WREG32(0x6578, 0x402);
850 WREG32(0x657C, 0xBFF88150);
851 WREG32(0x6578, 0x500);
852 WREG32(0x657C, 0x82D88030);
853 WREG32(0x6578, 0x501);
854 WREG32(0x657C, 0x85408640);
855 WREG32(0x6578, 0x502);
856 WREG32(0x657C, 0xBFF88180);
857 WREG32(0x6578, 0x600);
858 WREG32(0x657C, 0x82A08018);
859 WREG32(0x6578, 0x601);
860 WREG32(0x657C, 0x85808620);
861 WREG32(0x6578, 0x602);
862 WREG32(0x657C, 0xBFF081B8);
863 WREG32(0x6578, 0x700);
864 WREG32(0x657C, 0x82608010);
865 WREG32(0x6578, 0x701);
866 WREG32(0x657C, 0x85A08600);
867 WREG32(0x6578, 0x702);
868 WREG32(0x657C, 0x800081F0);
869 WREG32(0x6578, 0x800);
870 WREG32(0x657C, 0x8228BFF8);
871 WREG32(0x6578, 0x801);
872 WREG32(0x657C, 0x85E085E0);
873 WREG32(0x6578, 0x802);
874 WREG32(0x657C, 0xBFF88228);
875 WREG32(0x6578, 0x10000);
876 WREG32(0x657C, 0x82A8BF00);
877 WREG32(0x6578, 0x10001);
878 WREG32(0x657C, 0x82A08CC0);
879 WREG32(0x6578, 0x10002);
880 WREG32(0x657C, 0x8008BEF8);
881 WREG32(0x6578, 0x10100);
882 WREG32(0x657C, 0x81F0BF28);
883 WREG32(0x6578, 0x10101);
884 WREG32(0x657C, 0x83608CA0);
885 WREG32(0x6578, 0x10102);
886 WREG32(0x657C, 0x8018BED0);
887 WREG32(0x6578, 0x10200);
888 WREG32(0x657C, 0x8148BF38);
889 WREG32(0x6578, 0x10201);
890 WREG32(0x657C, 0x84408C80);
891 WREG32(0x6578, 0x10202);
892 WREG32(0x657C, 0x8008BEB8);
893 WREG32(0x6578, 0x10300);
894 WREG32(0x657C, 0x80B0BF78);
895 WREG32(0x6578, 0x10301);
896 WREG32(0x657C, 0x85008C20);
897 WREG32(0x6578, 0x10302);
898 WREG32(0x657C, 0x8020BEA0);
899 WREG32(0x6578, 0x10400);
900 WREG32(0x657C, 0x8028BF90);
901 WREG32(0x6578, 0x10401);
902 WREG32(0x657C, 0x85E08BC0);
903 WREG32(0x6578, 0x10402);
904 WREG32(0x657C, 0x8018BE90);
905 WREG32(0x6578, 0x10500);
906 WREG32(0x657C, 0xBFB8BFB0);
907 WREG32(0x6578, 0x10501);
908 WREG32(0x657C, 0x86C08B40);
909 WREG32(0x6578, 0x10502);
910 WREG32(0x657C, 0x8010BE90);
911 WREG32(0x6578, 0x10600);
912 WREG32(0x657C, 0xBF58BFC8);
913 WREG32(0x6578, 0x10601);
914 WREG32(0x657C, 0x87A08AA0);
915 WREG32(0x6578, 0x10602);
916 WREG32(0x657C, 0x8010BE98);
917 WREG32(0x6578, 0x10700);
918 WREG32(0x657C, 0xBF10BFF0);
919 WREG32(0x6578, 0x10701);
920 WREG32(0x657C, 0x886089E0);
921 WREG32(0x6578, 0x10702);
922 WREG32(0x657C, 0x8018BEB0);
923 WREG32(0x6578, 0x10800);
924 WREG32(0x657C, 0xBED8BFE8);
925 WREG32(0x6578, 0x10801);
926 WREG32(0x657C, 0x89408940);
927 WREG32(0x6578, 0x10802);
928 WREG32(0x657C, 0xBFE8BED8);
929 WREG32(0x6578, 0x20000);
930 WREG32(0x657C, 0x80008000);
931 WREG32(0x6578, 0x20001);
932 WREG32(0x657C, 0x90008000);
933 WREG32(0x6578, 0x20002);
934 WREG32(0x657C, 0x80008000);
935 WREG32(0x6578, 0x20003);
936 WREG32(0x657C, 0x80008000);
937 WREG32(0x6578, 0x20100);
938 WREG32(0x657C, 0x80108000);
939 WREG32(0x6578, 0x20101);
940 WREG32(0x657C, 0x8FE0BF70);
941 WREG32(0x6578, 0x20102);
942 WREG32(0x657C, 0xBFE880C0);
943 WREG32(0x6578, 0x20103);
944 WREG32(0x657C, 0x80008000);
945 WREG32(0x6578, 0x20200);
946 WREG32(0x657C, 0x8018BFF8);
947 WREG32(0x6578, 0x20201);
948 WREG32(0x657C, 0x8F80BF08);
949 WREG32(0x6578, 0x20202);
950 WREG32(0x657C, 0xBFD081A0);
951 WREG32(0x6578, 0x20203);
952 WREG32(0x657C, 0xBFF88000);
953 WREG32(0x6578, 0x20300);
954 WREG32(0x657C, 0x80188000);
955 WREG32(0x6578, 0x20301);
956 WREG32(0x657C, 0x8EE0BEC0);
957 WREG32(0x6578, 0x20302);
958 WREG32(0x657C, 0xBFB082A0);
959 WREG32(0x6578, 0x20303);
960 WREG32(0x657C, 0x80008000);
961 WREG32(0x6578, 0x20400);
962 WREG32(0x657C, 0x80188000);
963 WREG32(0x6578, 0x20401);
964 WREG32(0x657C, 0x8E00BEA0);
965 WREG32(0x6578, 0x20402);
966 WREG32(0x657C, 0xBF8883C0);
967 WREG32(0x6578, 0x20403);
968 WREG32(0x657C, 0x80008000);
969 WREG32(0x6578, 0x20500);
970 WREG32(0x657C, 0x80188000);
971 WREG32(0x6578, 0x20501);
972 WREG32(0x657C, 0x8D00BE90);
973 WREG32(0x6578, 0x20502);
974 WREG32(0x657C, 0xBF588500);
975 WREG32(0x6578, 0x20503);
976 WREG32(0x657C, 0x80008008);
977 WREG32(0x6578, 0x20600);
978 WREG32(0x657C, 0x80188000);
979 WREG32(0x6578, 0x20601);
980 WREG32(0x657C, 0x8BC0BE98);
981 WREG32(0x6578, 0x20602);
982 WREG32(0x657C, 0xBF308660);
983 WREG32(0x6578, 0x20603);
984 WREG32(0x657C, 0x80008008);
985 WREG32(0x6578, 0x20700);
986 WREG32(0x657C, 0x80108000);
987 WREG32(0x6578, 0x20701);
988 WREG32(0x657C, 0x8A80BEB0);
989 WREG32(0x6578, 0x20702);
990 WREG32(0x657C, 0xBF0087C0);
991 WREG32(0x6578, 0x20703);
992 WREG32(0x657C, 0x80008008);
993 WREG32(0x6578, 0x20800);
994 WREG32(0x657C, 0x80108000);
995 WREG32(0x6578, 0x20801);
996 WREG32(0x657C, 0x8920BED0);
997 WREG32(0x6578, 0x20802);
998 WREG32(0x657C, 0xBED08920);
999 WREG32(0x6578, 0x20803);
1000 WREG32(0x657C, 0x80008010);
1001 WREG32(0x6578, 0x30000);
1002 WREG32(0x657C, 0x90008000);
1003 WREG32(0x6578, 0x30001);
1004 WREG32(0x657C, 0x80008000);
1005 WREG32(0x6578, 0x30100);
1006 WREG32(0x657C, 0x8FE0BF90);
1007 WREG32(0x6578, 0x30101);
1008 WREG32(0x657C, 0xBFF880A0);
1009 WREG32(0x6578, 0x30200);
1010 WREG32(0x657C, 0x8F60BF40);
1011 WREG32(0x6578, 0x30201);
1012 WREG32(0x657C, 0xBFE88180);
1013 WREG32(0x6578, 0x30300);
1014 WREG32(0x657C, 0x8EC0BF00);
1015 WREG32(0x6578, 0x30301);
1016 WREG32(0x657C, 0xBFC88280);
1017 WREG32(0x6578, 0x30400);
1018 WREG32(0x657C, 0x8DE0BEE0);
1019 WREG32(0x6578, 0x30401);
1020 WREG32(0x657C, 0xBFA083A0);
1021 WREG32(0x6578, 0x30500);
1022 WREG32(0x657C, 0x8CE0BED0);
1023 WREG32(0x6578, 0x30501);
1024 WREG32(0x657C, 0xBF7884E0);
1025 WREG32(0x6578, 0x30600);
1026 WREG32(0x657C, 0x8BA0BED8);
1027 WREG32(0x6578, 0x30601);
1028 WREG32(0x657C, 0xBF508640);
1029 WREG32(0x6578, 0x30700);
1030 WREG32(0x657C, 0x8A60BEE8);
1031 WREG32(0x6578, 0x30701);
1032 WREG32(0x657C, 0xBF2087A0);
1033 WREG32(0x6578, 0x30800);
1034 WREG32(0x657C, 0x8900BF00);
1035 WREG32(0x6578, 0x30801);
1036 WREG32(0x657C, 0xBF008900);
1037}
1038
/*
 * atombios_yuv_setup - enable/disable YUV output via the EnableYUV
 * AtomBIOS command table for the CRTC driving this encoder.
 *
 * The BIOS table consults the active-device bits in BIOS scratch
 * register 3, so that register is temporarily overwritten with the
 * TV/CV bit for this CRTC before executing the table and restored
 * afterwards.
 */
static void
atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	ENABLE_YUV_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
	uint32_t temp, reg;

	memset(&args, 0, sizeof(args));

	/* scratch register 3 lives at a different offset on R600+ */
	if (rdev->family >= CHIP_R600)
		reg = R600_BIOS_3_SCRATCH;
	else
		reg = RADEON_BIOS_3_SCRATCH;

	/* XXX: fix up scratch reg handling */
	temp = RREG32(reg);	/* saved; restored after the table runs */
	if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
		WREG32(reg, (ATOM_S3_TV1_ACTIVE |
			     (radeon_crtc->crtc_id << 18)));
	else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
		WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
	else
		WREG32(reg, 0);

	/* args was zeroed, so ucEnable stays ATOM "disable" unless set */
	if (enable)
		args.ucEnable = ATOM_ENABLE;
	args.ucCRTC = radeon_crtc->crtc_id;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	/* restore the saved scratch register contents */
	WREG32(reg, temp);
}
1075
1076static void
1077atombios_overscan_setup(struct drm_encoder *encoder,
1078 struct drm_display_mode *mode,
1079 struct drm_display_mode *adjusted_mode)
1080{
1081 struct drm_device *dev = encoder->dev;
1082 struct radeon_device *rdev = dev->dev_private;
1083 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1084 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1085 SET_CRTC_OVERSCAN_PS_ALLOCATION args;
1086 int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
1087
1088 memset(&args, 0, sizeof(args));
1089
1090 args.usOverscanRight = 0;
1091 args.usOverscanLeft = 0;
1092 args.usOverscanBottom = 0;
1093 args.usOverscanTop = 0;
1094 args.ucCRTC = radeon_crtc->crtc_id;
1095
1096 if (radeon_encoder->flags & RADEON_USE_RMX) {
1097 if (radeon_encoder->rmx_type == RMX_FULL) {
1098 args.usOverscanRight = 0;
1099 args.usOverscanLeft = 0;
1100 args.usOverscanBottom = 0;
1101 args.usOverscanTop = 0;
1102 } else if (radeon_encoder->rmx_type == RMX_CENTER) {
1103 args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
1104 args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
1105 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
1106 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
1107 } else if (radeon_encoder->rmx_type == RMX_ASPECT) {
1108 int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
1109 int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
1110
1111 if (a1 > a2) {
1112 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
1113 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
1114 } else if (a2 > a1) {
1115 args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
1116 args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
1117 }
1118 }
1119 }
1120
1121 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1122
1123}
1124
1125static void
1126atombios_scaler_setup(struct drm_encoder *encoder)
1127{
1128 struct drm_device *dev = encoder->dev;
1129 struct radeon_device *rdev = dev->dev_private;
1130 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1131 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1132 ENABLE_SCALER_PS_ALLOCATION args;
1133 int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
1134 /* fixme - fill in enc_priv for atom dac */
1135 enum radeon_tv_std tv_std = TV_STD_NTSC;
1136
1137 if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
1138 return;
1139
1140 memset(&args, 0, sizeof(args));
1141
1142 args.ucScaler = radeon_crtc->crtc_id;
1143
1144 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
1145 switch (tv_std) {
1146 case TV_STD_NTSC:
1147 default:
1148 args.ucTVStandard = ATOM_TV_NTSC;
1149 break;
1150 case TV_STD_PAL:
1151 args.ucTVStandard = ATOM_TV_PAL;
1152 break;
1153 case TV_STD_PAL_M:
1154 args.ucTVStandard = ATOM_TV_PALM;
1155 break;
1156 case TV_STD_PAL_60:
1157 args.ucTVStandard = ATOM_TV_PAL60;
1158 break;
1159 case TV_STD_NTSC_J:
1160 args.ucTVStandard = ATOM_TV_NTSCJ;
1161 break;
1162 case TV_STD_SCART_PAL:
1163 args.ucTVStandard = ATOM_TV_PAL; /* ??? */
1164 break;
1165 case TV_STD_SECAM:
1166 args.ucTVStandard = ATOM_TV_SECAM;
1167 break;
1168 case TV_STD_PAL_CN:
1169 args.ucTVStandard = ATOM_TV_PALCN;
1170 break;
1171 }
1172 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
1173 } else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) {
1174 args.ucTVStandard = ATOM_TV_CV;
1175 args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
1176 } else if (radeon_encoder->flags & RADEON_USE_RMX) {
1177 if (radeon_encoder->rmx_type == RMX_FULL)
1178 args.ucEnable = ATOM_SCALER_EXPANSION;
1179 else if (radeon_encoder->rmx_type == RMX_CENTER)
1180 args.ucEnable = ATOM_SCALER_CENTER;
1181 else if (radeon_encoder->rmx_type == RMX_ASPECT)
1182 args.ucEnable = ATOM_SCALER_EXPANSION;
1183 } else {
1184 if (ASIC_IS_AVIVO(rdev))
1185 args.ucEnable = ATOM_SCALER_DISABLE;
1186 else
1187 args.ucEnable = ATOM_SCALER_CENTER;
1188 }
1189
1190 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1191
1192 if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)
1193 && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) {
1194 atom_rv515_force_tv_scaler(rdev);
1195 }
1196
1197}
1198
/*
 * radeon_atom_encoder_dpms - DPMS hook for AtomBIOS encoders.
 * @encoder: encoder to power up or down
 * @mode: DRM_MODE_DPMS_ON/STANDBY/SUSPEND/OFF
 *
 * DIG (UNIPHY/LVTMA) encoders are switched through the digital
 * transmitter setup path; every other encoder is switched by running
 * its matching per-device output-control AtomBIOS table with an
 * enable/disable action. The BIOS scratch registers are updated last
 * so the BIOS sees the resulting DPMS state.
 */
static void
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
	int index = 0;
	bool is_dig = false;

	memset(&args, 0, sizeof(args));

	/* pick the output-control table matching this encoder object */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
		index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		/* DIG encoders go through the transmitter, not a table */
		is_dig = true;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
		index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		/* LVTM1 drives either the panel or an external DFP */
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
		/* DACs can feed TV, component (CV) or CRT outputs */
		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
		else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
		else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
		break;
	}

	if (is_dig) {
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			/* anything other than ON powers the transmitter down */
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
			break;
		}
	} else {
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			args.ucAction = ATOM_ENABLE;
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			args.ucAction = ATOM_DISABLE;
			break;
		}
		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
	}
	/* mirror the new DPMS state into the BIOS scratch registers */
	radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
}
1282
/* Parameter block for the SelectCRTC_Source table; the command
 * revision (crev) reported by the BIOS selects which layout applies. */
union crtc_sourc_param {
	SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
	SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
};
1287
/*
 * atombios_set_encoder_crtc_source - route a CRTC to this encoder via
 * the SelectCRTC_Source AtomBIOS command table.
 *
 * The parameter layout depends on the table revision parsed from the
 * BIOS: v1 identifies the output by device index, v2 by encoder ID
 * plus encoder mode.
 */
static void
atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	union crtc_sourc_param args;
	int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
	uint8_t frev, crev;

	memset(&args, 0, sizeof(args));

	/* query the table revision to pick the parameter layout */
	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
		default:
			if (ASIC_IS_AVIVO(rdev))
				args.v1.ucCRTC = radeon_crtc->crtc_id;
			else {
				/* pre-AVIVO: DAC1 takes the raw CRTC id,
				 * everything else wants it shifted */
				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
					args.v1.ucCRTC = radeon_crtc->crtc_id;
				} else {
					args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
				}
			}
			/* v1: map the encoder object to its device index */
			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
				args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_LVDS:
			case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
				if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
					args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
				else
					args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_DVO1:
			case ENCODER_OBJECT_ID_INTERNAL_DDI:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
				args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_DAC1:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
				/* DAC device index depends on what it drives */
				if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
				else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
				else
					args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_DAC2:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
				if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
				else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
				else
					args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
				break;
			}
			break;
		case 2:
			/* v2: identify the output by internal encoder ID */
			args.v2.ucCRTC = radeon_crtc->crtc_id;
			args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
				/* DCE3.2 has two DIG blocks, one per CRTC */
				if (ASIC_IS_DCE32(rdev)) {
					if (radeon_crtc->crtc_id)
						args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
					else
						args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
				} else
					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
				args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
				args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
				/* TV and CV both use the TV encoder ID */
				if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
				else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
				else
					args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
				if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT))
					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
				else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT))
					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
				else
					args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
				break;
			}
			break;
		}
		break;
	default:
		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
		break;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

}
1403
1404static void
1405atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1406 struct drm_display_mode *mode)
1407{
1408 struct drm_device *dev = encoder->dev;
1409 struct radeon_device *rdev = dev->dev_private;
1410 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1411 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1412
1413 /* Funky macbooks */
1414 if ((dev->pdev->device == 0x71C5) &&
1415 (dev->pdev->subsystem_vendor == 0x106b) &&
1416 (dev->pdev->subsystem_device == 0x0080)) {
1417 if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
1418 uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
1419
1420 lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
1421 lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
1422
1423 WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
1424 }
1425 }
1426
1427 /* set scaler clears this on some chips */
1428 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
1429 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, AVIVO_D1MODE_INTERLEAVE_EN);
1430}
1431
/*
 * radeon_atom_encoder_mode_set - drm mode_set hook for AtomBIOS
 * encoders.
 *
 * Records the pixel clock and DIG block, programs scratch registers,
 * overscan, scaler and CRTC routing, optional YUV setup on AVIVO, and
 * finally runs the encoder-type-specific enable sequence followed by
 * board quirks.
 */
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);

	/* remember which DIG block serves this CRTC (digital encoders) */
	if (radeon_encoder->enc_priv) {
		struct radeon_encoder_atom_dig *dig;

		dig = radeon_encoder->enc_priv;
		dig->dig_block = radeon_crtc->crtc_id;
	}
	radeon_encoder->pixel_clock = adjusted_mode->clock;

	radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
	atombios_overscan_setup(encoder, mode, adjusted_mode);
	atombios_scaler_setup(encoder);
	atombios_set_encoder_crtc_source(encoder);

	/* YUV only matters for TV/CV outputs on AVIVO parts */
	if (ASIC_IS_AVIVO(rdev)) {
		if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
			atombios_yuv_setup(encoder, true);
		else
			atombios_yuv_setup(encoder, false);
	}

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		/* disable the encoder and transmitter */
		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
		atombios_dig_encoder_setup(encoder, ATOM_DISABLE);

		/* setup and enable the encoder and transmitter */
		atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
		atombios_ddia_setup(encoder, ATOM_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		atombios_external_tmds_setup(encoder, ATOM_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		atombios_dac_setup(encoder, ATOM_ENABLE);
		/* DAC may additionally need the TV encoder enabled */
		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
			atombios_tv_setup(encoder, ATOM_ENABLE);
		break;
	}
	atombios_apply_encoder_quirks(encoder, adjusted_mode);
}
1500
1501static bool
1502atombios_dac_load_detect(struct drm_encoder *encoder)
1503{
1504 struct drm_device *dev = encoder->dev;
1505 struct radeon_device *rdev = dev->dev_private;
1506 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1507
1508 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
1509 ATOM_DEVICE_CV_SUPPORT |
1510 ATOM_DEVICE_CRT_SUPPORT)) {
1511 DAC_LOAD_DETECTION_PS_ALLOCATION args;
1512 int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
1513 uint8_t frev, crev;
1514
1515 memset(&args, 0, sizeof(args));
1516
1517 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
1518
1519 args.sDacload.ucMisc = 0;
1520
1521 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
1522 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
1523 args.sDacload.ucDacType = ATOM_DAC_A;
1524 else
1525 args.sDacload.ucDacType = ATOM_DAC_B;
1526
1527 if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT)
1528 args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
1529 else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT)
1530 args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
1531 else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
1532 args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
1533 if (crev >= 3)
1534 args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
1535 } else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
1536 args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
1537 if (crev >= 3)
1538 args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
1539 }
1540
1541 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1542
1543 return true;
1544 } else
1545 return false;
1546}
1547
/*
 * radeon_atom_dac_detect - drm detect hook for AtomBIOS DAC encoders.
 *
 * Triggers BIOS load detection and then inspects the connected-device
 * bits that the BIOS leaves in scratch register 0.
 */
static enum drm_connector_status
radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	uint32_t bios_0_scratch;

	if (!atombios_dac_load_detect(encoder)) {
		DRM_DEBUG("detect returned false \n");
		return connector_status_unknown;
	}

	/* scratch register 0 lives at a different offset on R600+ */
	if (rdev->family >= CHIP_R600)
		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
	else
		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);

	DRM_DEBUG("Bios 0 scratch %x\n", bios_0_scratch);
	/* check the connected bit for whichever device this encoder drives */
	if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) {
		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
			return connector_status_connected;
	} else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) {
		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
			return connector_status_connected;
	} else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) {
		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
			return connector_status_connected;
	} else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) {
		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
			return connector_status_connected; /* CTV */
		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
			return connector_status_connected; /* STV */
	}
	return connector_status_disconnected;
}
1584
/* drm prepare hook: lock the output and blank the encoder before a
 * mode set begins. */
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
	radeon_atom_output_lock(encoder, true);
	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
}
1590
/* drm commit hook: power the encoder back on after the mode set and
 * release the output lock taken in radeon_atom_encoder_prepare(). */
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
{
	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	radeon_atom_output_lock(encoder, false);
}
1596
/* Helper vtable for digital (TMDS/LVDS/DIG) AtomBIOS encoders. */
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
	.dpms = radeon_atom_encoder_dpms,
	.mode_fixup = radeon_atom_mode_fixup,
	.prepare = radeon_atom_encoder_prepare,
	.mode_set = radeon_atom_encoder_mode_set,
	.commit = radeon_atom_encoder_commit,
	/* no detect for TMDS/LVDS yet */
};
1605
/* Helper vtable for analog DAC encoders; unlike the digital table it
 * supports load detection. */
static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
	.dpms = radeon_atom_encoder_dpms,
	.mode_fixup = radeon_atom_mode_fixup,
	.prepare = radeon_atom_encoder_prepare,
	.mode_set = radeon_atom_encoder_mode_set,
	.commit = radeon_atom_encoder_commit,
	.detect = radeon_atom_dac_detect,
};
1614
/* drm_encoder_funcs.destroy: free the encoder's private data, detach
 * it from the mode config, then free the encoder itself (the embedded
 * drm_encoder must be cleaned up before its container is freed). */
void radeon_enc_destroy(struct drm_encoder *encoder)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	kfree(radeon_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(radeon_encoder);
}
1622
/* Core encoder vtable shared by every AtomBIOS encoder type. */
static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
	.destroy = radeon_enc_destroy,
};
1626
1627struct radeon_encoder_atom_dig *
1628radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
1629{
1630 struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
1631
1632 if (!dig)
1633 return NULL;
1634
1635 /* coherent mode by default */
1636 dig->coherent_mode = true;
1637
1638 return dig;
1639}
1640
/*
 * radeon_add_atom_encoder - register an AtomBIOS encoder with the DRM
 * core.
 * @dev: drm device
 * @encoder_id: ATOM encoder object id
 * @supported_device: ATOM device bitmask this encoder can drive
 *
 * If an encoder with the same id already exists, the supported-device
 * mask is merged into it; otherwise a new encoder is allocated and
 * initialized with the helper/vtable matching its type.
 */
void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
{
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		radeon_encoder = to_radeon_encoder(encoder);
		if (radeon_encoder->encoder_id == encoder_id) {
			radeon_encoder->devices |= supported_device;
			return;
		}

	}

	/* add a new one */
	radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
	if (!radeon_encoder)
		return;

	encoder = &radeon_encoder->base;
	/* either CRTC may drive any encoder */
	encoder->possible_crtcs = 0x3;
	encoder->possible_clones = 0;

	radeon_encoder->enc_priv = NULL;

	radeon_encoder->encoder_id = encoder_id;
	radeon_encoder->devices = supported_device;

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		/* panel encoders get LVDS info + forced full RMX scaling */
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			radeon_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
		} else {
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
		}
		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
		radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
		break;
	}
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
new file mode 100644
index 000000000000..fa86d398945e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -0,0 +1,825 @@
1/*
2 * Copyright © 2007 David Airlie
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * David Airlie
25 */
26 /*
27 * Modularization
28 */
29
30#include <linux/module.h>
31#include <linux/kernel.h>
32#include <linux/errno.h>
33#include <linux/string.h>
34#include <linux/mm.h>
35#include <linux/tty.h>
36#include <linux/slab.h>
37#include <linux/delay.h>
38#include <linux/fb.h>
39#include <linux/init.h>
40
41#include "drmP.h"
42#include "drm.h"
43#include "drm_crtc.h"
44#include "drm_crtc_helper.h"
45#include "radeon_drm.h"
46#include "radeon.h"
47
/* Per-fbdev emulation state, stored in fb_info->par. */
struct radeon_fb_device {
	struct radeon_device *rdev;	/* owning radeon device */
	struct drm_display_mode *mode;
	struct radeon_framebuffer *rfb;	/* framebuffer backing the console */
	int crtc_count;			/* valid entries in crtc_ids[] */
	/* crtc currently bound to this */
	uint32_t crtc_ids[2];
};
56
/*
 * radeonfb_setcolreg - fb_ops.fb_setcolreg: set one palette entry.
 *
 * Applies the entry to every CRTC bound to this fbdev: for 8 bpp the
 * hardware gamma table is written; for direct-color depths the value
 * is packed into the software pseudo-palette used by fbcon.
 * Returns 0 on success, 1 for an out-of-range register index.
 */
static int radeonfb_setcolreg(unsigned regno,
			      unsigned red,
			      unsigned green,
			      unsigned blue,
			      unsigned transp,
			      struct fb_info *info)
{
	struct radeon_fb_device *rfbdev = info->par;
	struct drm_device *dev = rfbdev->rdev->ddev;
	struct drm_crtc *crtc;
	int i;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct drm_mode_set *modeset = &radeon_crtc->mode_set;
		struct drm_framebuffer *fb = modeset->fb;

		/* skip CRTCs that are not bound to this fb device */
		for (i = 0; i < rfbdev->crtc_count; i++) {
			if (crtc->base.id == rfbdev->crtc_ids[i]) {
				break;
			}
		}
		if (i == rfbdev->crtc_count) {
			continue;
		}
		if (regno > 255) {
			return 1;
		}
		/* 8 bpp: program the real hardware palette/gamma */
		if (fb->depth == 8) {
			radeon_crtc_fb_gamma_set(crtc, red, green, blue, regno);
			return 0;
		}

		/* direct color: pack into the 16-entry pseudo palette
		 * (inputs are 16-bit fb color components) */
		if (regno < 16) {
			switch (fb->depth) {
			case 15:
				fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
					((green & 0xf800) >> 6) |
					((blue & 0xf800) >> 11);
				break;
			case 16:
				fb->pseudo_palette[regno] = (red & 0xf800) |
					((green & 0xfc00) >> 5) |
					((blue & 0xf800) >> 11);
				break;
			case 24:
			case 32:
				fb->pseudo_palette[regno] = ((red & 0xff00) << 8) |
					(green & 0xff00) |
					((blue & 0xff00) >> 8);
				break;
			}
		}
	}
	return 0;
}
113
/*
 * radeonfb_check_var - fb_ops.fb_check_var: validate and normalize a
 * requested variable screen configuration.
 *
 * Rejects resolutions larger than the backing framebuffer object
 * (resizing is not implemented) and rewrites the RGBA bitfield layout
 * to the canonical one for the derived color depth.
 * Returns 0 on success or -EINVAL for unusable parameters.
 */
static int radeonfb_check_var(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct radeon_fb_device *rfbdev = info->par;
	struct radeon_framebuffer *rfb = rfbdev->rfb;
	struct drm_framebuffer *fb = &rfb->base;
	int depth;

	if (var->pixclock == -1 || !var->pixclock) {
		return -EINVAL;
	}
	/* Need to resize the fb object !!! */
	if (var->xres > fb->width || var->yres > fb->height) {
		DRM_ERROR("Requested width/height is greater than current fb "
			  "object %dx%d > %dx%d\n", var->xres, var->yres,
			  fb->width, fb->height);
		DRM_ERROR("Need resizing code.\n");
		return -EINVAL;
	}

	/* derive the color depth from bpp and the ambiguous bitfields */
	switch (var->bits_per_pixel) {
	case 16:
		depth = (var->green.length == 6) ? 16 : 15;
		break;
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		depth = var->bits_per_pixel;
		break;
	}

	/* force the canonical RGBA bitfield layout for that depth */
	switch (depth) {
	case 8:
		var->red.offset = 0;
		var->green.offset = 0;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 15:
		var->red.offset = 10;
		var->green.offset = 5;
		var->blue.offset = 0;
		var->red.length = 5;
		var->green.length = 5;
		var->blue.length = 5;
		var->transp.length = 1;
		var->transp.offset = 15;
		break;
	case 16:
		var->red.offset = 11;
		var->green.offset = 5;
		var->blue.offset = 0;
		var->red.length = 5;
		var->green.length = 6;
		var->blue.length = 5;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
202
203/* this will let fbcon do the mode init */
204static int radeonfb_set_par(struct fb_info *info)
205{
206 struct radeon_fb_device *rfbdev = info->par;
207 struct drm_device *dev = rfbdev->rdev->ddev;
208 struct fb_var_screeninfo *var = &info->var;
209 struct drm_crtc *crtc;
210 int ret;
211 int i;
212
213 if (var->pixclock != -1) {
214 DRM_ERROR("PIXEL CLCOK SET\n");
215 return -EINVAL;
216 }
217
218 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
219 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
220
221 for (i = 0; i < rfbdev->crtc_count; i++) {
222 if (crtc->base.id == rfbdev->crtc_ids[i]) {
223 break;
224 }
225 }
226 if (i == rfbdev->crtc_count) {
227 continue;
228 }
229 if (crtc->fb == radeon_crtc->mode_set.fb) {
230 mutex_lock(&dev->mode_config.mutex);
231 ret = crtc->funcs->set_config(&radeon_crtc->mode_set);
232 mutex_unlock(&dev->mode_config.mutex);
233 if (ret) {
234 return ret;
235 }
236 }
237 }
238 return 0;
239}
240
/*
 * fb_pan_display hook: apply the requested x/y scan-out offset to every
 * CRTC owned by this fbdev by re-running the CRTC's stored mode_set.
 * info->var is only updated once a set_config() call succeeds.
 *
 * Returns the last set_config() result (0 when nothing ran or failed).
 */
static int radeonfb_pan_display(struct fb_var_screeninfo *var,
			struct fb_info *info)
{
	struct radeon_fb_device *rfbdev = info->par;
	struct drm_device *dev = rfbdev->rdev->ddev;
	struct drm_mode_set *modeset;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	int ret = 0;
	int i;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip CRTCs that do not belong to this fbdev. */
		for (i = 0; i < rfbdev->crtc_count; i++) {
			if (crtc->base.id == rfbdev->crtc_ids[i]) {
				break;
			}
		}

		if (i == rfbdev->crtc_count) {
			continue;
		}

		radeon_crtc = to_radeon_crtc(crtc);
		modeset = &radeon_crtc->mode_set;

		modeset->x = var->xoffset;
		modeset->y = var->yoffset;

		/* Only re-program CRTCs that actually drive connectors. */
		if (modeset->num_connectors) {
			mutex_lock(&dev->mode_config.mutex);
			ret = crtc->funcs->set_config(modeset);
			mutex_unlock(&dev->mode_config.mutex);
			if (!ret) {
				info->var.xoffset = var->xoffset;
				info->var.yoffset = var->yoffset;
			}
		}
	}
	return ret;
}
281
282static void radeonfb_on(struct fb_info *info)
283{
284 struct radeon_fb_device *rfbdev = info->par;
285 struct drm_device *dev = rfbdev->rdev->ddev;
286 struct drm_crtc *crtc;
287 struct drm_encoder *encoder;
288 int i;
289
290 /*
291 * For each CRTC in this fb, find all associated encoders
292 * and turn them off, then turn off the CRTC.
293 */
294 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
295 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
296
297 for (i = 0; i < rfbdev->crtc_count; i++) {
298 if (crtc->base.id == rfbdev->crtc_ids[i]) {
299 break;
300 }
301 }
302
303 mutex_lock(&dev->mode_config.mutex);
304 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
305 mutex_unlock(&dev->mode_config.mutex);
306
307 /* Found a CRTC on this fb, now find encoders */
308 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
309 if (encoder->crtc == crtc) {
310 struct drm_encoder_helper_funcs *encoder_funcs;
311
312 encoder_funcs = encoder->helper_private;
313 mutex_lock(&dev->mode_config.mutex);
314 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
315 mutex_unlock(&dev->mode_config.mutex);
316 }
317 }
318 }
319}
320
321static void radeonfb_off(struct fb_info *info, int dpms_mode)
322{
323 struct radeon_fb_device *rfbdev = info->par;
324 struct drm_device *dev = rfbdev->rdev->ddev;
325 struct drm_crtc *crtc;
326 struct drm_encoder *encoder;
327 int i;
328
329 /*
330 * For each CRTC in this fb, find all associated encoders
331 * and turn them off, then turn off the CRTC.
332 */
333 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
334 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
335
336 for (i = 0; i < rfbdev->crtc_count; i++) {
337 if (crtc->base.id == rfbdev->crtc_ids[i]) {
338 break;
339 }
340 }
341
342 /* Found a CRTC on this fb, now find encoders */
343 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
344 if (encoder->crtc == crtc) {
345 struct drm_encoder_helper_funcs *encoder_funcs;
346
347 encoder_funcs = encoder->helper_private;
348 mutex_lock(&dev->mode_config.mutex);
349 encoder_funcs->dpms(encoder, dpms_mode);
350 mutex_unlock(&dev->mode_config.mutex);
351 }
352 }
353 if (dpms_mode == DRM_MODE_DPMS_OFF) {
354 mutex_lock(&dev->mode_config.mutex);
355 crtc_funcs->dpms(crtc, dpms_mode);
356 mutex_unlock(&dev->mode_config.mutex);
357 }
358 }
359}
360
361int radeonfb_blank(int blank, struct fb_info *info)
362{
363 switch (blank) {
364 case FB_BLANK_UNBLANK:
365 radeonfb_on(info);
366 break;
367 case FB_BLANK_NORMAL:
368 radeonfb_off(info, DRM_MODE_DPMS_STANDBY);
369 break;
370 case FB_BLANK_HSYNC_SUSPEND:
371 radeonfb_off(info, DRM_MODE_DPMS_STANDBY);
372 break;
373 case FB_BLANK_VSYNC_SUSPEND:
374 radeonfb_off(info, DRM_MODE_DPMS_SUSPEND);
375 break;
376 case FB_BLANK_POWERDOWN:
377 radeonfb_off(info, DRM_MODE_DPMS_OFF);
378 break;
379 }
380 return 0;
381}
382
/* fbdev entry points: drawing goes through the generic software cfb_*
 * helpers; mode validation, blanking and panning are routed into the
 * DRM mode-setting code above. */
static struct fb_ops radeonfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = radeonfb_check_var,
	.fb_set_par = radeonfb_set_par,
	.fb_setcolreg = radeonfb_setcolreg,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_pan_display = radeonfb_pan_display,
	.fb_blank = radeonfb_blank,
};
394
/**
 * Currently it is assumed that the old framebuffer is reused.
 *
 * LOCKING
 * caller should hold the mode config lock.
 *
 */
int radeonfb_resize(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_display_mode *mode = crtc->desired_mode;

	/* Nothing to do without an fb, an fbdev wrapper and a target mode;
	 * returning 1 signals the caller that the resize was skipped. */
	fb = crtc->fb;
	if (fb == NULL) {
		return 1;
	}
	info = fb->fbdev;
	if (info == NULL) {
		return 1;
	}
	if (mode == NULL) {
		return 1;
	}
	/* Copy the mode timings into the fbdev var. */
	info->var.xres = mode->hdisplay;
	info->var.right_margin = mode->hsync_start - mode->hdisplay;
	info->var.hsync_len = mode->hsync_end - mode->hsync_start;
	info->var.left_margin = mode->htotal - mode->hsync_end;
	info->var.yres = mode->vdisplay;
	info->var.lower_margin = mode->vsync_start - mode->vdisplay;
	info->var.vsync_len = mode->vsync_end - mode->vsync_start;
	info->var.upper_margin = mode->vtotal - mode->vsync_end;
	/* Derive pixclock from htotal/vtotal in stages to limit 32-bit
	 * overflow.  NOTE(review): divides by mode->vrefresh below --
	 * assumes it is non-zero; confirm callers guarantee that. */
	info->var.pixclock = 10000000 / mode->htotal * 1000 / mode->vtotal * 100;
	/* avoid overflow */
	info->var.pixclock = info->var.pixclock * 1000 / mode->vrefresh;

	return 0;
}
EXPORT_SYMBOL(radeonfb_resize);
434
/* mode_set snapshot restored on kernel panic to bring back a console */
static struct drm_mode_set panic_mode;

/*
 * Panic notifier callback: force the saved mode_set so the panic output
 * is visible on screen.  Always returns 0 (notifier done).
 */
int radeonfb_panic(struct notifier_block *n, unsigned long ununsed,
		   void *panic_str)
{
	DRM_ERROR("panic occurred, switching back to text console\n");
	drm_crtc_helper_set_config(&panic_mode);
	return 0;
}
EXPORT_SYMBOL(radeonfb_panic);

/* registered on &panic_notifier_list by radeonfb_single_fb_probe() */
static struct notifier_block paniced = {
	.notifier_call = radeonfb_panic,
};
449
/*
 * Round a scanline width (in pixels) up to the pitch granularity the
 * CRTC requires for the given bits-per-pixel.  AVIVO parts need a
 * coarser alignment than older ASICs; unknown byte sizes pass through
 * unchanged.
 */
static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp)
{
	int large = ASIC_IS_AVIVO(rdev);
	int mask;

	switch (bpp / 8) {
	case 1:
		mask = large ? 255 : 127;
		break;
	case 2:
		mask = large ? 127 : 31;
		break;
	case 3:
	case 4:
		mask = large ? 63 : 15;
		break;
	default:
		mask = 0;
		break;
	}
	return (width + mask) & ~mask;
}
473
474int radeonfb_create(struct radeon_device *rdev,
475 uint32_t fb_width, uint32_t fb_height,
476 uint32_t surface_width, uint32_t surface_height,
477 struct radeon_framebuffer **rfb_p)
478{
479 struct fb_info *info;
480 struct radeon_fb_device *rfbdev;
481 struct drm_framebuffer *fb;
482 struct radeon_framebuffer *rfb;
483 struct drm_mode_fb_cmd mode_cmd;
484 struct drm_gem_object *gobj = NULL;
485 struct radeon_object *robj = NULL;
486 struct device *device = &rdev->pdev->dev;
487 int size, aligned_size, ret;
488 void *fbptr = NULL;
489
490 mode_cmd.width = surface_width;
491 mode_cmd.height = surface_height;
492 mode_cmd.bpp = 32;
493 /* need to align pitch with crtc limits */
494 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8);
495 mode_cmd.depth = 24;
496
497 size = mode_cmd.pitch * mode_cmd.height;
498 aligned_size = ALIGN(size, PAGE_SIZE);
499
500 ret = radeon_gem_object_create(rdev, aligned_size, 0,
501 RADEON_GEM_DOMAIN_VRAM,
502 false, ttm_bo_type_kernel,
503 false, &gobj);
504 if (ret) {
505 printk(KERN_ERR "failed to allocate framebuffer\n");
506 ret = -ENOMEM;
507 goto out;
508 }
509 robj = gobj->driver_private;
510
511 mutex_lock(&rdev->ddev->struct_mutex);
512 fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
513 if (fb == NULL) {
514 DRM_ERROR("failed to allocate fb.\n");
515 ret = -ENOMEM;
516 goto out_unref;
517 }
518
519 list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list);
520
521 rfb = to_radeon_framebuffer(fb);
522 *rfb_p = rfb;
523 rdev->fbdev_rfb = rfb;
524
525 info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
526 if (info == NULL) {
527 ret = -ENOMEM;
528 goto out_unref;
529 }
530 rfbdev = info->par;
531
532 ret = radeon_object_kmap(robj, &fbptr);
533 if (ret) {
534 goto out_unref;
535 }
536
537 strcpy(info->fix.id, "radeondrmfb");
538 info->fix.type = FB_TYPE_PACKED_PIXELS;
539 info->fix.visual = FB_VISUAL_TRUECOLOR;
540 info->fix.type_aux = 0;
541 info->fix.xpanstep = 1; /* doing it in hw */
542 info->fix.ypanstep = 1; /* doing it in hw */
543 info->fix.ywrapstep = 0;
544 info->fix.accel = FB_ACCEL_I830;
545 info->fix.type_aux = 0;
546 info->flags = FBINFO_DEFAULT;
547 info->fbops = &radeonfb_ops;
548 info->fix.line_length = fb->pitch;
549 info->screen_base = fbptr;
550 info->fix.smem_start = (unsigned long)fbptr;
551 info->fix.smem_len = size;
552 info->screen_base = fbptr;
553 info->screen_size = size;
554 info->pseudo_palette = fb->pseudo_palette;
555 info->var.xres_virtual = fb->width;
556 info->var.yres_virtual = fb->height;
557 info->var.bits_per_pixel = fb->bits_per_pixel;
558 info->var.xoffset = 0;
559 info->var.yoffset = 0;
560 info->var.activate = FB_ACTIVATE_NOW;
561 info->var.height = -1;
562 info->var.width = -1;
563 info->var.xres = fb_width;
564 info->var.yres = fb_height;
565 info->fix.mmio_start = pci_resource_start(rdev->pdev, 2);
566 info->fix.mmio_len = pci_resource_len(rdev->pdev, 2);
567 info->pixmap.size = 64*1024;
568 info->pixmap.buf_align = 8;
569 info->pixmap.access_align = 32;
570 info->pixmap.flags = FB_PIXMAP_SYSTEM;
571 info->pixmap.scan_align = 1;
572 if (info->screen_base == NULL) {
573 ret = -ENOSPC;
574 goto out_unref;
575 }
576 DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
577 DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
578 DRM_INFO("size %lu\n", (unsigned long)size);
579 DRM_INFO("fb depth is %d\n", fb->depth);
580 DRM_INFO(" pitch is %d\n", fb->pitch);
581
582 switch (fb->depth) {
583 case 8:
584 info->var.red.offset = 0;
585 info->var.green.offset = 0;
586 info->var.blue.offset = 0;
587 info->var.red.length = 8; /* 8bit DAC */
588 info->var.green.length = 8;
589 info->var.blue.length = 8;
590 info->var.transp.offset = 0;
591 info->var.transp.length = 0;
592 break;
593 case 15:
594 info->var.red.offset = 10;
595 info->var.green.offset = 5;
596 info->var.blue.offset = 0;
597 info->var.red.length = 5;
598 info->var.green.length = 5;
599 info->var.blue.length = 5;
600 info->var.transp.offset = 15;
601 info->var.transp.length = 1;
602 break;
603 case 16:
604 info->var.red.offset = 11;
605 info->var.green.offset = 5;
606 info->var.blue.offset = 0;
607 info->var.red.length = 5;
608 info->var.green.length = 6;
609 info->var.blue.length = 5;
610 info->var.transp.offset = 0;
611 break;
612 case 24:
613 info->var.red.offset = 16;
614 info->var.green.offset = 8;
615 info->var.blue.offset = 0;
616 info->var.red.length = 8;
617 info->var.green.length = 8;
618 info->var.blue.length = 8;
619 info->var.transp.offset = 0;
620 info->var.transp.length = 0;
621 break;
622 case 32:
623 info->var.red.offset = 16;
624 info->var.green.offset = 8;
625 info->var.blue.offset = 0;
626 info->var.red.length = 8;
627 info->var.green.length = 8;
628 info->var.blue.length = 8;
629 info->var.transp.offset = 24;
630 info->var.transp.length = 8;
631 break;
632 default:
633 break;
634 }
635
636 fb->fbdev = info;
637 rfbdev->rfb = rfb;
638 rfbdev->rdev = rdev;
639
640 mutex_unlock(&rdev->ddev->struct_mutex);
641 return 0;
642
643out_unref:
644 if (robj) {
645 radeon_object_kunmap(robj);
646 }
647 if (ret) {
648 list_del(&fb->filp_head);
649 drm_gem_object_unreference(gobj);
650 drm_framebuffer_cleanup(fb);
651 kfree(fb);
652 }
653 drm_gem_object_unreference(gobj);
654 mutex_unlock(&rdev->ddev->struct_mutex);
655out:
656 return ret;
657}
658
659static int radeonfb_single_fb_probe(struct radeon_device *rdev)
660{
661 struct drm_crtc *crtc;
662 struct drm_connector *connector;
663 unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1;
664 unsigned int surface_width = 0, surface_height = 0;
665 int new_fb = 0;
666 int crtc_count = 0;
667 int ret, i, conn_count = 0;
668 struct radeon_framebuffer *rfb;
669 struct fb_info *info;
670 struct radeon_fb_device *rfbdev;
671 struct drm_mode_set *modeset = NULL;
672
673 /* first up get a count of crtcs now in use and new min/maxes width/heights */
674 list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
675 if (drm_helper_crtc_in_use(crtc)) {
676 if (crtc->desired_mode) {
677 if (crtc->desired_mode->hdisplay < fb_width)
678 fb_width = crtc->desired_mode->hdisplay;
679
680 if (crtc->desired_mode->vdisplay < fb_height)
681 fb_height = crtc->desired_mode->vdisplay;
682
683 if (crtc->desired_mode->hdisplay > surface_width)
684 surface_width = crtc->desired_mode->hdisplay;
685
686 if (crtc->desired_mode->vdisplay > surface_height)
687 surface_height = crtc->desired_mode->vdisplay;
688 }
689 crtc_count++;
690 }
691 }
692
693 if (crtc_count == 0 || fb_width == -1 || fb_height == -1) {
694 /* hmm everyone went away - assume VGA cable just fell out
695 and will come back later. */
696 return 0;
697 }
698
699 /* do we have an fb already? */
700 if (list_empty(&rdev->ddev->mode_config.fb_kernel_list)) {
701 /* create an fb if we don't have one */
702 ret = radeonfb_create(rdev, fb_width, fb_height, surface_width, surface_height, &rfb);
703 if (ret) {
704 return -EINVAL;
705 }
706 new_fb = 1;
707 } else {
708 struct drm_framebuffer *fb;
709 fb = list_first_entry(&rdev->ddev->mode_config.fb_kernel_list, struct drm_framebuffer, filp_head);
710 rfb = to_radeon_framebuffer(fb);
711
712 /* if someone hotplugs something bigger than we have already allocated, we are pwned.
713 As really we can't resize an fbdev that is in the wild currently due to fbdev
714 not really being designed for the lower layers moving stuff around under it.
715 - so in the grand style of things - punt. */
716 if ((fb->width < surface_width) || (fb->height < surface_height)) {
717 DRM_ERROR("Framebuffer not large enough to scale console onto.\n");
718 return -EINVAL;
719 }
720 }
721
722 info = rfb->base.fbdev;
723 rdev->fbdev_info = info;
724 rfbdev = info->par;
725
726 crtc_count = 0;
727 /* okay we need to setup new connector sets in the crtcs */
728 list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) {
729 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
730 modeset = &radeon_crtc->mode_set;
731 modeset->fb = &rfb->base;
732 conn_count = 0;
733 list_for_each_entry(connector, &rdev->ddev->mode_config.connector_list, head) {
734 if (connector->encoder)
735 if (connector->encoder->crtc == modeset->crtc) {
736 modeset->connectors[conn_count] = connector;
737 conn_count++;
738 if (conn_count > RADEONFB_CONN_LIMIT)
739 BUG();
740 }
741 }
742
743 for (i = conn_count; i < RADEONFB_CONN_LIMIT; i++)
744 modeset->connectors[i] = NULL;
745
746
747 rfbdev->crtc_ids[crtc_count++] = crtc->base.id;
748
749 modeset->num_connectors = conn_count;
750 if (modeset->crtc->desired_mode) {
751 if (modeset->mode) {
752 drm_mode_destroy(rdev->ddev, modeset->mode);
753 }
754 modeset->mode = drm_mode_duplicate(rdev->ddev,
755 modeset->crtc->desired_mode);
756 }
757 }
758 rfbdev->crtc_count = crtc_count;
759
760 if (new_fb) {
761 info->var.pixclock = -1;
762 if (register_framebuffer(info) < 0)
763 return -EINVAL;
764 } else {
765 radeonfb_set_par(info);
766 }
767 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
768 info->fix.id);
769
770 /* Switch back to kernel console on panic */
771 panic_mode = *modeset;
772 atomic_notifier_chain_register(&panic_notifier_list, &paniced);
773 printk(KERN_INFO "registered panic notifier\n");
774
775 return 0;
776}
777
778int radeonfb_probe(struct drm_device *dev)
779{
780 int ret;
781
782 /* something has changed in the lower levels of hell - deal with it
783 here */
784
785 /* two modes : a) 1 fb to rule all crtcs.
786 b) one fb per crtc.
787 two actions 1) new connected device
788 2) device removed.
789 case a/1 : if the fb surface isn't big enough - resize the surface fb.
790 if the fb size isn't big enough - resize fb into surface.
791 if everything big enough configure the new crtc/etc.
792 case a/2 : undo the configuration
793 possibly resize down the fb to fit the new configuration.
794 case b/1 : see if it is on a new crtc - setup a new fb and add it.
795 case b/2 : teardown the new fb.
796 */
797 ret = radeonfb_single_fb_probe(dev->dev_private);
798 return ret;
799}
800EXPORT_SYMBOL(radeonfb_probe);
801
802int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
803{
804 struct fb_info *info;
805 struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
806 struct radeon_object *robj;
807
808 if (!fb) {
809 return -EINVAL;
810 }
811 info = fb->fbdev;
812 if (info) {
813 robj = rfb->obj->driver_private;
814 unregister_framebuffer(info);
815 radeon_object_kunmap(robj);
816 framebuffer_release(info);
817 }
818
819 printk(KERN_INFO "unregistered panic notifier\n");
820 atomic_notifier_chain_unregister(&panic_notifier_list, &paniced);
821 memset(&panic_mode, 0, sizeof(struct drm_mode_set));
822 return 0;
823}
824EXPORT_SYMBOL(radeonfb_remove);
825MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
new file mode 100644
index 000000000000..96afbf5ae2ad
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -0,0 +1,387 @@
1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Dave Airlie
30 */
31#include <linux/seq_file.h>
32#include <asm/atomic.h>
33#include <linux/wait.h>
34#include <linux/list.h>
35#include <linux/kref.h>
36#include "drmP.h"
37#include "drm.h"
38#include "radeon_reg.h"
39#include "radeon.h"
40
41int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
42{
43 unsigned long irq_flags;
44
45 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
46 if (fence->emited) {
47 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
48 return 0;
49 }
50 fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
51 if (!rdev->cp.ready) {
52 /* FIXME: cp is not running assume everythings is done right
53 * away
54 */
55 WREG32(rdev->fence_drv.scratch_reg, fence->seq);
56 } else {
57 radeon_fence_ring_emit(rdev, fence);
58 }
59 fence->emited = true;
60 fence->timeout = jiffies + ((2000 * HZ) / 1000);
61 list_del(&fence->list);
62 list_add_tail(&fence->list, &rdev->fence_drv.emited);
63 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
64 return 0;
65}
66
/*
 * Compare the sequence number the GPU last wrote to the fence scratch
 * register against the emitted list, and move every fence up to and
 * including the matching one onto the signaled list.
 *
 * Caller must hold fence_drv.lock for writing.  Returns true when at
 * least one fence signaled (waiters should be woken); also returns true
 * on a NULL rdev or during shutdown so waiters never block forever.
 */
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;

	if (rdev == NULL) {
		return true;
	}
	if (rdev->shutdown) {
		return true;
	}
	/* latest sequence number the GPU has retired */
	seq = RREG32(rdev->fence_drv.scratch_reg);
	rdev->fence_drv.last_seq = seq;
	n = NULL;
	/* locate the emitted fence carrying the retired sequence number */
	list_for_each(i, &rdev->fence_drv.emited) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fence previous to this one are considered as signaled */
	if (n) {
		/* walk backwards from the match to the list head, moving
		 * each node onto the signaled list as we go */
		i = n;
		do {
			n = i->prev;
			list_del(i);
			list_add_tail(i, &rdev->fence_drv.signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv.emited);
		wake = true;
	}
	return wake;
}
105
106static void radeon_fence_destroy(struct kref *kref)
107{
108 unsigned long irq_flags;
109 struct radeon_fence *fence;
110
111 fence = container_of(kref, struct radeon_fence, kref);
112 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
113 list_del(&fence->list);
114 fence->emited = false;
115 write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
116 kfree(fence);
117}
118
119int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
120{
121 unsigned long irq_flags;
122
123 *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
124 if ((*fence) == NULL) {
125 return -ENOMEM;
126 }
127 kref_init(&((*fence)->kref));
128 (*fence)->rdev = rdev;
129 (*fence)->emited = false;
130 (*fence)->signaled = false;
131 (*fence)->seq = 0;
132 INIT_LIST_HEAD(&(*fence)->list);
133
134 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
135 list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
136 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
137 return 0;
138}
139
140
141bool radeon_fence_signaled(struct radeon_fence *fence)
142{
143 struct radeon_device *rdev = fence->rdev;
144 unsigned long irq_flags;
145 bool signaled = false;
146
147 if (rdev->gpu_lockup) {
148 return true;
149 }
150 if (fence == NULL) {
151 return true;
152 }
153 write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
154 signaled = fence->signaled;
155 /* if we are shuting down report all fence as signaled */
156 if (fence->rdev->shutdown) {
157 signaled = true;
158 }
159 if (!fence->emited) {
160 WARN(1, "Querying an unemited fence : %p !\n", fence);
161 signaled = true;
162 }
163 if (!signaled) {
164 radeon_fence_poll_locked(fence->rdev);
165 signaled = fence->signaled;
166 }
167 write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
168 return signaled;
169}
170
/*
 * Block until @fence signals.  The wait runs in short (<= HZ/100 past
 * the deadline) slices so the scratch register is re-polled regularly;
 * once the fence's 2s deadline has been exceeded by more than 500ms the
 * GPU is assumed hung and reset.
 *
 * Returns 0 on success and for a NULL fence (WARNed); -ERESTART when an
 * interruptible wait is broken by a signal.  NOTE(review): returning
 * -ERESTART rather than -ERESTARTSYS looks intentional here -- confirm
 * callers expect it.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible)
{
	struct radeon_device *rdev;
	unsigned long cur_jiffies;
	unsigned long timeout;
	bool expired = false;
	int r;


	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
retry:
	/* sleep until the fence deadline, but at least HZ/100 */
	cur_jiffies = jiffies;
	timeout = HZ / 100;
	if (time_after(fence->timeout, cur_jiffies)) {
		timeout = fence->timeout - cur_jiffies;
	}
	if (interruptible) {
		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
		if (unlikely(r == -ERESTARTSYS)) {
			return -ERESTART;
		}
	} else {
		r = wait_event_timeout(rdev->fence_drv.queue,
				radeon_fence_signaled(fence), timeout);
	}
	if (unlikely(!radeon_fence_signaled(fence))) {
		if (unlikely(r == 0)) {
			/* the slice elapsed without the fence signaling */
			expired = true;
		}
		if (unlikely(expired)) {
			/* how far past the fence deadline are we? */
			timeout = 1;
			if (time_after(cur_jiffies, fence->timeout)) {
				timeout = cur_jiffies - fence->timeout;
			}
			timeout = jiffies_to_msecs(timeout);
			if (timeout > 500) {
				/* more than 500ms overdue: assume a hang */
				DRM_ERROR("fence(%p:0x%08X) %lums timeout "
					  "going to reset GPU\n",
					  fence, fence->seq, timeout);
				radeon_gpu_reset(rdev);
				WREG32(rdev->fence_drv.scratch_reg, fence->seq);
			}
		}
		goto retry;
	}
	if (unlikely(expired)) {
		/* signaled eventually, but only after the deadline: log it */
		rdev->fence_drv.count_timeout++;
		cur_jiffies = jiffies;
		timeout = 1;
		if (time_after(cur_jiffies, fence->timeout)) {
			timeout = cur_jiffies - fence->timeout;
		}
		timeout = jiffies_to_msecs(timeout);
		DRM_ERROR("fence(%p:0x%08X) %lums timeout\n",
			  fence, fence->seq, timeout);
		DRM_ERROR("last signaled fence(0x%08X)\n",
			  rdev->fence_drv.last_seq);
	}
	return 0;
}
239
/*
 * Block until the oldest emitted fence signals.  An extra reference is
 * held across the wait so the fence cannot be destroyed mid-wait.
 * Returns 0 immediately when the GPU is locked up or nothing has been
 * emitted, otherwise the radeon_fence_wait() result.
 */
int radeon_fence_wait_next(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	/* head of .emited is the oldest outstanding fence */
	fence = list_entry(rdev->fence_drv.emited.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
262
/*
 * Block until the newest emitted fence signals, i.e. drain everything
 * outstanding.  An extra reference is held across the wait so the fence
 * cannot be destroyed mid-wait.  Returns 0 immediately when the GPU is
 * locked up or nothing has been emitted, otherwise the
 * radeon_fence_wait() result.
 */
int radeon_fence_wait_last(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	if (list_empty(&rdev->fence_drv.emited)) {
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return 0;
	}
	/* tail of .emited is the most recently emitted fence */
	fence = list_entry(rdev->fence_drv.emited.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}
285
286struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
287{
288 kref_get(&fence->kref);
289 return fence;
290}
291
292void radeon_fence_unref(struct radeon_fence **fence)
293{
294 struct radeon_fence *tmp = *fence;
295
296 *fence = NULL;
297 if (tmp) {
298 kref_put(&tmp->kref, &radeon_fence_destroy);
299 }
300}
301
302void radeon_fence_process(struct radeon_device *rdev)
303{
304 unsigned long irq_flags;
305 bool wake;
306
307 write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
308 wake = radeon_fence_poll_locked(rdev);
309 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
310 if (wake) {
311 wake_up_all(&rdev->fence_drv.queue);
312 }
313}
314
/*
 * One-time fence bookkeeping setup: reserve a scratch register the ring
 * writes retired sequence numbers into, zero it and the software
 * sequence counter, and initialize the created/emited/signaled lists
 * plus the waiter queue.
 *
 * Returns 0 on success or the scratch-register allocation error.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int r;

	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
	if (r) {
		DRM_ERROR("Fence failed to get a scratch register.");
		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
		return r;
	}
	WREG32(rdev->fence_drv.scratch_reg, 0);
	atomic_set(&rdev->fence_drv.seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv.created);
	INIT_LIST_HEAD(&rdev->fence_drv.emited);
	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
	rdev->fence_drv.count_timeout = 0;
	init_waitqueue_head(&rdev->fence_drv.queue);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	/* debugfs registration failure is non-fatal */
	if (radeon_debugfs_fence_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for fence !\n");
	}
	return 0;
}
340
/*
 * Fence teardown: wake every waiter, then release the scratch register
 * under the fence lock.  Outstanding fence objects are not freed here;
 * their kref owners release them via radeon_fence_unref().
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	wake_up_all(&rdev->fence_drv.queue);
	unsigned long irq_flags;

	wake_up_all(&rdev->fence_drv.queue);
	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
	DRM_INFO("radeon: fence finalized\n");
}
351
352
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
/* debugfs dump: last GPU-retired sequence number, plus the most
 * recently emitted fence when one is outstanding */
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;

	seq_printf(m, "Last signaled fence 0x%08X\n",
		   RREG32(rdev->fence_drv.scratch_reg));
	if (!list_empty(&rdev->fence_drv.emited)) {
		/* tail of .emited is the most recently emitted fence */
		fence = list_entry(rdev->fence_drv.emited.prev,
				   struct radeon_fence, list);
		seq_printf(m, "Last emited fence %p with 0x%08X\n",
			   fence, fence->seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif
379
/* Register the fence debugfs file; a no-op without CONFIG_DEBUG_FS. */
int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	/* trailing 1 = number of entries in radeon_debugfs_fence_list */
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
new file mode 100644
index 000000000000..90187d173847
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_fixed.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 */
24#ifndef RADEON_FIXED_H
25#define RADEON_FIXED_H
26
/* 20.12 unsigned fixed-point type (20 integer bits, 12 fractional). */
typedef union rfixed {
	u32 full;
} fixed20_12;


#define rfixed_const(A) (u32)(((A) << 12))/* + ((B + 0.000122)*4096)) */
#define rfixed_const_half(A) (u32)(((A) << 12) + 2048) /* A + 0.5 */
#define rfixed_const_666(A) (u32)(((A) << 12) + 2731) /* A + 2/3 */
#define rfixed_const_8(A) (u32)(((A) << 12) + 3277) /* A + 0.8 */
/* widening multiply; +2048 rounds to nearest before dropping back to 20.12 */
#define rfixed_mul(A, B) ((u64)((u64)(A).full * (B).full + 2048) >> 12)
#define fixed_init(A) { .full = rfixed_const((A)) }
#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
#define rfixed_trunc(A) ((A).full >> 12) /* integer part only */
40
41static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
42{
43 u64 tmp = ((u64)A.full << 13);
44
45 do_div(tmp, B.full);
46 tmp += 1;
47 tmp /= 2;
48 return lower_32_bits(tmp);
49}
50#endif
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
new file mode 100644
index 000000000000..d343a15316ec
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -0,0 +1,233 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_drm.h"
30#include "radeon.h"
31#include "radeon_reg.h"
32
33/*
34 * Common GART table functions.
35 */
/*
 * Allocate the GART page table in system RAM via the coherent DMA API;
 * the bus address lands in rdev->gart.table_addr for the GPU to use.
 * Returns 0 on success or -ENOMEM.
 */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
	void *ptr;

	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
				   &rdev->gart.table_addr);
	if (ptr == NULL) {
		return -ENOMEM;
	}
#ifdef CONFIG_X86
	/* On these IGP families the table mapping is switched to uncached
	 * on x86 (restored by set_memory_wb in radeon_gart_table_ram_free);
	 * presumably the GPU reads the table without snooping -- confirm. */
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_uc((unsigned long)ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	rdev->gart.table.ram.ptr = ptr;
	/* start with an empty (all-zero) page table */
	memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size);
	return 0;
}
56
/*
 * Release a system-RAM GART table allocated by
 * radeon_gart_table_ram_alloc(); restores the cacheable mapping on x86
 * before handing the memory back to the DMA allocator.  No-op if no
 * table was allocated.
 */
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
	if (rdev->gart.table.ram.ptr == NULL) {
		return;
	}
#ifdef CONFIG_X86
	/* undo the set_memory_uc done at allocation time */
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_wb((unsigned long)rdev->gart.table.ram.ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
			    (void *)rdev->gart.table.ram.ptr,
			    rdev->gart.table_addr);
	rdev->gart.table.ram.ptr = NULL;
	rdev->gart.table_addr = 0;
}
75
/*
 * Place the GART table in VRAM: create the buffer object on the first
 * call (robj == NULL), then pin it in VRAM and map it into the kernel.
 * On success rdev->gart.table_addr holds the table's GPU address.
 * On failure the object is unwound (unpin before unref) and an error
 * code is returned.
 */
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		r = radeon_object_create(rdev, NULL,
					 rdev->gart.table_size,
					 true,
					 RADEON_GEM_DOMAIN_VRAM,
					 false, &rdev->gart.table.vram.robj);
		if (r) {
			return r;
		}
	}
	r = radeon_object_pin(rdev->gart.table.vram.robj,
			      RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_object_unref(&rdev->gart.table.vram.robj);
		return r;
	}
	r = radeon_object_kmap(rdev->gart.table.vram.robj,
			       (void **)&rdev->gart.table.vram.ptr);
	if (r) {
		/* unwind the pin taken above before dropping the object */
		radeon_object_unpin(rdev->gart.table.vram.robj);
		radeon_object_unref(&rdev->gart.table.vram.robj);
		DRM_ERROR("radeon: failed to map gart vram table.\n");
		return r;
	}
	rdev->gart.table_addr = gpu_addr;
	return 0;
}
108
109void radeon_gart_table_vram_free(struct radeon_device *rdev)
110{
111 if (rdev->gart.table.vram.robj == NULL) {
112 return;
113 }
114 radeon_object_kunmap(rdev->gart.table.vram.robj);
115 radeon_object_unpin(rdev->gart.table.vram.robj);
116 radeon_object_unref(&rdev->gart.table.vram.robj);
117}
118
119
120
121
122/*
123 * Common gart functions.
124 */
125void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
126 int pages)
127{
128 unsigned t;
129 unsigned p;
130 int i, j;
131
132 if (!rdev->gart.ready) {
133 WARN(1, "trying to unbind memory to unitialized GART !\n");
134 return;
135 }
136 t = offset / 4096;
137 p = t / (PAGE_SIZE / 4096);
138 for (i = 0; i < pages; i++, p++) {
139 if (rdev->gart.pages[p]) {
140 pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
141 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
142 rdev->gart.pages[p] = NULL;
143 rdev->gart.pages_addr[p] = 0;
144 for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
145 radeon_gart_set_page(rdev, t, 0);
146 }
147 }
148 }
149 mb();
150 radeon_gart_tlb_flush(rdev);
151}
152
153int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
154 int pages, struct page **pagelist)
155{
156 unsigned t;
157 unsigned p;
158 uint64_t page_base;
159 int i, j;
160
161 if (!rdev->gart.ready) {
162 DRM_ERROR("trying to bind memory to unitialized GART !\n");
163 return -EINVAL;
164 }
165 t = offset / 4096;
166 p = t / (PAGE_SIZE / 4096);
167
168 for (i = 0; i < pages; i++, p++) {
169 /* we need to support large memory configurations */
170 /* assume that unbind have already been call on the range */
171 rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
172 0, PAGE_SIZE,
173 PCI_DMA_BIDIRECTIONAL);
174 if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
175 /* FIXME: failed to map page (return -ENOMEM?) */
176 radeon_gart_unbind(rdev, offset, pages);
177 return -ENOMEM;
178 }
179 rdev->gart.pages[p] = pagelist[i];
180 page_base = (uint32_t)rdev->gart.pages_addr[p];
181 for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) {
182 radeon_gart_set_page(rdev, t, page_base);
183 page_base += 4096;
184 }
185 }
186 mb();
187 radeon_gart_tlb_flush(rdev);
188 return 0;
189}
190
191int radeon_gart_init(struct radeon_device *rdev)
192{
193 if (rdev->gart.pages) {
194 return 0;
195 }
196 /* We need PAGE_SIZE >= 4096 */
197 if (PAGE_SIZE < 4096) {
198 DRM_ERROR("Page size is smaller than GPU page size!\n");
199 return -EINVAL;
200 }
201 /* Compute table size */
202 rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
203 rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
204 DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
205 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
206 /* Allocate pages table */
207 rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
208 GFP_KERNEL);
209 if (rdev->gart.pages == NULL) {
210 radeon_gart_fini(rdev);
211 return -ENOMEM;
212 }
213 rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
214 rdev->gart.num_cpu_pages, GFP_KERNEL);
215 if (rdev->gart.pages_addr == NULL) {
216 radeon_gart_fini(rdev);
217 return -ENOMEM;
218 }
219 return 0;
220}
221
222void radeon_gart_fini(struct radeon_device *rdev)
223{
224 if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
225 /* unbind pages */
226 radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
227 }
228 rdev->gart.ready = false;
229 kfree(rdev->gart.pages);
230 kfree(rdev->gart.pages_addr);
231 rdev->gart.pages = NULL;
232 rdev->gart.pages_addr = NULL;
233}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
new file mode 100644
index 000000000000..eb516034235d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -0,0 +1,287 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "drm.h"
30#include "radeon_drm.h"
31#include "radeon.h"
32
/* GEM object init hook: no per-object setup is needed here. */
int radeon_gem_object_init(struct drm_gem_object *obj)
{
	return 0;
}
38
39void radeon_gem_object_free(struct drm_gem_object *gobj)
40{
41 struct radeon_object *robj = gobj->driver_private;
42
43 gobj->driver_private = NULL;
44 if (robj) {
45 radeon_object_unref(&robj);
46 }
47}
48
49int radeon_gem_object_create(struct radeon_device *rdev, int size,
50 int alignment, int initial_domain,
51 bool discardable, bool kernel,
52 bool interruptible,
53 struct drm_gem_object **obj)
54{
55 struct drm_gem_object *gobj;
56 struct radeon_object *robj;
57 int r;
58
59 *obj = NULL;
60 gobj = drm_gem_object_alloc(rdev->ddev, size);
61 if (!gobj) {
62 return -ENOMEM;
63 }
64 /* At least align on page size */
65 if (alignment < PAGE_SIZE) {
66 alignment = PAGE_SIZE;
67 }
68 r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
69 interruptible, &robj);
70 if (r) {
71 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
72 size, initial_domain, alignment);
73 mutex_lock(&rdev->ddev->struct_mutex);
74 drm_gem_object_unreference(gobj);
75 mutex_unlock(&rdev->ddev->struct_mutex);
76 return r;
77 }
78 gobj->driver_private = robj;
79 *obj = gobj;
80 return 0;
81}
82
83int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
84 uint64_t *gpu_addr)
85{
86 struct radeon_object *robj = obj->driver_private;
87 uint32_t flags;
88
89 switch (pin_domain) {
90 case RADEON_GEM_DOMAIN_VRAM:
91 flags = TTM_PL_FLAG_VRAM;
92 break;
93 case RADEON_GEM_DOMAIN_GTT:
94 flags = TTM_PL_FLAG_TT;
95 break;
96 default:
97 flags = TTM_PL_FLAG_SYSTEM;
98 break;
99 }
100 return radeon_object_pin(robj, flags, gpu_addr);
101}
102
103void radeon_gem_object_unpin(struct drm_gem_object *obj)
104{
105 struct radeon_object *robj = obj->driver_private;
106 radeon_object_unpin(robj);
107}
108
109int radeon_gem_set_domain(struct drm_gem_object *gobj,
110 uint32_t rdomain, uint32_t wdomain)
111{
112 struct radeon_object *robj;
113 uint32_t domain;
114 int r;
115
116 /* FIXME: reeimplement */
117 robj = gobj->driver_private;
118 /* work out where to validate the buffer to */
119 domain = wdomain;
120 if (!domain) {
121 domain = rdomain;
122 }
123 if (!domain) {
124 /* Do nothings */
125 printk(KERN_WARNING "Set domain withou domain !\n");
126 return 0;
127 }
128 if (domain == RADEON_GEM_DOMAIN_CPU) {
129 /* Asking for cpu access wait for object idle */
130 r = radeon_object_wait(robj);
131 if (r) {
132 printk(KERN_ERR "Failed to wait for object !\n");
133 return r;
134 }
135 }
136 return 0;
137}
138
139int radeon_gem_init(struct radeon_device *rdev)
140{
141 INIT_LIST_HEAD(&rdev->gem.objects);
142 return 0;
143}
144
/* GEM teardown: force-release any objects still alive. */
void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_object_force_delete(rdev);
}
149
150
151/*
152 * GEM ioctls.
153 */
/*
 * DRM_RADEON_GEM_INFO: report VRAM/GTT sizes to userspace.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;

	args->vram_size = rdev->mc.vram_size;
	/* FIXME: report somethings that makes sense */
	/* 4MiB is subtracted as a rough reservation -- presumably for
	 * pinned/firmware memory; TODO confirm what it accounts for */
	args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024);
	args->gart_size = rdev->mc.gtt_size;
	return 0;
}
166
/* DRM_RADEON_GEM_PREAD: not implemented yet; logs and returns -ENOSYS. */
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
174
/* DRM_RADEON_GEM_PWRITE: not implemented yet; logs and returns -ENOSYS. */
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
182
/*
 * DRM_RADEON_GEM_CREATE: allocate a buffer object (size rounded up to a
 * page) and return a userspace handle for it.
 */
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, false,
				     false, true, &gobj);
	if (r) {
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r) {
		/* no handle was created: drop the allocation reference */
		mutex_lock(&dev->struct_mutex);
		drm_gem_object_unreference(gobj);
		mutex_unlock(&dev->struct_mutex);
		return r;
	}
	/* drop our reference; the handle now keeps the object alive */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(gobj);
	mutex_unlock(&dev->struct_mutex);
	args->handle = handle;
	return 0;
}
213
/*
 * DRM_RADEON_GEM_SET_DOMAIN: validate a buffer object into the
 * requested read/write domain (currently only a wait for idle when the
 * CPU domain is requested -- see radeon_gem_set_domain()).
 */
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -EINVAL;
	}
	robj = gobj->driver_private;

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	/* drop the lookup reference */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gobj);
	mutex_unlock(&dev->struct_mutex);
	return r;
}
241
242int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
243 struct drm_file *filp)
244{
245 struct drm_radeon_gem_mmap *args = data;
246 struct drm_gem_object *gobj;
247 struct radeon_object *robj;
248 int r;
249
250 gobj = drm_gem_object_lookup(dev, filp, args->handle);
251 if (gobj == NULL) {
252 return -EINVAL;
253 }
254 robj = gobj->driver_private;
255 r = radeon_object_mmap(robj, &args->addr_ptr);
256 mutex_lock(&dev->struct_mutex);
257 drm_gem_object_unreference(gobj);
258 mutex_unlock(&dev->struct_mutex);
259 return r;
260}
261
/* DRM_RADEON_GEM_BUSY: stub -- unconditionally reports "not busy". */
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	/* FIXME: implement */
	return 0;
}
268
269int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
270 struct drm_file *filp)
271{
272 struct drm_radeon_gem_wait_idle *args = data;
273 struct drm_gem_object *gobj;
274 struct radeon_object *robj;
275 int r;
276
277 gobj = drm_gem_object_lookup(dev, filp, args->handle);
278 if (gobj == NULL) {
279 return -EINVAL;
280 }
281 robj = gobj->driver_private;
282 r = radeon_object_wait(robj);
283 mutex_lock(&dev->struct_mutex);
284 drm_gem_object_unreference(gobj);
285 mutex_unlock(&dev->struct_mutex);
286 return r;
287}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
new file mode 100644
index 000000000000..71465ed2688a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -0,0 +1,209 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "radeon_drm.h"
28#include "radeon.h"
29
30/**
31 * radeon_ddc_probe
32 *
33 */
34bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
35{
36 u8 out_buf[] = { 0x0, 0x0};
37 u8 buf[2];
38 int ret;
39 struct i2c_msg msgs[] = {
40 {
41 .addr = 0x50,
42 .flags = 0,
43 .len = 1,
44 .buf = out_buf,
45 },
46 {
47 .addr = 0x50,
48 .flags = I2C_M_RD,
49 .len = 1,
50 .buf = buf,
51 }
52 };
53
54 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
55 if (ret == 2)
56 return true;
57
58 return false;
59}
60
61
/*
 * Engage (lock_state != 0) or release the GPIO pads of a DDC bus for
 * software bit-banging.  Also routes the hardware i2c engine away from
 * the pins on pre-AVIVO parts to work around a port-state bug.
 */
void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
{
	struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
	uint32_t temp;
	struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;

	/* RV410 appears to have a bug where the hw i2c in reset
	 * holds the i2c port in a bad state - switch hw i2c away before
	 * doing DDC - do this for all r200s/r300s/r400s for safety sake
	 */
	if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
		if (rec->a_clk_reg == RADEON_GPIO_MONID) {
			WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
						       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
		} else {
			WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
						       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
		}
	}
	if (lock_state) {
		/* clear the "a" (output latch) bits before taking the bus */
		temp = RREG32(rec->a_clk_reg);
		temp &= ~(rec->a_clk_mask);
		WREG32(rec->a_clk_reg, temp);

		temp = RREG32(rec->a_data_reg);
		temp &= ~(rec->a_data_mask);
		WREG32(rec->a_data_reg, temp);
	}

	/* set/clear the mask bits for SCL depending on lock_state */
	temp = RREG32(rec->mask_clk_reg);
	if (lock_state)
		temp |= rec->mask_clk_mask;
	else
		temp &= ~rec->mask_clk_mask;
	WREG32(rec->mask_clk_reg, temp);
	/* read back, presumably to flush the posted write -- the value is
	 * discarded; NOTE(review): confirm this is intentional */
	temp = RREG32(rec->mask_clk_reg);

	/* same for SDA */
	temp = RREG32(rec->mask_data_reg);
	if (lock_state)
		temp |= rec->mask_data_mask;
	else
		temp &= ~rec->mask_data_mask;
	WREG32(rec->mask_data_reg, temp);
	temp = RREG32(rec->mask_data_reg);
}
107
108static int get_clock(void *i2c_priv)
109{
110 struct radeon_i2c_chan *i2c = i2c_priv;
111 struct radeon_device *rdev = i2c->dev->dev_private;
112 struct radeon_i2c_bus_rec *rec = &i2c->rec;
113 uint32_t val;
114
115 val = RREG32(rec->get_clk_reg);
116 val &= rec->get_clk_mask;
117
118 return (val != 0);
119}
120
121
122static int get_data(void *i2c_priv)
123{
124 struct radeon_i2c_chan *i2c = i2c_priv;
125 struct radeon_device *rdev = i2c->dev->dev_private;
126 struct radeon_i2c_bus_rec *rec = &i2c->rec;
127 uint32_t val;
128
129 val = RREG32(rec->get_data_reg);
130 val &= rec->get_data_mask;
131 return (val != 0);
132}
133
134static void set_clock(void *i2c_priv, int clock)
135{
136 struct radeon_i2c_chan *i2c = i2c_priv;
137 struct radeon_device *rdev = i2c->dev->dev_private;
138 struct radeon_i2c_bus_rec *rec = &i2c->rec;
139 uint32_t val;
140
141 val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
142 val |= clock ? 0 : rec->put_clk_mask;
143 WREG32(rec->put_clk_reg, val);
144}
145
146static void set_data(void *i2c_priv, int data)
147{
148 struct radeon_i2c_chan *i2c = i2c_priv;
149 struct radeon_device *rdev = i2c->dev->dev_private;
150 struct radeon_i2c_bus_rec *rec = &i2c->rec;
151 uint32_t val;
152
153 val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
154 val |= data ? 0 : rec->put_data_mask;
155 WREG32(rec->put_data_reg, val);
156}
157
/*
 * Create and register a bit-banged i2c bus on the GPIO lines described
 * by @rec.  Returns the new channel, or NULL on allocation or
 * registration failure.
 */
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
					  struct radeon_i2c_bus_rec *rec,
					  const char *name)
{
	struct radeon_i2c_chan *i2c;
	int ret;

	i2c = drm_calloc(1, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
	if (i2c == NULL)
		return NULL;

	i2c->adapter.owner = THIS_MODULE;
	i2c->adapter.algo_data = &i2c->algo;
	i2c->dev = dev;
	/* wire the GPIO accessors into the generic bit-banging algorithm */
	i2c->algo.setsda = set_data;
	i2c->algo.setscl = set_clock;
	i2c->algo.getsda = get_data;
	i2c->algo.getscl = get_clock;
	i2c->algo.udelay = 20;
	/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
	 * make this, 2 jiffies is a lot more reliable */
	i2c->algo.timeout = 2;
	i2c->algo.data = i2c;
	i2c->rec = *rec;
	i2c_set_adapdata(&i2c->adapter, i2c);

	ret = i2c_bit_add_bus(&i2c->adapter);
	if (ret) {
		DRM_INFO("Failed to register i2c %s\n", name);
		goto out_free;
	}

	return i2c;
out_free:
	drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
	return NULL;

}
196
197void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
198{
199 if (!i2c)
200 return;
201
202 i2c_del_adapter(&i2c->adapter);
203 drm_free(i2c, sizeof(struct radeon_i2c_chan), DRM_MEM_DRIVER);
204}
205
/* Stub: no preferred-encoder selection yet; callers must handle NULL. */
struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
{
	return NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
new file mode 100644
index 000000000000..491d569deb0e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -0,0 +1,158 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_drm.h"
30#include "radeon_reg.h"
31#include "radeon_microcode.h"
32#include "radeon.h"
33#include "atom.h"
34
/*
 * Read the raw interrupt status and acknowledge whatever fired by
 * writing the bits back (write-to-ack behaviour assumed from this
 * usage -- NOTE(review): confirm against register docs).  Returns only
 * the sources this driver handles (currently the SW interrupt).
 */
static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST;

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}
45
46int r100_irq_set(struct radeon_device *rdev)
47{
48 uint32_t tmp = 0;
49
50 if (rdev->irq.sw_int) {
51 tmp |= RADEON_SW_INT_ENABLE;
52 }
53 /* Todo go through CRTC and enable vblank int or not */
54 WREG32(RADEON_GEN_INT_CNTL, tmp);
55 return 0;
56}
57
/*
 * Interrupt service loop for r100-family chips: ack and handle sources
 * until none remain pending, so interrupts arriving mid-processing are
 * not lost.  Returns IRQ_NONE if nothing was ours, IRQ_HANDLED
 * otherwise.
 */
int r100_irq_process(struct radeon_device *rdev)
{
	uint32_t status;

	status = r100_irq_ack(rdev);
	if (!status) {
		return IRQ_NONE;
	}
	while (status) {
		/* SW interrupt */
		if (status & RADEON_SW_INT_TEST) {
			radeon_fence_process(rdev);
		}
		status = r100_irq_ack(rdev);
	}
	return IRQ_HANDLED;
}
75
76int rs600_irq_set(struct radeon_device *rdev)
77{
78 uint32_t tmp = 0;
79
80 if (rdev->irq.sw_int) {
81 tmp |= RADEON_SW_INT_ENABLE;
82 }
83 WREG32(RADEON_GEN_INT_CNTL, tmp);
84 /* Todo go through CRTC and enable vblank int or not */
85 WREG32(R500_DxMODE_INT_MASK, 0);
86 return 0;
87}
88
89irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
90{
91 struct drm_device *dev = (struct drm_device *) arg;
92 struct radeon_device *rdev = dev->dev_private;
93
94 return radeon_irq_process(rdev);
95}
96
/*
 * Called by the drm core before the IRQ handler is installed: mask all
 * sources and drain any stale status bits so nothing fires the moment
 * the handler goes live.
 */
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned i;

	/* Disable *all* interrupts */
	rdev->irq.sw_int = false;
	for (i = 0; i < 2; i++) {
		rdev->irq.crtc_vblank_int[i] = false;
	}
	radeon_irq_set(rdev);
	/* Clear bits */
	radeon_irq_process(rdev);
}
111
112int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
113{
114 struct radeon_device *rdev = dev->dev_private;
115
116 dev->max_vblank_count = 0x001fffff;
117 rdev->irq.sw_int = true;
118 radeon_irq_set(rdev);
119 return 0;
120}
121
122void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
123{
124 struct radeon_device *rdev = dev->dev_private;
125 unsigned i;
126
127 if (rdev == NULL) {
128 return;
129 }
130 /* Disable *all* interrupts */
131 rdev->irq.sw_int = false;
132 for (i = 0; i < 2; i++) {
133 rdev->irq.crtc_vblank_int[i] = false;
134 }
135 radeon_irq_set(rdev);
136}
137
138int radeon_irq_kms_init(struct radeon_device *rdev)
139{
140 int r = 0;
141
142 r = drm_vblank_init(rdev->ddev, 2);
143 if (r) {
144 return r;
145 }
146 drm_irq_install(rdev->ddev);
147 rdev->irq.installed = true;
148 DRM_INFO("radeon: irq initialized.\n");
149 return 0;
150}
151
152void radeon_irq_kms_fini(struct radeon_device *rdev)
153{
154 if (rdev->irq.installed) {
155 rdev->irq.installed = false;
156 drm_irq_uninstall(rdev->ddev);
157 }
158}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
new file mode 100644
index 000000000000..64f42b19cbfa
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -0,0 +1,295 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "drm_sarea.h"
30#include "radeon.h"
31#include "radeon_drm.h"
32
33
34/*
35 * Driver load/unload
36 */
37int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
38{
39 struct radeon_device *rdev;
40 int r;
41
42 rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
43 if (rdev == NULL) {
44 return -ENOMEM;
45 }
46 dev->dev_private = (void *)rdev;
47
48 /* update BUS flag */
49 if (drm_device_is_agp(dev)) {
50 flags |= RADEON_IS_AGP;
51 } else if (drm_device_is_pcie(dev)) {
52 flags |= RADEON_IS_PCIE;
53 } else {
54 flags |= RADEON_IS_PCI;
55 }
56
57 r = radeon_device_init(rdev, dev, dev->pdev, flags);
58 if (r) {
59 DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n");
60 radeon_device_fini(rdev);
61 return r;
62 }
63 return 0;
64}
65
66int radeon_driver_unload_kms(struct drm_device *dev)
67{
68 struct radeon_device *rdev = dev->dev_private;
69
70 radeon_device_fini(rdev);
71 kfree(rdev);
72 dev->dev_private = NULL;
73 return 0;
74}
75
76
77/*
78 * Userspace get informations ioctl
79 */
/*
 * DRM_RADEON_INFO: return one uint32 datum selected by info->request;
 * info->value carries the destination userspace pointer as an integer.
 */
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info;
	uint32_t *value_ptr;
	uint32_t value;

	info = data;
	/* the ioctl struct stores the user pointer as an integer field */
	value_ptr = (uint32_t *)((unsigned long)info->value);
	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		value = dev->pci_device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		value = rdev->num_gb_pipes;
		break;
	default:
		DRM_DEBUG("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}
	return 0;
}
106
107
108/*
109 * Outdated mess for old drm with Xorg being in charge (void function now).
110 */
/* Legacy pre-KMS hook; nothing to do when the kernel owns modesetting. */
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
	return 0;
}
115
116
/* Legacy pre-KMS hook; intentionally empty. */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
}
120
/* Per-file open hook; no per-client state is needed yet. */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	return 0;
}
125
/* Per-file close hook; intentionally empty (no per-client state). */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
}
130
/* Per-file pre-close hook; intentionally empty (no per-client state). */
void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
}
135
136
137/*
138 * VBlank related functions.
139 */
/* Stub: hardware vblank counter not read yet; always reports 0. */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	/* FIXME: implement */
	return 0;
}
145
/* Stub: vblank interrupt enabling not wired up yet; claims success. */
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	/* FIXME: implement */
	return 0;
}
151
/* Stub: vblank interrupt disabling not wired up yet. */
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	/* FIXME: implement */
}
156
157
158/*
159 * For multiple master (like multiple X).
160 */
/* Per-master (per-X-server) state: the legacy SAREA mapping and the
 * driver's view of its contents. */
struct drm_radeon_master_private {
	drm_local_map_t *sarea;
	drm_radeon_sarea_t *sarea_priv;
};
165
166int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master)
167{
168 struct drm_radeon_master_private *master_priv;
169 unsigned long sareapage;
170 int ret;
171
172 master_priv = drm_calloc(1, sizeof(*master_priv), DRM_MEM_DRIVER);
173 if (master_priv == NULL) {
174 return -ENOMEM;
175 }
176 /* prebuild the SAREA */
177 sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
178 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM,
179 _DRM_CONTAINS_LOCK,
180 &master_priv->sarea);
181 if (ret) {
182 DRM_ERROR("SAREA setup failed\n");
183 return ret;
184 }
185 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
186 master_priv->sarea_priv->pfCurrentPage = 0;
187 master->driver_priv = master_priv;
188 return 0;
189}
190
/*
 * Destroy the per-master state created by radeon_master_create_kms().
 * Tolerates a NULL driver_priv (e.g. when create failed).
 * NOTE(review): drm_rmmap_locked() is used here -- presumably the drm
 * core holds the required lock around master destruction; confirm.
 */
void radeon_master_destroy_kms(struct drm_device *dev,
			       struct drm_master *master)
{
	struct drm_radeon_master_private *master_priv = master->driver_priv;

	if (master_priv == NULL) {
		return;
	}
	if (master_priv->sarea) {
		drm_rmmap_locked(dev, master_priv->sarea);
	}
	drm_free(master_priv, sizeof(*master_priv), DRM_MEM_DRIVER);
	master->driver_priv = NULL;
}
205
206
207/*
208 * IOCTL.
209 */
/* Legacy DMA buffer ioctl: rejected under KMS. */
int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	/* Not valid in KMS. */
	return -EINVAL;
}
216
/* Generate a stub handler for a legacy ioctl: logs the offending entry
 * point and rejects it with -EINVAL when running under KMS. */
#define KMS_INVALID_IOCTL(name) \
int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
{ \
	DRM_ERROR("invalid ioctl with kms %s\n", __func__); \
	return -EINVAL; \
}
223
224/*
225 * All these ioctls are invalid in kms world.
226 */
/* Stub out every legacy (user-mode-setting) ioctl: each line expands to
 * a handler that logs and returns -EINVAL under KMS. */
KMS_INVALID_IOCTL(radeon_cp_init_kms)
KMS_INVALID_IOCTL(radeon_cp_start_kms)
KMS_INVALID_IOCTL(radeon_cp_stop_kms)
KMS_INVALID_IOCTL(radeon_cp_reset_kms)
KMS_INVALID_IOCTL(radeon_cp_idle_kms)
KMS_INVALID_IOCTL(radeon_cp_resume_kms)
KMS_INVALID_IOCTL(radeon_engine_reset_kms)
KMS_INVALID_IOCTL(radeon_fullscreen_kms)
KMS_INVALID_IOCTL(radeon_cp_swap_kms)
KMS_INVALID_IOCTL(radeon_cp_clear_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
KMS_INVALID_IOCTL(radeon_cp_indices_kms)
KMS_INVALID_IOCTL(radeon_cp_texture_kms)
KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
KMS_INVALID_IOCTL(radeon_cp_flip_kms)
KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
KMS_INVALID_IOCTL(radeon_mem_free_kms)
KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
KMS_INVALID_IOCTL(radeon_irq_emit_kms)
KMS_INVALID_IOCTL(radeon_irq_wait_kms)
KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)
254
255
/*
 * Ioctl dispatch table for the KMS driver.  The legacy (UMS) entries
 * all point at KMS_INVALID_IOCTL stubs above; only the GEM/CS/INFO
 * entries at the bottom are functional.
 */
struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH),
};
/* number of entries in the table above, exported to the drm core */
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
new file mode 100644
index 000000000000..8086ecf7f03d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -0,0 +1,1276 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h>
28#include <drm/radeon_drm.h>
29#include "radeon_fixed.h"
30#include "radeon.h"
31
/*
 * Restore "common" display registers.  Nothing needs restoring in the
 * KMS path yet, so this is an intentional no-op placeholder.
 */
void radeon_restore_common_regs(struct drm_device *dev)
{
	/* don't need this yet */
}
36
37static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
38{
39 struct radeon_device *rdev = dev->dev_private;
40 int i = 0;
41
42 /* FIXME: Certain revisions of R300 can't recover here. Not sure of
43 the cause yet, but this workaround will mask the problem for now.
44 Other chips usually will pass at the very first test, so the
45 workaround shouldn't have any effect on them. */
46 for (i = 0;
47 (i < 10000 &&
48 RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
49 i++);
50}
51
52static void radeon_pll_write_update(struct drm_device *dev)
53{
54 struct radeon_device *rdev = dev->dev_private;
55
56 while (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_ATOMIC_UPDATE_R);
57
58 WREG32_PLL_P(RADEON_PPLL_REF_DIV,
59 RADEON_PPLL_ATOMIC_UPDATE_W,
60 ~(RADEON_PPLL_ATOMIC_UPDATE_W));
61}
62
63static void radeon_pll2_wait_for_read_update_complete(struct drm_device *dev)
64{
65 struct radeon_device *rdev = dev->dev_private;
66 int i = 0;
67
68
69 /* FIXME: Certain revisions of R300 can't recover here. Not sure of
70 the cause yet, but this workaround will mask the problem for now.
71 Other chips usually will pass at the very first test, so the
72 workaround shouldn't have any effect on them. */
73 for (i = 0;
74 (i < 10000 &&
75 RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
76 i++);
77}
78
79static void radeon_pll2_write_update(struct drm_device *dev)
80{
81 struct radeon_device *rdev = dev->dev_private;
82
83 while (RREG32_PLL(RADEON_P2PLL_REF_DIV) & RADEON_P2PLL_ATOMIC_UPDATE_R);
84
85 WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
86 RADEON_P2PLL_ATOMIC_UPDATE_W,
87 ~(RADEON_P2PLL_ATOMIC_UPDATE_W));
88}
89
/*
 * Derive the PLL gain (PVG) setting from the VCO frequency.
 *
 * @ref_freq: reference frequency, in 10 kHz units
 * @ref_div:  reference divider
 * @fb_div:   feedback divider
 *
 * VCO = ref_freq * fb_div / ref_div.  The original code used a bitwise
 * '&' instead of '*', producing a bogus (far too small) VCO value; the
 * reference implementation in the X.Org radeon driver multiplies.
 *
 * Returns 1 when ref_div is 0 to avoid dividing by zero.
 */
static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
				       uint16_t fb_div)
{
	unsigned int vcoFreq;

	if (!ref_div)
		return 1;

	vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;

	/*
	 * This is horribly crude: the VCO frequency range is divided into
	 * 3 parts, each part having a fixed PLL gain value.
	 */
	if (vcoFreq >= 30000)
		/*
		 * [300..max] MHz : 7
		 */
		return 7;
	else if (vcoFreq >= 18000)
		/*
		 * [180..300) MHz : 4
		 */
		return 4;
	else
		/*
		 * [0..180) MHz : 1
		 */
		return 1;
}
120
/*
 * DPMS for legacy (pre-AVIVO) CRTCs.
 *
 * CRTC2 keeps its enable/blank bits together in CRTC2_GEN_CNTL, while
 * CRTC1 splits them between CRTC_GEN_CNTL (enable) and CRTC_EXT_CNTL
 * (sync/display blanking) — hence the asymmetric programming below.
 */
void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t mask;

	if (radeon_crtc->crtc_id)
		mask = (RADEON_CRTC2_EN |
			RADEON_CRTC2_DISP_DIS |
			RADEON_CRTC2_VSYNC_DIS |
			RADEON_CRTC2_HSYNC_DIS |
			RADEON_CRTC2_DISP_REQ_EN_B);
	else
		mask = (RADEON_CRTC_DISPLAY_DIS |
			RADEON_CRTC_VSYNC_DIS |
			RADEON_CRTC_HSYNC_DIS);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		if (radeon_crtc->crtc_id)
			WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask);
		else {
			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
									 RADEON_CRTC_DISP_REQ_EN_B));
			WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
		}
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		/* STANDBY/SUSPEND/OFF are all treated as a full blank here */
		if (radeon_crtc->crtc_id)
			WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
		else {
			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
										    RADEON_CRTC_DISP_REQ_EN_B));
			WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
		}
		break;
	}

	if (mode != DRM_MODE_DPMS_OFF) {
		/* the palette can be lost across a blank; reload on unblank */
		radeon_crtc_load_lut(crtc);
	}
}
166
/*
 * Properly set the CRTC pixel format and pitch when modesetting is done
 * via atombios but the surface registers are the legacy ones.
 * Silently returns on unsupported framebuffer depths.
 */
void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	int format;		/* CRTC_GEN_CNTL pixel-format code */
	uint32_t crtc_gen_cntl;
	uint32_t disp_merge_cntl;
	uint32_t crtc_pitch;

	switch (crtc->fb->bits_per_pixel) {
	case 15:      /* 555 */
		format = 3;
		break;
	case 16:      /* 565 */
		format = 4;
		break;
	case 24:      /* RGB */
		format = 5;
		break;
	case 32:      /* xRGB */
		format = 6;
		break;
	default:
		return;
	}

	/* pitch in pixels, rounded up; hardware wants it in both halves */
	crtc_pitch  = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
			((crtc->fb->bits_per_pixel * 8) - 1)) /
		       (crtc->fb->bits_per_pixel * 8));
	crtc_pitch |= crtc_pitch << 16;

	WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);

	switch (radeon_crtc->crtc_id) {
	case 0:
		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);

		/* keep everything but the format field (bits 11:8) */
		crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
		crtc_gen_cntl |= (format << 8);
		crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
		WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
		break;
	case 1:
		disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
		disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
		WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);

		crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
		crtc_gen_cntl |= (format << 8);
		WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
		/* mirror CRTC2 sync timing into the FP2 registers */
		WREG32(RADEON_FP_H2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
		WREG32(RADEON_FP_V2_SYNC_STRT_WID, RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
		break;
	}
}
226
227int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
228 struct drm_framebuffer *old_fb)
229{
230 struct drm_device *dev = crtc->dev;
231 struct radeon_device *rdev = dev->dev_private;
232 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
233 struct radeon_framebuffer *radeon_fb;
234 struct drm_gem_object *obj;
235 uint64_t base;
236 uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
237 uint32_t crtc_pitch, pitch_pixels;
238
239 DRM_DEBUG("\n");
240
241 radeon_fb = to_radeon_framebuffer(crtc->fb);
242
243 obj = radeon_fb->obj;
244 if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
245 return -EINVAL;
246 }
247 crtc_offset = (u32)base;
248 crtc_offset_cntl = 0;
249
250 pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
251 crtc_pitch = (((pitch_pixels * crtc->fb->bits_per_pixel) +
252 ((crtc->fb->bits_per_pixel * 8) - 1)) /
253 (crtc->fb->bits_per_pixel * 8));
254 crtc_pitch |= crtc_pitch << 16;
255
256 /* TODO tiling */
257 if (0) {
258 if (ASIC_IS_R300(rdev))
259 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
260 R300_CRTC_MICRO_TILE_BUFFER_DIS |
261 R300_CRTC_MACRO_TILE_EN);
262 else
263 crtc_offset_cntl |= RADEON_CRTC_TILE_EN;
264 } else {
265 if (ASIC_IS_R300(rdev))
266 crtc_offset_cntl &= ~(R300_CRTC_X_Y_MODE_EN |
267 R300_CRTC_MICRO_TILE_BUFFER_DIS |
268 R300_CRTC_MACRO_TILE_EN);
269 else
270 crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN;
271 }
272
273
274 /* TODO more tiling */
275 if (0) {
276 if (ASIC_IS_R300(rdev)) {
277 crtc_tile_x0_y0 = x | (y << 16);
278 base &= ~0x7ff;
279 } else {
280 int byteshift = crtc->fb->bits_per_pixel >> 4;
281 int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11;
282 base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8);
283 crtc_offset_cntl |= (y % 16);
284 }
285 } else {
286 int offset = y * pitch_pixels + x;
287 switch (crtc->fb->bits_per_pixel) {
288 case 15:
289 case 16:
290 offset *= 2;
291 break;
292 case 24:
293 offset *= 3;
294 break;
295 case 32:
296 offset *= 4;
297 break;
298 default:
299 return false;
300 }
301 base += offset;
302 }
303
304 base &= ~7;
305
306 /* update sarea TODO */
307
308 crtc_offset = (u32)base;
309
310 WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, rdev->mc.vram_location);
311
312 if (ASIC_IS_R300(rdev)) {
313 if (radeon_crtc->crtc_id)
314 WREG32(R300_CRTC2_TILE_X0_Y0, crtc_tile_x0_y0);
315 else
316 WREG32(R300_CRTC_TILE_X0_Y0, crtc_tile_x0_y0);
317 }
318 WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, crtc_offset_cntl);
319 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
320 WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
321
322 if (old_fb && old_fb != crtc->fb) {
323 radeon_fb = to_radeon_framebuffer(old_fb);
324 radeon_gem_object_unpin(radeon_fb->obj);
325 }
326 return 0;
327}
328
/*
 * Pack @mode's timings into the legacy CRTC timing registers.
 *
 * Horizontal values are programmed in character-clock (8 pixel) units,
 * vertical values in lines.  Returns false for an unsupported
 * framebuffer depth, true otherwise.
 */
static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	int format;		/* CRTC_GEN_CNTL pixel-format code */
	int hsync_start;
	int hsync_wid;
	int vsync_wid;
	uint32_t crtc_h_total_disp;
	uint32_t crtc_h_sync_strt_wid;
	uint32_t crtc_v_total_disp;
	uint32_t crtc_v_sync_strt_wid;

	DRM_DEBUG("\n");

	switch (crtc->fb->bits_per_pixel) {
	case 15:      /* 555 */
		format = 3;
		break;
	case 16:      /* 565 */
		format = 4;
		break;
	case 24:      /* RGB */
		format = 5;
		break;
	case 32:      /* xRGB */
		format = 6;
		break;
	default:
		return false;
	}

	crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
			     | ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));

	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
	if (!hsync_wid)
		hsync_wid = 1;
	hsync_start = mode->crtc_hsync_start - 8;

	crtc_h_sync_strt_wid = ((hsync_start & 0x1fff)
				| ((hsync_wid & 0x3f) << 16)
				| ((mode->flags & DRM_MODE_FLAG_NHSYNC)
				   ? RADEON_CRTC_H_SYNC_POL
				   : 0));

	/* This works for double scan mode. */
	crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
			     | ((mode->crtc_vdisplay - 1) << 16));

	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
	if (!vsync_wid)
		vsync_wid = 1;

	crtc_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
				| ((vsync_wid & 0x1f) << 16)
				| ((mode->flags & DRM_MODE_FLAG_NVSYNC)
				   ? RADEON_CRTC_V_SYNC_POL
				   : 0));

	/* TODO -> Dell Server */
	/* NOTE(review): dead code (if (0)) kept from the UMS driver — a
	 * Dell-server quirk routing the CRT to DAC2; left for reference. */
	if (0) {
		uint32_t disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
		uint32_t tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
		uint32_t dac2_cntl = RREG32(RADEON_DAC_CNTL2);
		uint32_t crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);

		dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
		dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;

		/* For CRT on DAC2, don't turn it on if BIOS didn't
		   enable it, even it's detected.
		*/
		disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
		tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16));
		tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16));

		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
		WREG32(RADEON_DAC_CNTL2, dac2_cntl);
		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
	}

	if (radeon_crtc->crtc_id) {
		uint32_t crtc2_gen_cntl;
		uint32_t disp2_merge_cntl;

		/* check to see if TV DAC is enabled for another crtc and keep it enabled */
		if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON)
			crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
		else
			crtc2_gen_cntl = 0;

		/* program format + sync/display disabled; DPMS ON unblanks */
		crtc2_gen_cntl |= ((format << 8)
				   | RADEON_CRTC2_VSYNC_DIS
				   | RADEON_CRTC2_HSYNC_DIS
				   | RADEON_CRTC2_DISP_DIS
				   | RADEON_CRTC2_DISP_REQ_EN_B
				   | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
				      ? RADEON_CRTC2_DBL_SCAN_EN
				      : 0)
				   | ((mode->flags & DRM_MODE_FLAG_CSYNC)
				      ? RADEON_CRTC2_CSYNC_EN
				      : 0)
				   | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
				      ? RADEON_CRTC2_INTERLACE_EN
				      : 0));

		disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
		disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;

		WREG32(RADEON_DISP2_MERGE_CNTL, disp2_merge_cntl);
		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
	} else {
		uint32_t crtc_gen_cntl;
		uint32_t crtc_ext_cntl;
		uint32_t disp_merge_cntl;

		crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN
				 | (format << 8)
				 | RADEON_CRTC_DISP_REQ_EN_B
				 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
				    ? RADEON_CRTC_DBL_SCAN_EN
				    : 0)
				 | ((mode->flags & DRM_MODE_FLAG_CSYNC)
				    ? RADEON_CRTC_CSYNC_EN
				    : 0)
				 | ((mode->flags & DRM_MODE_FLAG_INTERLACE)
				    ? RADEON_CRTC_INTERLACE_EN
				    : 0));

		/* CRTC1 blanking bits live in EXT_CNTL; DPMS ON unblanks */
		crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
		crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
				  RADEON_CRTC_VSYNC_DIS |
				  RADEON_CRTC_HSYNC_DIS |
				  RADEON_CRTC_DISPLAY_DIS);

		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;

		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
		WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
		WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
	}

	WREG32(RADEON_CRTC_H_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_h_total_disp);
	WREG32(RADEON_CRTC_H_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_h_sync_strt_wid);
	WREG32(RADEON_CRTC_V_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_v_total_disp);
	WREG32(RADEON_CRTC_V_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_v_sync_strt_wid);

	return true;
}
482
/*
 * Program the pixel PLL for @crtc to produce @mode's pixel clock.
 *
 * CRTC0 uses PPLL, CRTC1 uses P2PLL.  Divider values come either from
 * radeon_compute_pll() or, for LVDS panels whose BIOS supplies known-good
 * dividers, straight from the BIOS tables.  The register sequence
 * (reset + atomic update, write dividers, latch, wait, release reset,
 * switch the clock source) is order-sensitive — do not reorder.
 */
static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	struct drm_encoder *encoder;
	uint32_t feedback_div = 0;
	uint32_t frac_fb_div = 0;
	uint32_t reference_div = 0;
	uint32_t post_divider = 0;
	uint32_t freq = 0;
	uint8_t pll_gain;
	int pll_flags = RADEON_PLL_LEGACY;
	bool use_bios_divs = false;
	/* PLL registers */
	uint32_t pll_ref_div = 0;
	uint32_t pll_fb_post_div = 0;
	uint32_t htotal_cntl = 0;

	struct radeon_pll *pll;

	struct {
		int divider;
		int bitvalue;
	} *post_div, post_divs[] = {
		/* From RAGE 128 VR/RAGE 128 GL Register
		 * Reference Manual (Technical Reference
		 * Manual P/N RRG-G04100-C Rev. 0.04), page
		 * 3-17 (PLL_DIV_[3:0]).
		 */
		{ 1, 0 },               /* VCLK_SRC                 */
		{ 2, 1 },               /* VCLK_SRC/2               */
		{ 4, 2 },               /* VCLK_SRC/4               */
		{ 8, 3 },               /* VCLK_SRC/8               */
		{ 3, 4 },               /* VCLK_SRC/3               */
		{ 16, 5 },              /* VCLK_SRC/16              */
		{ 6, 6 },               /* VCLK_SRC/6               */
		{ 12, 7 },              /* VCLK_SRC/12              */
		{ 0, 0 }
	};

	if (radeon_crtc->crtc_id)
		pll = &rdev->clock.p2pll;
	else
		pll = &rdev->clock.p1pll;

	if (mode->clock > 200000) /* range limits??? */
		pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
	else
		pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;

	/* scan the encoders driven by this CRTC for PLL constraints */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
				pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
			if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
				struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
				struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
				if (lvds) {
					if (lvds->use_bios_dividers) {
						/* trust the BIOS-provided panel dividers */
						pll_ref_div = lvds->panel_ref_divider;
						pll_fb_post_div = (lvds->panel_fb_divider |
								   (lvds->panel_post_divider << 16));
						htotal_cntl = 0;
						use_bios_divs = true;
					}
				}
				pll_flags |= RADEON_PLL_USE_REF_DIV;
			}
		}
	}

	DRM_DEBUG("\n");

	if (!use_bios_divs) {
		radeon_compute_pll(pll, mode->clock,
				   &freq, &feedback_div, &frac_fb_div,
				   &reference_div, &post_divider,
				   pll_flags);

		/* map the numeric post divider to its register encoding */
		for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
			if (post_div->divider == post_divider)
				break;
		}

		if (!post_div->divider)
			post_div = &post_divs[0];

		DRM_DEBUG("dc=%u, fd=%d, rd=%d, pd=%d\n",
			  (unsigned)freq,
			  feedback_div,
			  reference_div,
			  post_divider);

		pll_ref_div = reference_div;
#if defined(__powerpc__) && (0) /* TODO */
		/* apparently programming this otherwise causes a hang??? */
		if (info->MacModel == RADEON_MAC_IBOOK)
			pll_fb_post_div = 0x000600ad;
		else
#endif
			pll_fb_post_div = (feedback_div | (post_div->bitvalue << 16));

		htotal_cntl = mode->htotal & 0x7;

	}

	pll_gain = radeon_compute_pll_gain(pll->reference_freq,
					   pll_ref_div & 0x3ff,
					   pll_fb_post_div & 0x7ff);

	if (radeon_crtc->crtc_id) {
		/* CRTC1 -> P2PLL programming sequence */
		uint32_t pixclks_cntl = ((RREG32_PLL(RADEON_PIXCLKS_CNTL) &
					  ~(RADEON_PIX2CLK_SRC_SEL_MASK)) |
					 RADEON_PIX2CLK_SRC_SEL_P2PLLCLK);

		/* run the pixel clock off the CPU clock while reprogramming */
		WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
			     RADEON_PIX2CLK_SRC_SEL_CPUCLK,
			     ~(RADEON_PIX2CLK_SRC_SEL_MASK));

		WREG32_PLL_P(RADEON_P2PLL_CNTL,
			     RADEON_P2PLL_RESET
			     | RADEON_P2PLL_ATOMIC_UPDATE_EN
			     | ((uint32_t)pll_gain << RADEON_P2PLL_PVG_SHIFT),
			     ~(RADEON_P2PLL_RESET
			       | RADEON_P2PLL_ATOMIC_UPDATE_EN
			       | RADEON_P2PLL_PVG_MASK));

		WREG32_PLL_P(RADEON_P2PLL_REF_DIV,
			     pll_ref_div,
			     ~RADEON_P2PLL_REF_DIV_MASK);

		/* feedback and post dividers share one register; two masked writes */
		WREG32_PLL_P(RADEON_P2PLL_DIV_0,
			     pll_fb_post_div,
			     ~RADEON_P2PLL_FB0_DIV_MASK);

		WREG32_PLL_P(RADEON_P2PLL_DIV_0,
			     pll_fb_post_div,
			     ~RADEON_P2PLL_POST0_DIV_MASK);

		radeon_pll2_write_update(dev);
		radeon_pll2_wait_for_read_update_complete(dev);

		WREG32_PLL(RADEON_HTOTAL2_CNTL, htotal_cntl);

		WREG32_PLL_P(RADEON_P2PLL_CNTL,
			     0,
			     ~(RADEON_P2PLL_RESET
			       | RADEON_P2PLL_SLEEP
			       | RADEON_P2PLL_ATOMIC_UPDATE_EN));

		DRM_DEBUG("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
			  (unsigned)pll_ref_div,
			  (unsigned)pll_fb_post_div,
			  (unsigned)htotal_cntl,
			  RREG32_PLL(RADEON_P2PLL_CNTL));
		DRM_DEBUG("Wrote2: rd=%u, fd=%u, pd=%u\n",
			  (unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
			  (unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
			  (unsigned)((pll_fb_post_div &
				      RADEON_P2PLL_POST0_DIV_MASK) >> 16));

		mdelay(50); /* Let the clock to lock */

		/* switch the pixel clock back onto the freshly locked P2PLL */
		WREG32_PLL_P(RADEON_PIXCLKS_CNTL,
			     RADEON_PIX2CLK_SRC_SEL_P2PLLCLK,
			     ~(RADEON_PIX2CLK_SRC_SEL_MASK));

		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
	} else {
		/* CRTC0 -> PPLL programming sequence */
		if (rdev->flags & RADEON_IS_MOBILITY) {
			/* A temporal workaround for the occational blanking on certain laptop panels.
			   This appears to related to the PLL divider registers (fail to lock?).
			   It occurs even when all dividers are the same with their old settings.
			   In this case we really don't need to fiddle with PLL registers.
			   By doing this we can avoid the blanking problem with some panels.
			*/
			if ((pll_ref_div == (RREG32_PLL(RADEON_PPLL_REF_DIV) & RADEON_PPLL_REF_DIV_MASK)) &&
			    (pll_fb_post_div == (RREG32_PLL(RADEON_PPLL_DIV_3) &
						 (RADEON_PPLL_POST3_DIV_MASK | RADEON_PPLL_FB3_DIV_MASK)))) {
				WREG32_P(RADEON_CLOCK_CNTL_INDEX,
					 RADEON_PLL_DIV_SEL,
					 ~(RADEON_PLL_DIV_SEL));
				r100_pll_errata_after_index(rdev);
				return;
			}
		}

		/* run the pixel clock off the CPU clock while reprogramming */
		WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
			     RADEON_VCLK_SRC_SEL_CPUCLK,
			     ~(RADEON_VCLK_SRC_SEL_MASK));
		WREG32_PLL_P(RADEON_PPLL_CNTL,
			     RADEON_PPLL_RESET
			     | RADEON_PPLL_ATOMIC_UPDATE_EN
			     | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
			     | ((uint32_t)pll_gain << RADEON_PPLL_PVG_SHIFT),
			     ~(RADEON_PPLL_RESET
			       | RADEON_PPLL_ATOMIC_UPDATE_EN
			       | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN
			       | RADEON_PPLL_PVG_MASK));

		/* select the DIV_3 divider set */
		WREG32_P(RADEON_CLOCK_CNTL_INDEX,
			 RADEON_PLL_DIV_SEL,
			 ~(RADEON_PLL_DIV_SEL));
		r100_pll_errata_after_index(rdev);

		if (ASIC_IS_R300(rdev) ||
		    (rdev->family == CHIP_RS300) ||
		    (rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
			if (pll_ref_div & R300_PPLL_REF_DIV_ACC_MASK) {
				/* When restoring console mode, use saved PPLL_REF_DIV
				 * setting.
				 */
				WREG32_PLL_P(RADEON_PPLL_REF_DIV,
					     pll_ref_div,
					     0);
			} else {
				/* R300 uses ref_div_acc field as real ref divider */
				WREG32_PLL_P(RADEON_PPLL_REF_DIV,
					     (pll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
					     ~R300_PPLL_REF_DIV_ACC_MASK);
			}
		} else
			WREG32_PLL_P(RADEON_PPLL_REF_DIV,
				     pll_ref_div,
				     ~RADEON_PPLL_REF_DIV_MASK);

		/* feedback and post dividers share one register; two masked writes */
		WREG32_PLL_P(RADEON_PPLL_DIV_3,
			     pll_fb_post_div,
			     ~RADEON_PPLL_FB3_DIV_MASK);

		WREG32_PLL_P(RADEON_PPLL_DIV_3,
			     pll_fb_post_div,
			     ~RADEON_PPLL_POST3_DIV_MASK);

		radeon_pll_write_update(dev);
		radeon_pll_wait_for_read_update_complete(dev);

		WREG32_PLL(RADEON_HTOTAL_CNTL, htotal_cntl);

		WREG32_PLL_P(RADEON_PPLL_CNTL,
			     0,
			     ~(RADEON_PPLL_RESET
			       | RADEON_PPLL_SLEEP
			       | RADEON_PPLL_ATOMIC_UPDATE_EN
			       | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));

		DRM_DEBUG("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
			  pll_ref_div,
			  pll_fb_post_div,
			  (unsigned)htotal_cntl,
			  RREG32_PLL(RADEON_PPLL_CNTL));
		DRM_DEBUG("Wrote: rd=%d, fd=%d, pd=%d\n",
			  pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
			  pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
			  (pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);

		mdelay(50); /* Let the clock to lock */

		/* switch the pixel clock back onto the freshly locked PPLL */
		WREG32_PLL_P(RADEON_VCLK_ECP_CNTL,
			     RADEON_VCLK_SRC_SEL_PPLLCLK,
			     ~(RADEON_VCLK_SRC_SEL_MASK));

	}
}
749
/* No mode adjustment needed for legacy CRTCs; accept every mode as-is. */
static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
	return true;
}
756
757static int radeon_crtc_mode_set(struct drm_crtc *crtc,
758 struct drm_display_mode *mode,
759 struct drm_display_mode *adjusted_mode,
760 int x, int y, struct drm_framebuffer *old_fb)
761{
762
763 DRM_DEBUG("\n");
764
765 /* TODO TV */
766
767 radeon_crtc_set_base(crtc, x, y, old_fb);
768 radeon_set_crtc_timing(crtc, adjusted_mode);
769 radeon_set_pll(crtc, adjusted_mode);
770 radeon_init_disp_bandwidth(crtc->dev);
771
772 return 0;
773}
774
/* Helper .prepare hook: blank the CRTC before reprogramming it. */
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
	radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
779
/* Helper .commit hook: unblank the CRTC after the modeset completes. */
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
	radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
784
/* drm_crtc_helper callbacks for legacy (pre-AVIVO) CRTCs */
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
	.dpms = radeon_crtc_dpms,
	.mode_fixup = radeon_crtc_mode_fixup,
	.mode_set = radeon_crtc_mode_set,
	.mode_set_base = radeon_crtc_set_base,
	.prepare = radeon_crtc_prepare,
	.commit = radeon_crtc_commit,
};
793
794
/*
 * Hook a legacy CRTC into the DRM helper framework.  CRTC1's register
 * bank sits at a fixed offset from CRTC0's, so record that offset for
 * the "reg + crtc_offset" access pattern used throughout this file.
 */
void radeon_legacy_init_crtc(struct drm_device *dev,
			     struct radeon_crtc *radeon_crtc)
{
	if (radeon_crtc->crtc_id == 1)
		radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP;
	drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs);
}
802
803void radeon_init_disp_bw_legacy(struct drm_device *dev,
804 struct drm_display_mode *mode1,
805 uint32_t pixel_bytes1,
806 struct drm_display_mode *mode2,
807 uint32_t pixel_bytes2)
808{
809 struct radeon_device *rdev = dev->dev_private;
810 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
811 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
812 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
813 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
814 fixed20_12 memtcas_ff[8] = {
815 fixed_init(1),
816 fixed_init(2),
817 fixed_init(3),
818 fixed_init(0),
819 fixed_init_half(1),
820 fixed_init_half(2),
821 fixed_init(0),
822 };
823 fixed20_12 memtcas_rs480_ff[8] = {
824 fixed_init(0),
825 fixed_init(1),
826 fixed_init(2),
827 fixed_init(3),
828 fixed_init(0),
829 fixed_init_half(1),
830 fixed_init_half(2),
831 fixed_init_half(3),
832 };
833 fixed20_12 memtcas2_ff[8] = {
834 fixed_init(0),
835 fixed_init(1),
836 fixed_init(2),
837 fixed_init(3),
838 fixed_init(4),
839 fixed_init(5),
840 fixed_init(6),
841 fixed_init(7),
842 };
843 fixed20_12 memtrbs[8] = {
844 fixed_init(1),
845 fixed_init_half(1),
846 fixed_init(2),
847 fixed_init_half(2),
848 fixed_init(3),
849 fixed_init_half(3),
850 fixed_init(4),
851 fixed_init_half(4)
852 };
853 fixed20_12 memtrbs_r4xx[8] = {
854 fixed_init(4),
855 fixed_init(5),
856 fixed_init(6),
857 fixed_init(7),
858 fixed_init(8),
859 fixed_init(9),
860 fixed_init(10),
861 fixed_init(11)
862 };
863 fixed20_12 min_mem_eff;
864 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
865 fixed20_12 cur_latency_mclk, cur_latency_sclk;
866 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
867 disp_drain_rate2, read_return_rate;
868 fixed20_12 time_disp1_drop_priority;
869 int c;
870 int cur_size = 16; /* in octawords */
871 int critical_point = 0, critical_point2;
872/* uint32_t read_return_rate, time_disp1_drop_priority; */
873 int stop_req, max_stop_req;
874
875 min_mem_eff.full = rfixed_const_8(0);
876 /* get modes */
877 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
878 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
879 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
880 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
881 /* check crtc enables */
882 if (mode2)
883 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
884 if (mode1)
885 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
886 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
887 }
888
889 /*
890 * determine is there is enough bw for current mode
891 */
892 mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
893 temp_ff.full = rfixed_const(100);
894 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
895 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
896 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
897
898 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
899 temp_ff.full = rfixed_const(temp);
900 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
901
902 pix_clk.full = 0;
903 pix_clk2.full = 0;
904 peak_disp_bw.full = 0;
905 if (mode1) {
906 temp_ff.full = rfixed_const(1000);
907 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
908 pix_clk.full = rfixed_div(pix_clk, temp_ff);
909 temp_ff.full = rfixed_const(pixel_bytes1);
910 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
911 }
912 if (mode2) {
913 temp_ff.full = rfixed_const(1000);
914 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
915 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
916 temp_ff.full = rfixed_const(pixel_bytes2);
917 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
918 }
919
920 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
921 if (peak_disp_bw.full >= mem_bw.full) {
922 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
923 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
924 }
925
926 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
927 temp = RREG32(RADEON_MEM_TIMING_CNTL);
928 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
929 mem_trcd = ((temp >> 2) & 0x3) + 1;
930 mem_trp = ((temp & 0x3)) + 1;
931 mem_tras = ((temp & 0x70) >> 4) + 1;
932 } else if (rdev->family == CHIP_R300 ||
933 rdev->family == CHIP_R350) { /* r300, r350 */
934 mem_trcd = (temp & 0x7) + 1;
935 mem_trp = ((temp >> 8) & 0x7) + 1;
936 mem_tras = ((temp >> 11) & 0xf) + 4;
937 } else if (rdev->family == CHIP_RV350 ||
938 rdev->family <= CHIP_RV380) {
939 /* rv3x0 */
940 mem_trcd = (temp & 0x7) + 3;
941 mem_trp = ((temp >> 8) & 0x7) + 3;
942 mem_tras = ((temp >> 11) & 0xf) + 6;
943 } else if (rdev->family == CHIP_R420 ||
944 rdev->family == CHIP_R423 ||
945 rdev->family == CHIP_RV410) {
946 /* r4xx */
947 mem_trcd = (temp & 0xf) + 3;
948 if (mem_trcd > 15)
949 mem_trcd = 15;
950 mem_trp = ((temp >> 8) & 0xf) + 3;
951 if (mem_trp > 15)
952 mem_trp = 15;
953 mem_tras = ((temp >> 12) & 0x1f) + 6;
954 if (mem_tras > 31)
955 mem_tras = 31;
956 } else { /* RV200, R200 */
957 mem_trcd = (temp & 0x7) + 1;
958 mem_trp = ((temp >> 8) & 0x7) + 1;
959 mem_tras = ((temp >> 12) & 0xf) + 4;
960 }
961 /* convert to FF */
962 trcd_ff.full = rfixed_const(mem_trcd);
963 trp_ff.full = rfixed_const(mem_trp);
964 tras_ff.full = rfixed_const(mem_tras);
965
966 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */
967 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
968 data = (temp & (7 << 20)) >> 20;
969 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
970 if (rdev->family == CHIP_RS480) /* don't think rs400 */
971 tcas_ff = memtcas_rs480_ff[data];
972 else
973 tcas_ff = memtcas_ff[data];
974 } else
975 tcas_ff = memtcas2_ff[data];
976
977 if (rdev->family == CHIP_RS400 ||
978 rdev->family == CHIP_RS480) {
979 /* extra cas latency stored in bits 23-25 0-4 clocks */
980 data = (temp >> 23) & 0x7;
981 if (data < 5)
982 tcas_ff.full += rfixed_const(data);
983 }
984
985 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
986 /* on the R300, Tcas is included in Trbs.
987 */
988 temp = RREG32(RADEON_MEM_CNTL);
989 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
990 if (data == 1) {
991 if (R300_MEM_USE_CD_CH_ONLY & temp) {
992 temp = RREG32(R300_MC_IND_INDEX);
993 temp &= ~R300_MC_IND_ADDR_MASK;
994 temp |= R300_MC_READ_CNTL_CD_mcind;
995 WREG32(R300_MC_IND_INDEX, temp);
996 temp = RREG32(R300_MC_IND_DATA);
997 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
998 } else {
999 temp = RREG32(R300_MC_READ_CNTL_AB);
1000 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
1001 }
1002 } else {
1003 temp = RREG32(R300_MC_READ_CNTL_AB);
1004 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
1005 }
1006 if (rdev->family == CHIP_RV410 ||
1007 rdev->family == CHIP_R420 ||
1008 rdev->family == CHIP_R423)
1009 trbs_ff = memtrbs_r4xx[data];
1010 else
1011 trbs_ff = memtrbs[data];
1012 tcas_ff.full += trbs_ff.full;
1013 }
1014
1015 sclk_eff_ff.full = sclk_ff.full;
1016
1017 if (rdev->flags & RADEON_IS_AGP) {
1018 fixed20_12 agpmode_ff;
1019 agpmode_ff.full = rfixed_const(radeon_agpmode);
1020 temp_ff.full = rfixed_const_666(16);
1021 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
1022 }
1023 /* TODO PCIE lanes may affect this - agpmode == 16?? */
1024
1025 if (ASIC_IS_R300(rdev)) {
1026 sclk_delay_ff.full = rfixed_const(250);
1027 } else {
1028 if ((rdev->family == CHIP_RV100) ||
1029 rdev->flags & RADEON_IS_IGP) {
1030 if (rdev->mc.vram_is_ddr)
1031 sclk_delay_ff.full = rfixed_const(41);
1032 else
1033 sclk_delay_ff.full = rfixed_const(33);
1034 } else {
1035 if (rdev->mc.vram_width == 128)
1036 sclk_delay_ff.full = rfixed_const(57);
1037 else
1038 sclk_delay_ff.full = rfixed_const(41);
1039 }
1040 }
1041
1042 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
1043
1044 if (rdev->mc.vram_is_ddr) {
1045 if (rdev->mc.vram_width == 32) {
1046 k1.full = rfixed_const(40);
1047 c = 3;
1048 } else {
1049 k1.full = rfixed_const(20);
1050 c = 1;
1051 }
1052 } else {
1053 k1.full = rfixed_const(40);
1054 c = 3;
1055 }
1056
1057 temp_ff.full = rfixed_const(2);
1058 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
1059 temp_ff.full = rfixed_const(c);
1060 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
1061 temp_ff.full = rfixed_const(4);
1062 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
1063 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
1064 mc_latency_mclk.full += k1.full;
1065
1066 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
1067 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
1068
1069 /*
1070 HW cursor time assuming worst case of full size colour cursor.
1071 */
1072 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
1073 temp_ff.full += trcd_ff.full;
1074 if (temp_ff.full < tras_ff.full)
1075 temp_ff.full = tras_ff.full;
1076 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
1077
1078 temp_ff.full = rfixed_const(cur_size);
1079 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
1080 /*
1081 Find the total latency for the display data.
1082 */
1083 disp_latency_overhead.full = rfixed_const(80);
1084 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
1085 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
1086 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
1087
1088 if (mc_latency_mclk.full > mc_latency_sclk.full)
1089 disp_latency.full = mc_latency_mclk.full;
1090 else
1091 disp_latency.full = mc_latency_sclk.full;
1092
1093 /* setup Max GRPH_STOP_REQ default value */
1094 if (ASIC_IS_RV100(rdev))
1095 max_stop_req = 0x5c;
1096 else
1097 max_stop_req = 0x7c;
1098
1099 if (mode1) {
1100 /* CRTC1
1101 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
1102 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
1103 */
1104 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
1105
1106 if (stop_req > max_stop_req)
1107 stop_req = max_stop_req;
1108
1109 /*
1110 Find the drain rate of the display buffer.
1111 */
1112 temp_ff.full = rfixed_const((16/pixel_bytes1));
1113 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
1114
1115 /*
1116 Find the critical point of the display buffer.
1117 */
1118 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
1119 crit_point_ff.full += rfixed_const_half(0);
1120
1121 critical_point = rfixed_trunc(crit_point_ff);
1122
1123 if (rdev->disp_priority == 2) {
1124 critical_point = 0;
1125 }
1126
1127 /*
1128 The critical point should never be above max_stop_req-4. Setting
1129 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
1130 */
1131 if (max_stop_req - critical_point < 4)
1132 critical_point = 0;
1133
1134 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
1135 /* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
1136 critical_point = 0x10;
1137 }
1138
1139 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
1140 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
1141 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
1142 temp &= ~(RADEON_GRPH_START_REQ_MASK);
1143 if ((rdev->family == CHIP_R350) &&
1144 (stop_req > 0x15)) {
1145 stop_req -= 0x10;
1146 }
1147 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
1148 temp |= RADEON_GRPH_BUFFER_SIZE;
1149 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
1150 RADEON_GRPH_CRITICAL_AT_SOF |
1151 RADEON_GRPH_STOP_CNTL);
1152 /*
1153 Write the result into the register.
1154 */
1155 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
1156 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
1157
1158#if 0
1159 if ((rdev->family == CHIP_RS400) ||
1160 (rdev->family == CHIP_RS480)) {
1161 /* attempt to program RS400 disp regs correctly ??? */
1162 temp = RREG32(RS400_DISP1_REG_CNTL);
1163 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
1164 RS400_DISP1_STOP_REQ_LEVEL_MASK);
1165 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
1166 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
1167 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
1168 temp = RREG32(RS400_DMIF_MEM_CNTL1);
1169 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
1170 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
1171 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
1172 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
1173 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
1174 }
1175#endif
1176
1177 DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
1178 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
1179 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
1180 }
1181
1182 if (mode2) {
1183 u32 grph2_cntl;
1184 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
1185
1186 if (stop_req > max_stop_req)
1187 stop_req = max_stop_req;
1188
1189 /*
1190 Find the drain rate of the display buffer.
1191 */
1192 temp_ff.full = rfixed_const((16/pixel_bytes2));
1193 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
1194
1195 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
1196 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
1197 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
1198 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
1199 if ((rdev->family == CHIP_R350) &&
1200 (stop_req > 0x15)) {
1201 stop_req -= 0x10;
1202 }
1203 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
1204 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
1205 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
1206 RADEON_GRPH_CRITICAL_AT_SOF |
1207 RADEON_GRPH_STOP_CNTL);
1208
1209 if ((rdev->family == CHIP_RS100) ||
1210 (rdev->family == CHIP_RS200))
1211 critical_point2 = 0;
1212 else {
1213 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
1214 temp_ff.full = rfixed_const(temp);
1215 temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
1216 if (sclk_ff.full < temp_ff.full)
1217 temp_ff.full = sclk_ff.full;
1218
1219 read_return_rate.full = temp_ff.full;
1220
1221 if (mode1) {
1222 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
1223 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
1224 } else {
1225 time_disp1_drop_priority.full = 0;
1226 }
1227 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
1228 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
1229 crit_point_ff.full += rfixed_const_half(0);
1230
1231 critical_point2 = rfixed_trunc(crit_point_ff);
1232
1233 if (rdev->disp_priority == 2) {
1234 critical_point2 = 0;
1235 }
1236
1237 if (max_stop_req - critical_point2 < 4)
1238 critical_point2 = 0;
1239
1240 }
1241
1242 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
1243 /* some R300 cards have problem with this set to 0 */
1244 critical_point2 = 0x10;
1245 }
1246
1247 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
1248 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
1249
1250 if ((rdev->family == CHIP_RS400) ||
1251 (rdev->family == CHIP_RS480)) {
1252#if 0
1253 /* attempt to program RS400 disp2 regs correctly ??? */
1254 temp = RREG32(RS400_DISP2_REQ_CNTL1);
1255 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
1256 RS400_DISP2_STOP_REQ_LEVEL_MASK);
1257 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
1258 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
1259 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
1260 temp = RREG32(RS400_DISP2_REQ_CNTL2);
1261 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
1262 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
1263 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
1264 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
1265 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
1266#endif
1267 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
1268 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
1269 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
1270 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
1271 }
1272
1273 DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
1274 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
1275 }
1276}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
new file mode 100644
index 000000000000..2c2f42de1d4c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -0,0 +1,1288 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "drm_crtc_helper.h"
28#include "radeon_drm.h"
29#include "radeon.h"
30#include "atom.h"
31
32
/*
 * Program the RMX (Rage Mode eXpansion) flat-panel scaler registers so that
 * @mode is stretched or centered to the panel's native resolution.
 *
 * Computes and writes FP_HORZ/VERT_STRETCH, CRTC_MORE_CNTL,
 * FP_HORZ_VERT_ACTIVE and the FP CRTC timing shadow registers.
 * Called from the LVDS/DAC mode_set hooks when the encoder is on CRTC 0.
 */
static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	int xres = mode->hdisplay;
	int yres = mode->vdisplay;
	bool hscale = true, vscale = true;
	int hsync_wid;
	int vsync_wid;
	int hsync_start;
	uint32_t scale, inc;
	uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active;
	uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp;
	struct radeon_native_mode *native_mode = &radeon_encoder->native_mode;

	DRM_DEBUG("\n");

	/* Preserve only the reserved/auto-ratio bits; everything else is
	 * recomputed below. */
	fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
		(RADEON_VERT_STRETCH_RESERVED |
		 RADEON_VERT_AUTO_RATIO_INC);
	fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
		(RADEON_HORZ_FP_LOOP_STRETCH |
		 RADEON_HORZ_AUTO_RATIO_INC);

	crtc_more_cntl = 0;
	if ((rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		/* This is to workaround the asic bug for RMX, some versions
		   of BIOS doesn't have this register initialized correctly. */
		crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
	}


	/* Horizontal total/display in units of 8 pixels, minus one
	 * (hardware counts from zero). */
	fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
				| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));

	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
	if (!hsync_wid)
		hsync_wid = 1;
	hsync_start = mode->crtc_hsync_start - 8;

	fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
			      | ((hsync_wid & 0x3f) << 16)
			      | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
				 ? RADEON_CRTC_H_SYNC_POL
				 : 0));

	fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
				| ((mode->crtc_vdisplay - 1) << 16));

	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
	if (!vsync_wid)
		vsync_wid = 1;

	fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
			      | ((vsync_wid & 0x1f) << 16)
			      | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
				 ? RADEON_CRTC_V_SYNC_POL
				 : 0));

	fp_horz_vert_active = 0;

	/* Decide per-axis whether scaling is needed: no native panel size
	 * means no scaling; a mode that already matches the panel on an
	 * axis disables scaling on that axis. */
	if (native_mode->panel_xres == 0 ||
	    native_mode->panel_yres == 0) {
		hscale = false;
		vscale = false;
	} else {
		if (xres > native_mode->panel_xres)
			xres = native_mode->panel_xres;
		if (yres > native_mode->panel_yres)
			yres = native_mode->panel_yres;

		if (xres == native_mode->panel_xres)
			hscale = false;
		if (yres == native_mode->panel_yres)
			vscale = false;
	}

	if (radeon_encoder->flags & RADEON_USE_RMX) {
		if (radeon_encoder->rmx_type != RMX_CENTER) {
			/* Full-screen stretch: program the hardware
			 * stretch ratio (mode size / panel size). */
			if (!hscale)
				fp_horz_stretch |= ((xres/8-1) << 16);
			else {
				inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
				scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
					/ native_mode->panel_xres + 1;
				fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
						    RADEON_HORZ_STRETCH_BLEND |
						    RADEON_HORZ_STRETCH_ENABLE |
						    ((native_mode->panel_xres/8-1) << 16));
			}

			if (!vscale)
				fp_vert_stretch |= ((yres-1) << 12);
			else {
				inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
				scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
					/ native_mode->panel_yres + 1;
				fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
						    RADEON_VERT_STRETCH_ENABLE |
						    RADEON_VERT_STRETCH_BLEND |
						    ((native_mode->panel_yres-1) << 12));
			}
		} else if (radeon_encoder->rmx_type == RMX_CENTER) {
			/* Centering: no stretch; let the CRTC auto-center
			 * and rebuild the FP timing around blank widths. */
			int blank_width;

			fp_horz_stretch |= ((xres/8-1) << 16);
			fp_vert_stretch |= ((yres-1) << 12);

			crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
					   RADEON_CRTC_AUTO_VERT_CENTER_EN);

			blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
			if (blank_width > 110)
				blank_width = 110;

			fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
						| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));

			hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
			if (!hsync_wid)
				hsync_wid = 1;

			fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
					      | ((hsync_wid & 0x3f) << 16)
					      | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
						 ? RADEON_CRTC_H_SYNC_POL
						 : 0));

			fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
						| ((mode->crtc_vdisplay - 1) << 16));

			vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
			if (!vsync_wid)
				vsync_wid = 1;

			fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
					       | ((vsync_wid & 0x1f) << 16)
					       | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
						  ? RADEON_CRTC_V_SYNC_POL
						  : 0)));

			fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) |
					       (((native_mode->panel_xres / 8) & 0x1ff) << 16));
		}
	} else {
		/* RMX not in use: program the scaler to pass through. */
		fp_horz_stretch |= ((xres/8-1) << 16);
		fp_vert_stretch |= ((yres-1) << 12);
	}

	WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch);
	WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch);
	WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl);
	WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active);
	WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid);
	WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid);
	WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
	WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);

}
196
197static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
198{
199 struct drm_device *dev = encoder->dev;
200 struct radeon_device *rdev = dev->dev_private;
201 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
202 uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
203 int panel_pwr_delay = 2000;
204 DRM_DEBUG("\n");
205
206 if (radeon_encoder->enc_priv) {
207 if (rdev->is_atom_bios) {
208 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
209 panel_pwr_delay = lvds->panel_pwr_delay;
210 } else {
211 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
212 panel_pwr_delay = lvds->panel_pwr_delay;
213 }
214 }
215
216 switch (mode) {
217 case DRM_MODE_DPMS_ON:
218 disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
219 disp_pwr_man |= RADEON_AUTO_PWRUP_EN;
220 WREG32(RADEON_DISP_PWR_MAN, disp_pwr_man);
221 lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
222 lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
223 WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
224 udelay(1000);
225
226 lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
227 lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
228 WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
229
230 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
231 lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
232 lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
233 udelay(panel_pwr_delay * 1000);
234 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
235 break;
236 case DRM_MODE_DPMS_STANDBY:
237 case DRM_MODE_DPMS_SUSPEND:
238 case DRM_MODE_DPMS_OFF:
239 pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
240 WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
241 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
242 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
243 lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
244 udelay(panel_pwr_delay * 1000);
245 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
246 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
247 break;
248 }
249
250 if (rdev->is_atom_bios)
251 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
252 else
253 radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
254}
255
256static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
257{
258 struct radeon_device *rdev = encoder->dev->dev_private;
259
260 if (rdev->is_atom_bios)
261 radeon_atom_output_lock(encoder, true);
262 else
263 radeon_combios_output_lock(encoder, true);
264 radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF);
265}
266
267static void radeon_legacy_lvds_commit(struct drm_encoder *encoder)
268{
269 struct radeon_device *rdev = encoder->dev->dev_private;
270
271 radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_ON);
272 if (rdev->is_atom_bios)
273 radeon_atom_output_lock(encoder, false);
274 else
275 radeon_combios_output_lock(encoder, false);
276}
277
/*
 * Program the LVDS control registers for a new mode.
 *
 * Sets up the RMX scaler (CRTC 0 only), loads BIOS-provided power sequencing
 * delays, routes the LVDS block to the driving CRTC/RMX, and leaves the
 * panel disabled — the commit()/dpms() hook turns it back on afterwards.
 */
static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
					struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl;

	DRM_DEBUG("\n");

	/* The RMX scaler only exists on CRTC 0. */
	if (radeon_crtc->crtc_id == 0)
		radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);

	lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
	lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;

	lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
	if ((!rdev->is_atom_bios)) {
		/* COMBIOS: use the panel data parsed from the BIOS for the
		 * gen_cntl value and the DIGON/BLON sequencing delays. */
		struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
		if (lvds) {
			DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
			lvds_gen_cntl = lvds->lvds_gen_cntl;
			lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
					      (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
			lvds_ss_gen_cntl |= ((lvds->panel_digon_delay << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
					     (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
		} else
			lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
	} else
		lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
	/* Leave the panel off here; dpms(ON) re-enables it after commit. */
	lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
	lvds_gen_cntl &= ~(RADEON_LVDS_ON |
			   RADEON_LVDS_BLON |
			   RADEON_LVDS_EN |
			   RADEON_LVDS_RST_FM);

	if (ASIC_IS_R300(rdev))
		lvds_pll_cntl &= ~(R300_LVDS_SRC_SEL_MASK);

	/* Route the LVDS source: R300+ selects via the PLL register
	 * (RMX output or CRTC2), older chips via LVDS_SEL_CRTC2. */
	if (radeon_crtc->crtc_id == 0) {
		if (ASIC_IS_R300(rdev)) {
			if (radeon_encoder->flags & RADEON_USE_RMX)
				lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX;
		} else
			lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2;
	} else {
		if (ASIC_IS_R300(rdev))
			lvds_pll_cntl |= R300_LVDS_SRC_SEL_CRTC2;
		else
			lvds_gen_cntl |= RADEON_LVDS_SEL_CRTC2;
	}

	WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
	WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
	WREG32(RADEON_LVDS_SS_GEN_CNTL, lvds_ss_gen_cntl);

	/* NOTE(review): RV410-specific workaround — resets the clock index
	 * register; reason not documented here, presumably a hw erratum. */
	if (rdev->family == CHIP_RV410)
		WREG32(RADEON_CLOCK_CNTL_INDEX, 0);

	/* Record which CRTC drives this encoder in the BIOS scratch regs. */
	if (rdev->is_atom_bios)
		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
	else
		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
344
345static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
346 struct drm_display_mode *mode,
347 struct drm_display_mode *adjusted_mode)
348{
349 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
350
351 drm_mode_set_crtcinfo(adjusted_mode, 0);
352
353 radeon_encoder->flags &= ~RADEON_USE_RMX;
354
355 if (radeon_encoder->rmx_type != RMX_OFF)
356 radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
357
358 return true;
359}
360
/* Mode-setting helper callbacks for the legacy LVDS encoder, invoked by the
 * DRM CRTC helpers during modeset (prepare/mode_set/commit) and DPMS. */
static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
	.dpms = radeon_legacy_lvds_dpms,
	.mode_fixup = radeon_legacy_lvds_mode_fixup,
	.prepare = radeon_legacy_lvds_prepare,
	.mode_set = radeon_legacy_lvds_mode_set,
	.commit = radeon_legacy_lvds_commit,
};
368
369
/* Core encoder ops for the legacy LVDS encoder; only teardown is needed. */
static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
	.destroy = radeon_enc_destroy,
};
373
374static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
375 struct drm_display_mode *mode,
376 struct drm_display_mode *adjusted_mode)
377{
378
379 drm_mode_set_crtcinfo(adjusted_mode, 0);
380
381 return true;
382}
383
/*
 * DPMS hook for the primary (CRT) DAC: gate the CRT output and the DAC's
 * analog power-down bits (whole DAC plus per-channel R/G/B), then mirror
 * the state into the BIOS scratch registers.
 */
static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
	uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL);
	uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);

	DRM_DEBUG("\n");

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		/* CRT on, DAC and all three color channels powered up. */
		crtc_ext_cntl |= RADEON_CRTC_CRT_ON;
		dac_cntl &= ~RADEON_DAC_PDWN;
		dac_macro_cntl &= ~(RADEON_DAC_PDWN_R |
				    RADEON_DAC_PDWN_G |
				    RADEON_DAC_PDWN_B);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		/* CRT off, DAC and color channels powered down. */
		crtc_ext_cntl &= ~RADEON_CRTC_CRT_ON;
		dac_cntl |= RADEON_DAC_PDWN;
		dac_macro_cntl |= (RADEON_DAC_PDWN_R |
				   RADEON_DAC_PDWN_G |
				   RADEON_DAC_PDWN_B);
		break;
	}

	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
	WREG32(RADEON_DAC_CNTL, dac_cntl);
	WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);

	/* Keep the BIOS scratch registers in sync with the DPMS state. */
	if (rdev->is_atom_bios)
		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
	else
		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
}
422
423static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
424{
425 struct radeon_device *rdev = encoder->dev->dev_private;
426
427 if (rdev->is_atom_bios)
428 radeon_atom_output_lock(encoder, true);
429 else
430 radeon_combios_output_lock(encoder, true);
431 radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
432}
433
434static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder)
435{
436 struct radeon_device *rdev = encoder->dev->dev_private;
437
438 radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_ON);
439
440 if (rdev->is_atom_bios)
441 radeon_atom_output_lock(encoder, false);
442 else
443 radeon_combios_output_lock(encoder, false);
444}
445
/*
 * Program the primary (CRT) DAC for a new mode: set up the RMX scaler on
 * CRTC 0, route the DAC to the driving CRTC, configure DAC output levels,
 * and record the CRTC assignment in the BIOS scratch registers.
 */
static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder,
					       struct drm_display_mode *mode,
					       struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl;

	DRM_DEBUG("\n");

	/* The RMX scaler only exists on CRTC 0. */
	if (radeon_crtc->crtc_id == 0)
		radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);

	/* Route the DAC to the driving CRTC: R200/R300+ use the
	 * DISP_OUTPUT_CNTL source field, older chips use DAC_CNTL2. */
	if (radeon_crtc->crtc_id == 0) {
		if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
			disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
				~(RADEON_DISP_DAC_SOURCE_MASK);
			WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
		} else {
			dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~(RADEON_DAC2_DAC_CLK_SEL);
			WREG32(RADEON_DAC_CNTL2, dac2_cntl);
		}
	} else {
		if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
			disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) &
				~(RADEON_DISP_DAC_SOURCE_MASK);
			disp_output_cntl |= RADEON_DISP_DAC_SOURCE_CRTC2;
			WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
		} else {
			dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC_CLK_SEL;
			WREG32(RADEON_DAC_CNTL2, dac2_cntl);
		}
	}

	dac_cntl = (RADEON_DAC_MASK_ALL |
		    RADEON_DAC_VGA_ADR_EN |
		    /* TODO 6-bits */
		    RADEON_DAC_8BIT_EN);

	WREG32_P(RADEON_DAC_CNTL,
		 dac_cntl,
		 RADEON_DAC_RANGE_CNTL |
		 RADEON_DAC_BLANKING);

	/* Use the BIOS-tuned PS/2 DAC adjustment when available. */
	if (radeon_encoder->enc_priv) {
		struct radeon_encoder_primary_dac *p_dac = (struct radeon_encoder_primary_dac *)radeon_encoder->enc_priv;
		dac_macro_cntl = p_dac->ps2_pdac_adj;
	} else
		dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
	/* NOTE(review): this sets the R/G/B power-down bits during mode_set;
	 * the dpms(ON) hook clears them afterwards — confirm intended. */
	dac_macro_cntl |= RADEON_DAC_PDWN_R | RADEON_DAC_PDWN_G | RADEON_DAC_PDWN_B;
	WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);

	/* Record which CRTC drives this encoder in the BIOS scratch regs. */
	if (rdev->is_atom_bios)
		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
	else
		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
505
506static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_encoder *encoder,
507 struct drm_connector *connector)
508{
509 struct drm_device *dev = encoder->dev;
510 struct radeon_device *rdev = dev->dev_private;
511 uint32_t vclk_ecp_cntl, crtc_ext_cntl;
512 uint32_t dac_ext_cntl, dac_cntl, dac_macro_cntl, tmp;
513 enum drm_connector_status found = connector_status_disconnected;
514 bool color = true;
515
516 /* save the regs we need */
517 vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
518 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
519 dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
520 dac_cntl = RREG32(RADEON_DAC_CNTL);
521 dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
522
523 tmp = vclk_ecp_cntl &
524 ~(RADEON_PIXCLK_ALWAYS_ONb | RADEON_PIXCLK_DAC_ALWAYS_ONb);
525 WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
526
527 tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
528 WREG32(RADEON_CRTC_EXT_CNTL, tmp);
529
530 tmp = RADEON_DAC_FORCE_BLANK_OFF_EN |
531 RADEON_DAC_FORCE_DATA_EN;
532
533 if (color)
534 tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
535 else
536 tmp |= RADEON_DAC_FORCE_DATA_SEL_G;
537
538 if (ASIC_IS_R300(rdev))
539 tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
540 else
541 tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
542
543 WREG32(RADEON_DAC_EXT_CNTL, tmp);
544
545 tmp = dac_cntl & ~(RADEON_DAC_RANGE_CNTL_MASK | RADEON_DAC_PDWN);
546 tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
547 WREG32(RADEON_DAC_CNTL, tmp);
548
549 tmp &= ~(RADEON_DAC_PDWN_R |
550 RADEON_DAC_PDWN_G |
551 RADEON_DAC_PDWN_B);
552
553 WREG32(RADEON_DAC_MACRO_CNTL, tmp);
554
555 udelay(2000);
556
557 if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
558 found = connector_status_connected;
559
560 /* restore the regs we used */
561 WREG32(RADEON_DAC_CNTL, dac_cntl);
562 WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
563 WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
564 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
565 WREG32_PLL(RADEON_VCLK_ECP_CNTL, vclk_ecp_cntl);
566
567 return found;
568}
569
/* Modeset helper hooks for the primary (CRT) DAC, including hardware
 * load detection. */
static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
	.dpms = radeon_legacy_primary_dac_dpms,
	.mode_fixup = radeon_legacy_primary_dac_mode_fixup,
	.prepare = radeon_legacy_primary_dac_prepare,
	.mode_set = radeon_legacy_primary_dac_mode_set,
	.commit = radeon_legacy_primary_dac_commit,
	.detect = radeon_legacy_primary_dac_detect,
};


/* Core encoder ops: only a destroy callback, shared across legacy encoders. */
static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
	.destroy = radeon_enc_destroy,
};
583
584static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
585 struct drm_display_mode *mode,
586 struct drm_display_mode *adjusted_mode)
587{
588
589 drm_mode_set_crtcinfo(adjusted_mode, 0);
590
591 return true;
592}
593
/*
 * DPMS for the internal TMDS (DVI) encoder: gate the FPON/TMDS_EN bits in
 * RADEON_FP_GEN_CNTL and mirror the power state into the BIOS scratch
 * registers (ATOM or COMBIOS flavour).
 */
static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL);
	DRM_DEBUG("\n");

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		fp_gen_cntl |= (RADEON_FP_FPON | RADEON_FP_TMDS_EN);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		/* all three low-power states fully disable the transmitter */
		fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
		break;
	}

	WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);

	/* keep the BIOS informed of the output power state */
	if (rdev->is_atom_bios)
		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
	else
		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
}
619
620static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
621{
622 struct radeon_device *rdev = encoder->dev->dev_private;
623
624 if (rdev->is_atom_bios)
625 radeon_atom_output_lock(encoder, true);
626 else
627 radeon_combios_output_lock(encoder, true);
628 radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF);
629}
630
631static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder)
632{
633 struct radeon_device *rdev = encoder->dev->dev_private;
634
635 radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_ON);
636
637 if (rdev->is_atom_bios)
638 radeon_atom_output_lock(encoder, true);
639 else
640 radeon_combios_output_lock(encoder, true);
641}
642
/*
 * Mode-set for the internal TMDS encoder: program the TMDS PLL for the
 * requested pixel clock (preferring BIOS-provided table values), enable
 * the transmitter, select the panel format, and route the encoder to the
 * CRTC that drives it.
 */
static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
					    struct drm_display_mode *mode,
					    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl;
	int i;

	DRM_DEBUG("\n");

	/* scaler (RMX) is only available on the first CRTC */
	if (radeon_crtc->crtc_id == 0)
		radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);

	tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
	tmp &= 0xfffff;
	if (rdev->family == CHIP_RV280) {
		/* bit 22 of TMDS_PLL_CNTL is read-back inverted */
		tmp ^= (1 << 22);
		tmds_pll_cntl ^= (1 << 22);
	}

	/* pick the BIOS PLL value for the first table entry whose frequency
	 * limit exceeds the requested pixel clock; table ends at freq == 0 */
	if (radeon_encoder->enc_priv) {
		struct radeon_encoder_int_tmds *tmds = (struct radeon_encoder_int_tmds *)radeon_encoder->enc_priv;

		for (i = 0; i < 4; i++) {
			if (tmds->tmds_pll[i].freq == 0)
				break;
			if ((uint32_t)(mode->clock / 10) < tmds->tmds_pll[i].freq) {
				tmp = tmds->tmds_pll[i].value;
				break;
			}
		}
	}

	if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV280)) {
		/* only take the table's upper bits when it actually sets them */
		if (tmp & 0xfff00000)
			tmds_pll_cntl = tmp;
		else {
			tmds_pll_cntl &= 0xfff00000;
			tmds_pll_cntl |= tmp;
		}
	} else
		tmds_pll_cntl = tmp;

	tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) &
		~(RADEON_TMDS_TRANSMITTER_PLLRST);

	if (rdev->family == CHIP_R200 ||
	    rdev->family == CHIP_R100 ||
	    ASIC_IS_R300(rdev))
		tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
	else /* RV chips got this bit reversed */
		tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;

	fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
		       (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
			RADEON_FP_CRTC_DONT_SHADOW_HEND));

	/* keep the output off until ->commit runs DPMS_ON */
	fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);

	if (1) /* FIXME rgbBits == 8 */
		fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */
	else
		fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */

	/* route the encoder source to the owning CRTC (or the scaler) */
	if (radeon_crtc->crtc_id == 0) {
		if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
			fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
			if (radeon_encoder->flags & RADEON_USE_RMX)
				fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
			else
				fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
		} else
			fp_gen_cntl |= RADEON_FP_SEL_CRTC1;
	} else {
		if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
			fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
			fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
		} else
			fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
	}

	WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
	WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
	WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);

	/* record the encoder/CRTC pairing in the BIOS scratch registers */
	if (rdev->is_atom_bios)
		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
	else
		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
737
/* Modeset helper hooks for the internal TMDS encoder (no hardware
 * detect; DDC is used for DVI detection instead). */
static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
	.dpms = radeon_legacy_tmds_int_dpms,
	.mode_fixup = radeon_legacy_tmds_int_mode_fixup,
	.prepare = radeon_legacy_tmds_int_prepare,
	.mode_set = radeon_legacy_tmds_int_mode_set,
	.commit = radeon_legacy_tmds_int_commit,
};


/* Core encoder ops: shared destroy callback only. */
static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
	.destroy = radeon_enc_destroy,
};
750
751static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
752 struct drm_display_mode *mode,
753 struct drm_display_mode *adjusted_mode)
754{
755
756 drm_mode_set_crtcinfo(adjusted_mode, 0);
757
758 return true;
759}
760
/*
 * DPMS for the external (DVO) TMDS encoder: toggle FP2_ON/FP2_DVO_EN and
 * the blanking bit in RADEON_FP2_GEN_CNTL, then mirror the power state
 * into the BIOS scratch registers.
 */
static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
	DRM_DEBUG("\n");

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		fp2_gen_cntl &= ~RADEON_FP2_BLANK_EN;
		fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		/* blank and disable for all low-power states */
		fp2_gen_cntl |= RADEON_FP2_BLANK_EN;
		fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
		break;
	}

	WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);

	/* keep the BIOS informed of the output power state */
	if (rdev->is_atom_bios)
		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
	else
		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
}
788
/*
 * ->prepare hook: take the BIOS output lock and blank the encoder ahead
 * of the mode set; radeon_legacy_tmds_ext_commit() unlocks it again.
 */
static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
{
	struct radeon_device *rdev = encoder->dev->dev_private;

	if (rdev->is_atom_bios)
		radeon_atom_output_lock(encoder, true);
	else
		radeon_combios_output_lock(encoder, true);
	radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF);
}
799
/*
 * ->commit hook: re-enable the output after the mode set and release the
 * BIOS output lock taken in radeon_legacy_tmds_ext_prepare().
 */
static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder)
{
	struct radeon_device *rdev = encoder->dev->dev_private;
	radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_ON);

	/* false == unlock, balancing the lock in ->prepare */
	if (rdev->is_atom_bios)
		radeon_atom_output_lock(encoder, false);
	else
		radeon_combios_output_lock(encoder, false);
}
810
/*
 * Mode-set for the external (DVO) TMDS encoder.  ATOM boards program the
 * external transmitter through the BIOS tables; COMBIOS boards program
 * FP2_GEN_CNTL directly, including OEM-specific clock-mode quirks.
 * Finally the encoder is routed to its CRTC and the BIOS scratch
 * registers are updated.
 */
static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
					    struct drm_display_mode *mode,
					    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	uint32_t fp2_gen_cntl;

	DRM_DEBUG("\n");

	/* scaler (RMX) only exists on the first CRTC */
	if (radeon_crtc->crtc_id == 0)
		radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);

	if (rdev->is_atom_bios) {
		/* let the ATOM table do the transmitter setup */
		radeon_encoder->pixel_clock = adjusted_mode->clock;
		atombios_external_tmds_setup(encoder, ATOM_ENABLE);
		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
	} else {
		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);

		if (1) /* FIXME rgbBits == 8 */
			fp2_gen_cntl |= RADEON_FP2_PANEL_FORMAT; /* 24 bit format, */
		else
			fp2_gen_cntl &= ~RADEON_FP2_PANEL_FORMAT;/* 18 bit format, */

		/* keep the output off until ->commit runs DPMS_ON */
		fp2_gen_cntl &= ~(RADEON_FP2_ON |
				  RADEON_FP2_DVO_EN |
				  RADEON_FP2_DVO_RATE_SEL_SDR);

		/* XXX: these are oem specific */
		if (ASIC_IS_R300(rdev)) {
			if ((dev->pdev->device == 0x4850) &&
			    (dev->pdev->subsystem_vendor == 0x1028) &&
			    (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */
				fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE;
			else
				fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE;

			/*if (mode->clock > 165000)
			  fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
		}
	}

	/* route the encoder source to the owning CRTC (or the scaler) */
	if (radeon_crtc->crtc_id == 0) {
		if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
			fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
			if (radeon_encoder->flags & RADEON_USE_RMX)
				fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX;
			else
				fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1;
		} else
			fp2_gen_cntl &= ~RADEON_FP2_SRC_SEL_CRTC2;
	} else {
		if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) {
			fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK;
			fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
		} else
			fp2_gen_cntl |= RADEON_FP2_SRC_SEL_CRTC2;
	}

	WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);

	/* record the encoder/CRTC pairing in the BIOS scratch registers */
	if (rdev->is_atom_bios)
		radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
	else
		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
880
/* Modeset helper hooks for the external (DVO) TMDS encoder. */
static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
	.dpms = radeon_legacy_tmds_ext_dpms,
	.mode_fixup = radeon_legacy_tmds_ext_mode_fixup,
	.prepare = radeon_legacy_tmds_ext_prepare,
	.mode_set = radeon_legacy_tmds_ext_mode_set,
	.commit = radeon_legacy_tmds_ext_commit,
};


/* Core encoder ops: shared destroy callback only. */
static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
	.destroy = radeon_enc_destroy,
};
893
894static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
895 struct drm_display_mode *mode,
896 struct drm_display_mode *adjusted_mode)
897{
898
899 drm_mode_set_crtcinfo(adjusted_mode, 0);
900
901 return true;
902}
903
/*
 * DPMS for the TV DAC (secondary DAC) output.  R200 routes this output
 * through FP2; all other families toggle CRT2 on CRTC2 plus the TV DAC
 * power-down bits (R420/R423/RV410 have relocated DACPD bits).  The TV
 * encoder proper is not yet handled (FIXME TV).
 */
static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0;
	/* uint32_t tv_master_cntl = 0; */

	DRM_DEBUG("\n");

	if (rdev->family == CHIP_R200)
		fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
	else {
		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
		/* FIXME TV */
		/* tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); */
		tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
	}

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		if (rdev->family == CHIP_R200) {
			fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN);
		} else {
			crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON;
			/* tv_master_cntl |= RADEON_TV_ON; */
			/* clearing the PDWN bits powers the DAC channels up */
			if (rdev->family == CHIP_R420 ||
			    rdev->family == CHIP_R423 ||
			    rdev->family == CHIP_RV410)
				tv_dac_cntl &= ~(R420_TV_DAC_RDACPD |
						 R420_TV_DAC_GDACPD |
						 R420_TV_DAC_BDACPD |
						 RADEON_TV_DAC_BGSLEEP);
			else
				tv_dac_cntl &= ~(RADEON_TV_DAC_RDACPD |
						 RADEON_TV_DAC_GDACPD |
						 RADEON_TV_DAC_BDACPD |
						 RADEON_TV_DAC_BGSLEEP);
		}
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		if (rdev->family == CHIP_R200)
			fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN);
		else {
			crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
			/* tv_master_cntl &= ~RADEON_TV_ON; */
			/* setting the PDWN bits powers the DAC channels down */
			if (rdev->family == CHIP_R420 ||
			    rdev->family == CHIP_R423 ||
			    rdev->family == CHIP_RV410)
				tv_dac_cntl |= (R420_TV_DAC_RDACPD |
						R420_TV_DAC_GDACPD |
						R420_TV_DAC_BDACPD |
						RADEON_TV_DAC_BGSLEEP);
			else
				tv_dac_cntl |= (RADEON_TV_DAC_RDACPD |
						RADEON_TV_DAC_GDACPD |
						RADEON_TV_DAC_BDACPD |
						RADEON_TV_DAC_BGSLEEP);
		}
		break;
	}

	if (rdev->family == CHIP_R200) {
		WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
	} else {
		WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
		/* WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); */
		WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
	}

	/* keep the BIOS informed of the output power state */
	if (rdev->is_atom_bios)
		radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
	else
		radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
}
980
/*
 * ->prepare hook: take the BIOS output lock and blank the TV DAC ahead of
 * the mode set; ->commit is expected to unlock it again.
 */
static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
{
	struct radeon_device *rdev = encoder->dev->dev_private;

	if (rdev->is_atom_bios)
		radeon_atom_output_lock(encoder, true);
	else
		radeon_combios_output_lock(encoder, true);
	radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF);
}
991
992static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder)
993{
994 struct radeon_device *rdev = encoder->dev->dev_private;
995
996 radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_ON);
997
998 if (rdev->is_atom_bios)
999 radeon_atom_output_lock(encoder, true);
1000 else
1001 radeon_combios_output_lock(encoder, true);
1002}
1003
1004static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
1005 struct drm_display_mode *mode,
1006 struct drm_display_mode *adjusted_mode)
1007{
1008 struct drm_device *dev = encoder->dev;
1009 struct radeon_device *rdev = dev->dev_private;
1010 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1011 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1012 uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0;
1013 uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0;
1014
1015 DRM_DEBUG("\n");
1016
1017 if (radeon_crtc->crtc_id == 0)
1018 radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode);
1019
1020 if (rdev->family != CHIP_R200) {
1021 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
1022 if (rdev->family == CHIP_R420 ||
1023 rdev->family == CHIP_R423 ||
1024 rdev->family == CHIP_RV410) {
1025 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
1026 RADEON_TV_DAC_BGADJ_MASK |
1027 R420_TV_DAC_DACADJ_MASK |
1028 R420_TV_DAC_RDACPD |
1029 R420_TV_DAC_GDACPD |
1030 R420_TV_DAC_GDACPD |
1031 R420_TV_DAC_TVENABLE);
1032 } else {
1033 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
1034 RADEON_TV_DAC_BGADJ_MASK |
1035 RADEON_TV_DAC_DACADJ_MASK |
1036 RADEON_TV_DAC_RDACPD |
1037 RADEON_TV_DAC_GDACPD |
1038 RADEON_TV_DAC_GDACPD);
1039 }
1040
1041 /* FIXME TV */
1042 if (radeon_encoder->enc_priv) {
1043 struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
1044 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
1045 RADEON_TV_DAC_NHOLD |
1046 RADEON_TV_DAC_STD_PS2 |
1047 tv_dac->ps2_tvdac_adj);
1048 } else
1049 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
1050 RADEON_TV_DAC_NHOLD |
1051 RADEON_TV_DAC_STD_PS2);
1052
1053 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
1054 }
1055
1056 if (ASIC_IS_R300(rdev)) {
1057 gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
1058 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
1059 } else if (rdev->family == CHIP_R200)
1060 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
1061 else
1062 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
1063
1064 dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL;
1065
1066 if (radeon_crtc->crtc_id == 0) {
1067 if (ASIC_IS_R300(rdev)) {
1068 disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
1069 disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC;
1070 } else if (rdev->family == CHIP_R200) {
1071 fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
1072 RADEON_FP2_DVO_RATE_SEL_SDR);
1073 } else
1074 disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
1075 } else {
1076 if (ASIC_IS_R300(rdev)) {
1077 disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK;
1078 disp_output_cntl |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
1079 } else if (rdev->family == CHIP_R200) {
1080 fp2_gen_cntl &= ~(R200_FP2_SOURCE_SEL_MASK |
1081 RADEON_FP2_DVO_RATE_SEL_SDR);
1082 fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC2;
1083 } else
1084 disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL;
1085 }
1086
1087 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
1088
1089 if (ASIC_IS_R300(rdev)) {
1090 WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
1091 WREG32(RADEON_DISP_TV_OUT_CNTL, disp_output_cntl);
1092 } else if (rdev->family == CHIP_R200)
1093 WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
1094 else
1095 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
1096
1097 if (rdev->is_atom_bios)
1098 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
1099 else
1100 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
1101
1102}
1103
/*
 * Load-detect for the TV DAC in CRT (PS2) mode: force a constant level
 * through CRTC2/TV DAC with the comparator enabled and sample the result.
 * All clobbered registers are saved up front and restored before return.
 *
 * NOTE: the computed result is deliberately discarded and the function
 * always reports "disconnected" (see the commented-out "return found")
 * until TV-out support is sorted out (FIXME tv).
 */
static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
							     struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
	uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp;
	enum drm_connector_status found = connector_status_disconnected;
	bool color = true;	/* probe in RGB mode; green-only path unused */

	/* FIXME tv */

	/* save the regs we need */
	pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
	gpiopad_a = ASIC_IS_R300(rdev) ? RREG32(RADEON_GPIOPAD_A) : 0;
	disp_output_cntl = ASIC_IS_R300(rdev) ? RREG32(RADEON_DISP_OUTPUT_CNTL) : 0;
	disp_hw_debug = ASIC_IS_R300(rdev) ? 0 : RREG32(RADEON_DISP_HW_DEBUG);
	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
	tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
	dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
	dac_cntl2 = RREG32(RADEON_DAC_CNTL2);

	/* keep the CRTC2 pixel clock running while we probe */
	tmp = pixclks_cntl & ~(RADEON_PIX2CLK_ALWAYS_ONb
			       | RADEON_PIX2CLK_DAC_ALWAYS_ONb);
	WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);

	if (ASIC_IS_R300(rdev))
		WREG32_P(RADEON_GPIOPAD_A, 1, ~1);

	tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
	tmp |= RADEON_CRTC2_CRT2_ON |
		(2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);

	WREG32(RADEON_CRTC2_GEN_CNTL, tmp);

	/* route the TV DAC to CRTC2 on this asic family */
	if (ASIC_IS_R300(rdev)) {
		tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
		tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
		WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
	} else {
		tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
		WREG32(RADEON_DISP_HW_DEBUG, tmp);
	}

	tmp = RADEON_TV_DAC_NBLANK |
		RADEON_TV_DAC_NHOLD |
		RADEON_TV_MONITOR_DETECT_EN |
		RADEON_TV_DAC_STD_PS2;

	WREG32(RADEON_TV_DAC_CNTL, tmp);

	/* force a constant level onto the DAC outputs */
	tmp = RADEON_DAC2_FORCE_BLANK_OFF_EN |
		RADEON_DAC2_FORCE_DATA_EN;

	if (color)
		tmp |= RADEON_DAC_FORCE_DATA_SEL_RGB;
	else
		tmp |= RADEON_DAC_FORCE_DATA_SEL_G;

	if (ASIC_IS_R300(rdev))
		tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
	else
		tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);

	WREG32(RADEON_DAC_EXT_CNTL, tmp);

	tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
	WREG32(RADEON_DAC_CNTL2, tmp);

	/* NOTE(review): 10 ms via udelay() exceeds the usual udelay limit;
	 * mdelay(10) may be more appropriate — verify */
	udelay(10000);

	if (ASIC_IS_R300(rdev)) {
		if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
			found = connector_status_connected;
	} else {
		if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUTPUT)
			found = connector_status_connected;
	}

	/* restore regs we used */
	WREG32(RADEON_DAC_CNTL2, dac_cntl2);
	WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
	WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);

	if (ASIC_IS_R300(rdev)) {
		WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
		WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
	} else {
		WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
	}
	WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);

	/* detection result intentionally ignored for now */
	/* return found; */
	return connector_status_disconnected;

}
1201
/* Modeset helper hooks for the TV DAC, including (currently disabled)
 * hardware load detection. */
static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
	.dpms = radeon_legacy_tv_dac_dpms,
	.mode_fixup = radeon_legacy_tv_dac_mode_fixup,
	.prepare = radeon_legacy_tv_dac_prepare,
	.mode_set = radeon_legacy_tv_dac_mode_set,
	.commit = radeon_legacy_tv_dac_commit,
	.detect = radeon_legacy_tv_dac_detect,
};


/* Core encoder ops: shared destroy callback only. */
static const struct drm_encoder_funcs radeon_legacy_tv_dac_enc_funcs = {
	.destroy = radeon_enc_destroy,
};
1215
/*
 * Register a legacy (pre-AVIVO) encoder with the mode-setting core.
 *
 * If an encoder with this id was already added, just OR in the newly
 * supported device bits.  Otherwise allocate a radeon_encoder, bind the
 * funcs/helper tables matching the encoder type, and fetch the
 * type-specific parameters from the BIOS (ATOM or COMBIOS).
 */
void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		radeon_encoder = to_radeon_encoder(encoder);
		if (radeon_encoder->encoder_id == encoder_id) {
			radeon_encoder->devices |= supported_device;
			return;
		}

	}

	/* add a new one */
	radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
	if (!radeon_encoder)
		/* OOM: silently skip this encoder */
		return;

	encoder = &radeon_encoder->base;
	/* legacy encoders can be driven by either CRTC */
	encoder->possible_crtcs = 0x3;
	encoder->possible_clones = 0;

	radeon_encoder->enc_priv = NULL;

	radeon_encoder->encoder_id = encoder_id;
	radeon_encoder->devices = supported_device;

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
		drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS);
		drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
		if (rdev->is_atom_bios)
			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
		else
			radeon_encoder->enc_priv = radeon_combios_get_lvds_info(radeon_encoder);
		/* panels always go through the scaler */
		radeon_encoder->rmx_type = RMX_FULL;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS);
		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs);
		if (rdev->is_atom_bios)
			radeon_encoder->enc_priv = radeon_atombios_get_tmds_info(radeon_encoder);
		else
			radeon_encoder->enc_priv = radeon_combios_get_tmds_info(radeon_encoder);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
		drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC);
		drm_encoder_helper_add(encoder, &radeon_legacy_primary_dac_helper_funcs);
		if (rdev->is_atom_bios)
			radeon_encoder->enc_priv = radeon_atombios_get_primary_dac_info(radeon_encoder);
		else
			radeon_encoder->enc_priv = radeon_combios_get_primary_dac_info(radeon_encoder);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
		drm_encoder_init(dev, encoder, &radeon_legacy_tv_dac_enc_funcs, DRM_MODE_ENCODER_TVDAC);
		drm_encoder_helper_add(encoder, &radeon_legacy_tv_dac_helper_funcs);
		if (rdev->is_atom_bios)
			radeon_encoder->enc_priv = radeon_atombios_get_tv_dac_info(radeon_encoder);
		else
			radeon_encoder->enc_priv = radeon_combios_get_tv_dac_info(radeon_encoder);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
		if (!rdev->is_atom_bios)
			radeon_combios_get_ext_tmds_info(radeon_encoder);
		break;
	}
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
new file mode 100644
index 000000000000..9173b687462b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -0,0 +1,398 @@
1/*
2 * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
3 * VA Linux Systems Inc., Fremont, California.
4 * Copyright 2008 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Original Authors:
25 * Kevin E. Martin, Rickard E. Faith, Alan Hourihane
26 *
27 * Kernel port Author: Dave Airlie
28 */
29
30#ifndef RADEON_MODE_H
31#define RADEON_MODE_H
32
33#include <drm_crtc.h>
34#include <drm_mode.h>
35#include <drm_edid.h>
36#include <linux/i2c.h>
37#include <linux/i2c-id.h>
38#include <linux/i2c-algo-bit.h>
39
40#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base)
41#define to_radeon_connector(x) container_of(x, struct radeon_connector, base)
42#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
43#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
44
/* Physical connector types as enumerated by the BIOS connector tables. */
enum radeon_connector_type {
	CONNECTOR_NONE,
	CONNECTOR_VGA,
	CONNECTOR_DVI_I,
	CONNECTOR_DVI_D,
	CONNECTOR_DVI_A,
	CONNECTOR_STV,
	CONNECTOR_CTV,
	CONNECTOR_LVDS,
	CONNECTOR_DIGITAL,
	CONNECTOR_SCART,
	CONNECTOR_HDMI_TYPE_A,
	CONNECTOR_HDMI_TYPE_B,
	CONNECTOR_0XC,
	CONNECTOR_0XD,
	CONNECTOR_DIN,
	CONNECTOR_DISPLAY_PORT,
	CONNECTOR_UNSUPPORTED
};

/* DVI-I subtype selection: autodetect, or force digital/analog. */
enum radeon_dvi_type {
	DVI_AUTO,
	DVI_DIGITAL,
	DVI_ANALOG
};

/* RMX (scaler) mode for fixed-mode panels. */
enum radeon_rmx_type {
	RMX_OFF,
	RMX_FULL,
	RMX_CENTER,
	RMX_ASPECT
};

/* TV-out broadcast standards. */
enum radeon_tv_std {
	TV_STD_NTSC,
	TV_STD_PAL,
	TV_STD_PAL_M,
	TV_STD_PAL_60,
	TV_STD_NTSC_J,
	TV_STD_SCART_PAL,
	TV_STD_SECAM,
	TV_STD_PAL_CN,
};
88
/* Register/mask pairs describing one bit-banged GPIO i2c bus as found in
 * the BIOS tables (mask = enable, a = output value, put/get = data I/O). */
struct radeon_i2c_bus_rec {
	bool valid;	/* false if this record holds no usable bus */
	uint32_t mask_clk_reg;
	uint32_t mask_data_reg;
	uint32_t a_clk_reg;
	uint32_t a_data_reg;
	uint32_t put_clk_reg;
	uint32_t put_data_reg;
	uint32_t get_clk_reg;
	uint32_t get_data_reg;
	uint32_t mask_clk_mask;
	uint32_t mask_data_mask;
	uint32_t put_clk_mask;
	uint32_t put_data_mask;
	uint32_t get_clk_mask;
	uint32_t get_data_mask;
	uint32_t a_clk_mask;
	uint32_t a_data_mask;
};

/* One BIOS TMDS PLL table entry: register value to use for pixel clocks
 * below 'freq' (compared against mode->clock / 10 — presumably 10 kHz
 * units; 0 terminates the table). */
struct radeon_tmds_pll {
    uint32_t freq;
    uint32_t value;
};

#define RADEON_MAX_BIOS_CONNECTOR 16

/* Constraint/preference flags for radeon_compute_pll(). */
#define RADEON_PLL_USE_BIOS_DIVS        (1 << 0)
#define RADEON_PLL_NO_ODD_POST_DIV      (1 << 1)
#define RADEON_PLL_USE_REF_DIV          (1 << 2)
#define RADEON_PLL_LEGACY               (1 << 3)
#define RADEON_PLL_PREFER_LOW_REF_DIV   (1 << 4)
#define RADEON_PLL_PREFER_HIGH_REF_DIV  (1 << 5)
#define RADEON_PLL_PREFER_LOW_FB_DIV    (1 << 6)
#define RADEON_PLL_PREFER_HIGH_FB_DIV   (1 << 7)
#define RADEON_PLL_PREFER_LOW_POST_DIV  (1 << 8)
#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
#define RADEON_PLL_USE_FRAC_FB_DIV      (1 << 10)

/* PLL limits and reference clock parameters, filled from the BIOS. */
struct radeon_pll {
	uint16_t reference_freq;
	uint16_t reference_div;
	uint32_t pll_in_min;
	uint32_t pll_in_max;
	uint32_t pll_out_min;
	uint32_t pll_out_max;
	uint16_t xclk;

	uint32_t min_ref_div;
	uint32_t max_ref_div;
	uint32_t min_post_div;
	uint32_t max_post_div;
	uint32_t min_feedback_div;
	uint32_t max_feedback_div;
	uint32_t min_frac_feedback_div;
	uint32_t max_frac_feedback_div;
	uint32_t best_vco;
};
147
/* A registered i2c adapter plus the bit-banging algo data and the GPIO
 * bus record it drives. */
struct radeon_i2c_chan {
	struct drm_device *dev;
	struct i2c_adapter adapter;
	struct i2c_algo_bit_data algo;
	struct radeon_i2c_bus_rec rec;
};

/* mostly for macs, but really any system without connector tables */
enum radeon_connector_table {
	CT_NONE,
	CT_GENERIC,
	CT_IBOOK,
	CT_POWERBOOK_EXTERNAL,
	CT_POWERBOOK_INTERNAL,
	CT_POWERBOOK_VGA,
	CT_MINI_EXTERNAL,
	CT_MINI_INTERNAL,
	CT_IMAC_G5_ISIGHT,
	CT_EMAC,
};

/* Per-device modesetting state. */
struct radeon_mode_info {
	struct atom_context *atom_context;	/* parsed ATOM BIOS, if any */
	enum radeon_connector_table connector_table;
	bool mode_config_initialized;
};

/* Driver-private CRTC: gamma LUT cache, framebuffer offset and HW cursor. */
struct radeon_crtc {
	struct drm_crtc base;
	int crtc_id;
	u16 lut_r[256], lut_g[256], lut_b[256];
	bool enabled;
	bool can_tile;
	uint32_t crtc_offset;
	struct radeon_framebuffer *fbdev_fb;
	struct drm_mode_set mode_set;
	struct drm_gem_object *cursor_bo;
	uint64_t cursor_addr;
	int cursor_width;
	int cursor_height;
};

/* radeon_encoder.flags bit: encoder is fed through the RMX scaler */
#define RADEON_USE_RMX 1

/* Timing parameters of a panel's native (preferred) mode from the BIOS. */
struct radeon_native_mode {
	/* preferred mode */
	uint32_t panel_xres, panel_yres;
	uint32_t hoverplus, hsync_width;
	uint32_t hblank;
	uint32_t voverplus, vsync_width;
	uint32_t vblank;
	uint32_t dotclock;
	uint32_t flags;
};
202
/* enc_priv payload for the legacy primary DAC. */
struct radeon_encoder_primary_dac {
	/* legacy primary dac */
	uint32_t ps2_pdac_adj;	/* BIOS DAC adjustment for PS2 (CRT) output */
};

/* enc_priv payload for legacy LVDS: BIOS power-sequencing delays,
 * optional fixed PLL dividers and the panel's native mode. */
struct radeon_encoder_lvds {
	/* legacy lvds */
	uint16_t panel_vcc_delay;
	uint8_t  panel_pwr_delay;
	uint8_t  panel_digon_delay;
	uint8_t  panel_blon_delay;
	uint16_t panel_ref_divider;
	uint8_t  panel_post_divider;
	uint16_t panel_fb_divider;
	bool     use_bios_dividers;	/* use the divider fields above verbatim */
	uint32_t lvds_gen_cntl;
	/* panel mode */
	struct radeon_native_mode native_mode;
};

/* enc_priv payload for the legacy TV DAC: per-standard DAC adjustments. */
struct radeon_encoder_tv_dac {
	/* legacy tv dac */
	uint32_t ps2_tvdac_adj;
	uint32_t ntsc_tvdac_adj;
	uint32_t pal_tvdac_adj;

	enum radeon_tv_std tv_std;
};

/* enc_priv payload for the legacy internal TMDS encoder. */
struct radeon_encoder_int_tmds {
	/* legacy int tmds */
	struct radeon_tmds_pll tmds_pll[4];	/* BIOS PLL table, freq==0 ends it */
};

/* enc_priv payload for ATOM DIG encoders (and ATOM LVDS parameters). */
struct radeon_encoder_atom_dig {
	/* atom dig */
	bool coherent_mode;
	int dig_block;
	/* atom lvds */
	uint32_t lvds_misc;
	uint16_t panel_pwr_delay;
	/* panel mode */
	struct radeon_native_mode native_mode;
};

/* Driver-private encoder; enc_priv points at one of the type-specific
 * payload structs above depending on encoder_id. */
struct radeon_encoder {
	struct drm_encoder base;
	uint32_t encoder_id;
	uint32_t devices;	/* bitmask of supported device slots */
	uint32_t flags;		/* e.g. RADEON_USE_RMX */
	uint32_t pixel_clock;
	enum radeon_rmx_type rmx_type;
	struct radeon_native_mode native_mode;
	void *enc_priv;
};

/* con_priv payload for ATOM digital connectors. */
struct radeon_connector_atom_dig {
	uint32_t igp_lane_info;
	bool linkb;
};

/* Driver-private connector with its DDC bus and type-specific payload. */
struct radeon_connector {
	struct drm_connector base;
	uint32_t connector_id;
	uint32_t devices;
	struct radeon_i2c_chan *ddc_bus;
	int use_digital;
	void *con_priv;
};

/* Framebuffer wrapper tying a drm_framebuffer to its GEM backing object. */
struct radeon_framebuffer {
	struct drm_framebuffer base;
	struct drm_gem_object *obj;
};
277
/* i2c / DDC helpers */
extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
						 struct radeon_i2c_bus_rec *rec,
						 const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);

extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);

/* PLL divider computation (flags = RADEON_PLL_* above) */
extern void radeon_compute_pll(struct radeon_pll *pll,
			       uint64_t freq,
			       uint32_t *dot_clock_p,
			       uint32_t *fb_div_p,
			       uint32_t *frac_fb_div_p,
			       uint32_t *ref_div_p,
			       uint32_t *post_div_p,
			       int flags);

/* legacy encoder creation */
struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);

/* CRTC programming (ATOM and legacy paths) */
extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb);
extern int atombios_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y,
				  struct drm_framebuffer *old_fb);
extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);

extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				struct drm_framebuffer *old_fb);
extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);

/* hardware cursor */
extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
				  struct drm_file *file_priv,
				  uint32_t handle,
				  uint32_t width,
				  uint32_t height);
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
				   int x, int y);

/* BIOS parsing */
extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
330extern struct radeon_encoder_int_tmds *
331radeon_atombios_get_tmds_info(struct radeon_encoder *encoder);
332extern struct radeon_encoder_primary_dac *
333radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
334extern struct radeon_encoder_tv_dac *
335radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder);
336extern struct radeon_encoder_lvds *
337radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
338extern struct radeon_encoder_int_tmds *
339radeon_combios_get_tmds_info(struct radeon_encoder *encoder);
340extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder);
341extern struct radeon_encoder_tv_dac *
342radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
343extern struct radeon_encoder_primary_dac *
344radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
345extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
346extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
347extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
348extern void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev);
349extern void
350radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
351extern void
352radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
353extern void
354radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
355extern void
356radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
357extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
358 u16 blue, int regno);
359struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
360 struct drm_mode_fb_cmd *mode_cmd,
361 struct drm_gem_object *obj);
362
363int radeonfb_probe(struct drm_device *dev);
364
365int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
366bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev);
367bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev);
368void radeon_atombios_init_crtc(struct drm_device *dev,
369 struct radeon_crtc *radeon_crtc);
370void radeon_legacy_init_crtc(struct drm_device *dev,
371 struct radeon_crtc *radeon_crtc);
372void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
373
374void radeon_get_clock_info(struct drm_device *dev);
375
376extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
377extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
378
379void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
380 struct drm_display_mode *mode,
381 struct drm_display_mode *adjusted_mode);
382void radeon_enc_destroy(struct drm_encoder *encoder);
383void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
384void radeon_combios_asic_init(struct drm_device *dev);
385extern int radeon_static_clocks_init(struct drm_device *dev);
386void radeon_init_disp_bw_legacy(struct drm_device *dev,
387 struct drm_display_mode *mode1,
388 uint32_t pixel_bytes1,
389 struct drm_display_mode *mode2,
390 uint32_t pixel_bytes2);
391void radeon_init_disp_bw_avivo(struct drm_device *dev,
392 struct drm_display_mode *mode1,
393 uint32_t pixel_bytes1,
394 struct drm_display_mode *mode2,
395 uint32_t pixel_bytes2);
396void radeon_init_disp_bandwidth(struct drm_device *dev);
397
398#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
new file mode 100644
index 000000000000..983e8df5e000
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -0,0 +1,511 @@
1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32#include <linux/list.h>
33#include <drm/drmP.h>
34#include "radeon_drm.h"
35#include "radeon.h"
36
/*
 * A radeon buffer object: a TTM buffer object plus driver bookkeeping.
 * Freed by radeon_ttm_object_object_destroy() when the last TTM ref drops.
 */
struct radeon_object {
	struct ttm_buffer_object tobj;	/* underlying TTM BO; tobj.lock guards kptr/pin_count fast paths */
	struct list_head list;		/* entry on rdev->gem.objects (only when gobj != NULL) */
	struct radeon_device *rdev;	/* owning device */
	struct drm_gem_object *gobj;	/* associated GEM object, NULL for kernel-internal BOs */
	struct ttm_bo_kmap_obj kmap;	/* TTM kmap state while CPU-mapped */
	unsigned pin_count;		/* >0 while pinned (placement has NO_EVICT) */
	uint64_t gpu_addr;		/* GPU address; ~0ULL when the BO has no placement */
	void *kptr;			/* cached kernel virtual address, NULL when unmapped */
	bool is_iomem;			/* true if the kernel mapping is I/O memory */
};
48
49int radeon_ttm_init(struct radeon_device *rdev);
50void radeon_ttm_fini(struct radeon_device *rdev);
51
52/*
53 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
54 * function are calling it.
55 */
56
/* Reserve the BO (TTM exclusion lock); @interruptible allows signals to
 * abort the wait. Returns 0 or a negative error code from ttm_bo_reserve(). */
static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
	return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}
61
/* Release the reservation taken by radeon_object_reserve(). */
static void radeon_object_unreserve(struct radeon_object *robj)
{
	ttm_bo_unreserve(&robj->tobj);
}
66
67static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
68{
69 struct radeon_object *robj;
70
71 robj = container_of(tobj, struct radeon_object, tobj);
72 list_del_init(&robj->list);
73 kfree(robj);
74}
75
76static inline void radeon_object_gpu_addr(struct radeon_object *robj)
77{
78 /* Default gpu address */
79 robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
80 if (robj->tobj.mem.mm_node == NULL) {
81 return;
82 }
83 robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
84 switch (robj->tobj.mem.mem_type) {
85 case TTM_PL_VRAM:
86 robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
87 break;
88 case TTM_PL_TT:
89 robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
90 break;
91 default:
92 DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
93 robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
94 return;
95 }
96}
97
98static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
99{
100 uint32_t flags = 0;
101 if (domain & RADEON_GEM_DOMAIN_VRAM) {
102 flags |= TTM_PL_FLAG_VRAM;
103 }
104 if (domain & RADEON_GEM_DOMAIN_GTT) {
105 flags |= TTM_PL_FLAG_TT;
106 }
107 if (domain & RADEON_GEM_DOMAIN_CPU) {
108 flags |= TTM_PL_FLAG_SYSTEM;
109 }
110 if (!flags) {
111 flags |= TTM_PL_FLAG_SYSTEM;
112 }
113 return flags;
114}
115
116int radeon_object_create(struct radeon_device *rdev,
117 struct drm_gem_object *gobj,
118 unsigned long size,
119 bool kernel,
120 uint32_t domain,
121 bool interruptible,
122 struct radeon_object **robj_ptr)
123{
124 struct radeon_object *robj;
125 enum ttm_bo_type type;
126 uint32_t flags;
127 int r;
128
129 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
130 rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
131 }
132 if (kernel) {
133 type = ttm_bo_type_kernel;
134 } else {
135 type = ttm_bo_type_device;
136 }
137 *robj_ptr = NULL;
138 robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
139 if (robj == NULL) {
140 return -ENOMEM;
141 }
142 robj->rdev = rdev;
143 robj->gobj = gobj;
144 INIT_LIST_HEAD(&robj->list);
145
146 flags = radeon_object_flags_from_domain(domain);
147 r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
148 0, 0, false, NULL, size,
149 &radeon_ttm_object_object_destroy);
150 if (unlikely(r != 0)) {
151 /* ttm call radeon_ttm_object_object_destroy if error happen */
152 DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
153 size, flags, 0);
154 return r;
155 }
156 *robj_ptr = robj;
157 if (gobj) {
158 list_add_tail(&robj->list, &rdev->gem.objects);
159 }
160 return 0;
161}
162
/*
 * Map the BO into kernel address space, caching the pointer in robj->kptr.
 * On success *ptr (if non-NULL) receives the kernel virtual address.
 * Returns 0 or the error from ttm_bo_kmap().
 */
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
	int r;

	/* Fast path: already mapped, hand back the cached pointer. */
	spin_lock(&robj->tobj.lock);
	if (robj->kptr) {
		if (ptr) {
			*ptr = robj->kptr;
		}
		spin_unlock(&robj->tobj.lock);
		return 0;
	}
	spin_unlock(&robj->tobj.lock);
	/* NOTE(review): the lock is dropped before ttm_bo_kmap(), so two
	 * concurrent first-time callers could both map — presumably callers
	 * serialize via reservation; confirm. */
	r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
	if (r) {
		return r;
	}
	spin_lock(&robj->tobj.lock);
	robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
	spin_unlock(&robj->tobj.lock);
	if (ptr) {
		*ptr = robj->kptr;
	}
	return 0;
}
188
/*
 * Tear down the kernel mapping created by radeon_object_kmap().
 * No-op if the BO is not currently mapped. The cached pointer is cleared
 * under tobj.lock before the actual unmap.
 */
void radeon_object_kunmap(struct radeon_object *robj)
{
	spin_lock(&robj->tobj.lock);
	if (robj->kptr == NULL) {
		spin_unlock(&robj->tobj.lock);
		return;
	}
	robj->kptr = NULL;
	spin_unlock(&robj->tobj.lock);
	ttm_bo_kunmap(&robj->kmap);
}
200
201void radeon_object_unref(struct radeon_object **robj)
202{
203 struct ttm_buffer_object *tobj;
204
205 if ((*robj) == NULL) {
206 return;
207 }
208 tobj = &((*robj)->tobj);
209 ttm_bo_unref(&tobj);
210 if (tobj == NULL) {
211 *robj = NULL;
212 }
213}
214
/* Report the BO's address-space offset, used as the userspace mmap offset.
 * Always succeeds. */
int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
	*offset = robj->tobj.addr_space_offset;
	return 0;
}
220
/*
 * Pin the BO into @domain (placement gets TTM_PL_FLAG_NO_EVICT) and report
 * its GPU address through *gpu_addr (if non-NULL). Nested pins only bump
 * the count. The fbdev BO gets special handling: it is CPU-unmapped before
 * the move and remapped afterwards, with the fb_info pointers updated.
 * Returns 0 or a negative error from reserve/validate.
 */
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
		      uint64_t *gpu_addr)
{
	uint32_t flags;
	uint32_t tmp;
	void *fbptr;
	int r;

	flags = radeon_object_flags_from_domain(domain);
	/* Fast path: already pinned — just bump the count. */
	spin_lock(&robj->tobj.lock);
	if (robj->pin_count) {
		robj->pin_count++;
		if (gpu_addr != NULL) {
			*gpu_addr = robj->gpu_addr;
		}
		spin_unlock(&robj->tobj.lock);
		return 0;
	}
	spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
		return r;
	}
	/* The fbdev BO must be unmapped while it may move; hold the fb_info
	 * lock across the whole move + remap. */
	if (robj->rdev->fbdev_robj == robj) {
		mutex_lock(&robj->rdev->fbdev_info->lock);
		radeon_object_kunmap(robj);
	}
	/* Rewrite only the memory-type bits of the current placement, then
	 * validate with NO_EVICT so the BO stays put. */
	tmp = robj->tobj.mem.placement;
	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	radeon_object_gpu_addr(robj);
	if (gpu_addr != NULL) {
		*gpu_addr = robj->gpu_addr;
	}
	/* NOTE(review): pin_count is set to 1 even when validation failed
	 * above — confirm callers treat a failed pin as unpinned. */
	robj->pin_count = 1;
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to pin object.\n");
	}
	radeon_object_unreserve(robj);
	if (robj->rdev->fbdev_robj == robj) {
		if (!r) {
			r = radeon_object_kmap(robj, &fbptr);
		}
		if (!r) {
			robj->rdev->fbdev_info->screen_base = fbptr;
			/* NOTE(review): smem_start is given a kernel virtual
			 * address here, not a physical one — verify fbdev
			 * consumers expect that. */
			robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
		}
		mutex_unlock(&robj->rdev->fbdev_info->lock);
	}
	return r;
}
276
/*
 * Drop one pin reference. When the count reaches zero the NO_EVICT flag is
 * cleared from the placement (via validate) so the BO becomes evictable
 * again. Mirrors the fbdev unmap/remap dance done in radeon_object_pin().
 * Warns and returns if the BO was not pinned.
 */
void radeon_object_unpin(struct radeon_object *robj)
{
	uint32_t flags;
	void *fbptr;
	int r;

	spin_lock(&robj->tobj.lock);
	if (!robj->pin_count) {
		spin_unlock(&robj->tobj.lock);
		printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
		return;
	}
	robj->pin_count--;
	/* Still pinned by someone else: nothing more to do. */
	if (robj->pin_count) {
		spin_unlock(&robj->tobj.lock);
		return;
	}
	spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
		return;
	}
	/* Unmap the fbdev BO while its placement may change. */
	if (robj->rdev->fbdev_robj == robj) {
		mutex_lock(&robj->rdev->fbdev_info->lock);
		radeon_object_kunmap(robj);
	}
	/* Re-validate without NO_EVICT so TTM may move/evict the BO again. */
	flags = robj->tobj.mem.placement;
	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to unpin buffer.\n");
	}
	radeon_object_unreserve(robj);
	if (robj->rdev->fbdev_robj == robj) {
		if (!r) {
			r = radeon_object_kmap(robj, &fbptr);
		}
		if (!r) {
			robj->rdev->fbdev_info->screen_base = fbptr;
			/* NOTE(review): kernel vaddr stored in smem_start,
			 * same caveat as in radeon_object_pin(). */
			robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
		}
		mutex_unlock(&robj->rdev->fbdev_info->lock);
	}
}
324
/*
 * Wait (interruptibly) for any fence attached to the BO to signal.
 * Reserves the BO first so the sync object cannot change under us.
 * Returns 0 when idle or the error from reserve/wait.
 */
int radeon_object_wait(struct radeon_object *robj)
{
	int r = 0;

	/* FIXME: should use block reservation instead */
	r = radeon_object_reserve(robj, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
		return r;
	}
	spin_lock(&robj->tobj.lock);
	/* Only wait when a sync object (fence) is actually attached. */
	if (robj->tobj.sync_obj) {
		r = ttm_bo_wait(&robj->tobj, true, false, false);
	}
	spin_unlock(&robj->tobj.lock);
	radeon_object_unreserve(robj);
	return r;
}
343
344int radeon_object_evict_vram(struct radeon_device *rdev)
345{
346 if (rdev->flags & RADEON_IS_IGP) {
347 /* Useless to evict on IGP chips */
348 return 0;
349 }
350 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
351}
352
/*
 * Teardown helper: forcibly release every BO still on rdev->gem.objects.
 * Reaching here with a non-empty list means userspace leaked objects, so
 * each one is logged and its GEM reference dropped under struct_mutex.
 */
void radeon_object_force_delete(struct radeon_device *rdev)
{
	struct radeon_object *robj, *n;
	struct drm_gem_object *gobj;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	DRM_ERROR("Userspace still has active objects !\n");
	/* _safe variant: radeon_object_unref() may free the list node. */
	list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		gobj = robj->gobj;
		DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
			  gobj, robj, (unsigned long)gobj->size,
			  *((unsigned long *)&gobj->refcount));
		list_del_init(&robj->list);
		radeon_object_unref(&robj);
		/* Detach the GEM object before dropping its reference. */
		gobj->driver_private = NULL;
		drm_gem_object_unreference(gobj);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}
375
/* Initialize the BO subsystem; thin wrapper over radeon_ttm_init(). */
int radeon_object_init(struct radeon_device *rdev)
{
	return radeon_ttm_init(rdev);
}
380
/* Tear down the BO subsystem; thin wrapper over radeon_ttm_fini(). */
void radeon_object_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}
385
386void radeon_object_list_add_object(struct radeon_object_list *lobj,
387 struct list_head *head)
388{
389 if (lobj->wdomain) {
390 list_add(&lobj->list, head);
391 } else {
392 list_add_tail(&lobj->list, head);
393 }
394}
395
396int radeon_object_list_reserve(struct list_head *head)
397{
398 struct radeon_object_list *lobj;
399 struct list_head *i;
400 int r;
401
402 list_for_each(i, head) {
403 lobj = list_entry(i, struct radeon_object_list, list);
404 if (!lobj->robj->pin_count) {
405 r = radeon_object_reserve(lobj->robj, true);
406 if (unlikely(r != 0)) {
407 DRM_ERROR("radeon: failed to reserve object.\n");
408 return r;
409 }
410 } else {
411 }
412 }
413 return 0;
414}
415
416void radeon_object_list_unreserve(struct list_head *head)
417{
418 struct radeon_object_list *lobj;
419 struct list_head *i;
420
421 list_for_each(i, head) {
422 lobj = list_entry(i, struct radeon_object_list, list);
423 if (!lobj->robj->pin_count) {
424 radeon_object_unreserve(lobj->robj);
425 } else {
426 }
427 }
428}
429
/*
 * Reserve and validate every BO on @head for command submission, filling
 * in lobj->gpu_offset and (optionally) attaching @fence as each BO's new
 * sync object. On any failure the whole list is unreserved and the error
 * returned; on success the BOs stay reserved for the caller.
 */
int radeon_object_list_validate(struct list_head *head, void *fence)
{
	struct radeon_object_list *lobj;
	struct radeon_object *robj;
	struct radeon_fence *old_fence = NULL;
	struct list_head *i;
	uint32_t flags;
	int r;

	r = radeon_object_list_reserve(head);
	if (unlikely(r != 0)) {
		radeon_object_list_unreserve(head);
		return r;
	}
	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		robj = lobj->robj;
		/* Write-domain BOs use their write domain (plus GTT);
		 * read-only BOs may go to either GTT or VRAM. */
		if (lobj->wdomain) {
			flags = radeon_object_flags_from_domain(lobj->wdomain);
			flags |= TTM_PL_FLAG_TT;
		} else {
			flags = radeon_object_flags_from_domain(lobj->rdomain);
			flags |= TTM_PL_FLAG_TT;
			flags |= TTM_PL_FLAG_VRAM;
		}
		/* Pinned BOs already have a fixed, valid placement. */
		if (!robj->pin_count) {
			robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
			r = ttm_buffer_object_validate(&robj->tobj,
						       robj->tobj.proposed_placement,
						       true, false);
			if (unlikely(r)) {
				radeon_object_list_unreserve(head);
				DRM_ERROR("radeon: failed to validate.\n");
				return r;
			}
			radeon_object_gpu_addr(robj);
		}
		lobj->gpu_offset = robj->gpu_addr;
		/* Swap in the new fence, then release the old one. */
		if (fence) {
			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
			robj->tobj.sync_obj = radeon_fence_ref(fence);
			robj->tobj.sync_obj_arg = NULL;
		}
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
	return 0;
}
479
/*
 * Undo radeon_object_list_validate(): detach and drop each BO's sync
 * object (fence), then unreserve the whole list. Used on the error path
 * of command submission.
 */
void radeon_object_list_unvalidate(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct radeon_fence *old_fence = NULL;
	struct list_head *i;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
		lobj->robj->tobj.sync_obj = NULL;
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
	radeon_object_list_unreserve(head);
}
496
/* Release a BO list after successful submission: just unreserve all
 * entries (fences stay attached, unlike list_unvalidate()). */
void radeon_object_list_clean(struct list_head *head)
{
	radeon_object_list_unreserve(head);
}
501
/* Map the BO into a userspace VMA for fbdev; delegates to TTM. */
int radeon_object_fbdev_mmap(struct radeon_object *robj,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &robj->tobj);
}
507
/* Return the BO's size in bytes (page count scaled by PAGE_SHIFT). */
unsigned long radeon_object_size(struct radeon_object *robj)
{
	return robj->tobj.num_pages << PAGE_SHIFT;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
new file mode 100644
index 000000000000..473e4775dc5a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RADEON_OBJECT_H__
29#define __RADEON_OBJECT_H__
30
31#include <ttm/ttm_bo_api.h>
32#include <ttm/ttm_bo_driver.h>
33#include <ttm/ttm_placement.h>
34#include <ttm/ttm_module.h>
35
36/*
37 * TTM.
38 */
/* Per-device TTM memory-manager state. */
struct radeon_mman {
	struct ttm_global_reference mem_global_ref;	/* ref on the global TTM memory object */
	bool mem_global_referenced;			/* true once mem_global_ref is taken */
	struct ttm_bo_device bdev;			/* TTM buffer-object device */
};
44
45#endif
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
new file mode 100644
index 000000000000..6d3d90406a24
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -0,0 +1,3570 @@
1/*
2 * Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
3 * VA Linux Systems Inc., Fremont, California.
4 *
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining
8 * a copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation on the rights to use, copy, modify, merge,
11 * publish, distribute, sublicense, and/or sell copies of the Software,
12 * and to permit persons to whom the Software is furnished to do so,
13 * subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial
17 * portions of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22 * NON-INFRINGEMENT. IN NO EVENT SHALL ATI, VA LINUX SYSTEMS AND/OR
23 * THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 */
28
29/*
30 * Authors:
31 * Kevin E. Martin <martin@xfree86.org>
32 * Rickard E. Faith <faith@valinux.com>
33 * Alan Hourihane <alanh@fairlite.demon.co.uk>
34 *
35 * References:
36 *
37 * !!!! FIXME !!!!
38 * RAGE 128 VR/ RAGE 128 GL Register Reference Manual (Technical
39 * Reference Manual P/N RRG-G04100-C Rev. 0.04), ATI Technologies: April
40 * 1999.
41 *
42 * !!!! FIXME !!!!
43 * RAGE 128 Software Development Manual (Technical Reference Manual P/N
44 * SDK-G04000 Rev. 0.01), ATI Technologies: June 1999.
45 *
46 */
47
48/* !!!! FIXME !!!! NOTE: THIS FILE HAS BEEN CONVERTED FROM r128_reg.h
49 * AND CONTAINS REGISTERS AND REGISTER DEFINITIONS THAT ARE NOT CORRECT
50 * ON THE RADEON. A FULL AUDIT OF THIS CODE IS NEEDED! */
51#ifndef _RADEON_REG_H_
52#define _RADEON_REG_H_
53
54#include "r300_reg.h"
55#include "r500_reg.h"
56#include "r600_reg.h"
57
58
59#define RADEON_MC_AGP_LOCATION 0x014c
60#define RADEON_MC_AGP_START_MASK 0x0000FFFF
61#define RADEON_MC_AGP_START_SHIFT 0
62#define RADEON_MC_AGP_TOP_MASK 0xFFFF0000
63#define RADEON_MC_AGP_TOP_SHIFT 16
64#define RADEON_MC_FB_LOCATION 0x0148
65#define RADEON_MC_FB_START_MASK 0x0000FFFF
66#define RADEON_MC_FB_START_SHIFT 0
67#define RADEON_MC_FB_TOP_MASK 0xFFFF0000
68#define RADEON_MC_FB_TOP_SHIFT 16
69#define RADEON_AGP_BASE_2 0x015c /* r200+ only */
70#define RADEON_AGP_BASE 0x0170
71
72#define ATI_DATATYPE_VQ 0
73#define ATI_DATATYPE_CI4 1
74#define ATI_DATATYPE_CI8 2
75#define ATI_DATATYPE_ARGB1555 3
76#define ATI_DATATYPE_RGB565 4
77#define ATI_DATATYPE_RGB888 5
78#define ATI_DATATYPE_ARGB8888 6
79#define ATI_DATATYPE_RGB332 7
80#define ATI_DATATYPE_Y8 8
81#define ATI_DATATYPE_RGB8 9
82#define ATI_DATATYPE_CI16 10
83#define ATI_DATATYPE_VYUY_422 11
84#define ATI_DATATYPE_YVYU_422 12
85#define ATI_DATATYPE_AYUV_444 14
86#define ATI_DATATYPE_ARGB4444 15
87
88 /* Registers for 2D/Video/Overlay */
#define RADEON_ADAPTER_ID                   0x0f2c /* PCI */
#define RADEON_AGP_BASE                     0x0170 /* duplicate of the identical define earlier in this file */
91#define RADEON_AGP_CNTL 0x0174
92# define RADEON_AGP_APER_SIZE_256MB (0x00 << 0)
93# define RADEON_AGP_APER_SIZE_128MB (0x20 << 0)
94# define RADEON_AGP_APER_SIZE_64MB (0x30 << 0)
95# define RADEON_AGP_APER_SIZE_32MB (0x38 << 0)
96# define RADEON_AGP_APER_SIZE_16MB (0x3c << 0)
97# define RADEON_AGP_APER_SIZE_8MB (0x3e << 0)
98# define RADEON_AGP_APER_SIZE_4MB (0x3f << 0)
99# define RADEON_AGP_APER_SIZE_MASK (0x3f << 0)
100#define RADEON_STATUS_PCI_CONFIG 0x06
101# define RADEON_CAP_LIST 0x100000
102#define RADEON_CAPABILITIES_PTR_PCI_CONFIG 0x34 /* offset in PCI config*/
103# define RADEON_CAP_PTR_MASK 0xfc /* mask off reserved bits of CAP_PTR */
104# define RADEON_CAP_ID_NULL 0x00 /* End of capability list */
105# define RADEON_CAP_ID_AGP 0x02 /* AGP capability ID */
106# define RADEON_CAP_ID_EXP 0x10 /* PCI Express */
107#define RADEON_AGP_COMMAND 0x0f60 /* PCI */
108#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config*/
109# define RADEON_AGP_ENABLE (1<<8)
110#define RADEON_AGP_PLL_CNTL 0x000b /* PLL */
111#define RADEON_AGP_STATUS 0x0f5c /* PCI */
112# define RADEON_AGP_1X_MODE 0x01
113# define RADEON_AGP_2X_MODE 0x02
114# define RADEON_AGP_4X_MODE 0x04
115# define RADEON_AGP_FW_MODE 0x10
116# define RADEON_AGP_MODE_MASK 0x17
117# define RADEON_AGPv3_MODE 0x08
118# define RADEON_AGPv3_4X_MODE 0x01
119# define RADEON_AGPv3_8X_MODE 0x02
120#define RADEON_ATTRDR 0x03c1 /* VGA */
121#define RADEON_ATTRDW 0x03c0 /* VGA */
122#define RADEON_ATTRX 0x03c0 /* VGA */
123#define RADEON_AUX_SC_CNTL 0x1660
124# define RADEON_AUX1_SC_EN (1 << 0)
125# define RADEON_AUX1_SC_MODE_OR (0 << 1)
126# define RADEON_AUX1_SC_MODE_NAND (1 << 1)
127# define RADEON_AUX2_SC_EN (1 << 2)
128# define RADEON_AUX2_SC_MODE_OR (0 << 3)
129# define RADEON_AUX2_SC_MODE_NAND (1 << 3)
130# define RADEON_AUX3_SC_EN (1 << 4)
131# define RADEON_AUX3_SC_MODE_OR (0 << 5)
132# define RADEON_AUX3_SC_MODE_NAND (1 << 5)
133#define RADEON_AUX1_SC_BOTTOM 0x1670
134#define RADEON_AUX1_SC_LEFT 0x1664
135#define RADEON_AUX1_SC_RIGHT 0x1668
136#define RADEON_AUX1_SC_TOP 0x166c
137#define RADEON_AUX2_SC_BOTTOM 0x1680
138#define RADEON_AUX2_SC_LEFT 0x1674
139#define RADEON_AUX2_SC_RIGHT 0x1678
140#define RADEON_AUX2_SC_TOP 0x167c
141#define RADEON_AUX3_SC_BOTTOM 0x1690
142#define RADEON_AUX3_SC_LEFT 0x1684
143#define RADEON_AUX3_SC_RIGHT 0x1688
144#define RADEON_AUX3_SC_TOP 0x168c
145#define RADEON_AUX_WINDOW_HORZ_CNTL 0x02d8
146#define RADEON_AUX_WINDOW_VERT_CNTL 0x02dc
147
148#define RADEON_BASE_CODE 0x0f0b
149#define RADEON_BIOS_0_SCRATCH 0x0010
150# define RADEON_FP_PANEL_SCALABLE (1 << 16)
151# define RADEON_FP_PANEL_SCALE_EN (1 << 17)
152# define RADEON_FP_CHIP_SCALE_EN (1 << 18)
153# define RADEON_DRIVER_BRIGHTNESS_EN (1 << 26)
154# define RADEON_DISPLAY_ROT_MASK (3 << 28)
155# define RADEON_DISPLAY_ROT_00 (0 << 28)
156# define RADEON_DISPLAY_ROT_90 (1 << 28)
157# define RADEON_DISPLAY_ROT_180 (2 << 28)
158# define RADEON_DISPLAY_ROT_270 (3 << 28)
159#define RADEON_BIOS_1_SCRATCH 0x0014
160#define RADEON_BIOS_2_SCRATCH 0x0018
161#define RADEON_BIOS_3_SCRATCH 0x001c
162#define RADEON_BIOS_4_SCRATCH 0x0020
163# define RADEON_CRT1_ATTACHED_MASK (3 << 0)
164# define RADEON_CRT1_ATTACHED_MONO (1 << 0)
165# define RADEON_CRT1_ATTACHED_COLOR (2 << 0)
166# define RADEON_LCD1_ATTACHED (1 << 2)
167# define RADEON_DFP1_ATTACHED (1 << 3)
168# define RADEON_TV1_ATTACHED_MASK (3 << 4)
169# define RADEON_TV1_ATTACHED_COMP (1 << 4)
170# define RADEON_TV1_ATTACHED_SVIDEO (2 << 4)
171# define RADEON_CRT2_ATTACHED_MASK (3 << 8)
172# define RADEON_CRT2_ATTACHED_MONO (1 << 8)
173# define RADEON_CRT2_ATTACHED_COLOR (2 << 8)
174# define RADEON_DFP2_ATTACHED (1 << 11)
175#define RADEON_BIOS_5_SCRATCH 0x0024
176# define RADEON_LCD1_ON (1 << 0)
177# define RADEON_CRT1_ON (1 << 1)
178# define RADEON_TV1_ON (1 << 2)
179# define RADEON_DFP1_ON (1 << 3)
180# define RADEON_CRT2_ON (1 << 5)
181# define RADEON_CV1_ON (1 << 6)
182# define RADEON_DFP2_ON (1 << 7)
183# define RADEON_LCD1_CRTC_MASK (1 << 8)
184# define RADEON_LCD1_CRTC_SHIFT 8
185# define RADEON_CRT1_CRTC_MASK (1 << 9)
186# define RADEON_CRT1_CRTC_SHIFT 9
187# define RADEON_TV1_CRTC_MASK (1 << 10)
188# define RADEON_TV1_CRTC_SHIFT 10
189# define RADEON_DFP1_CRTC_MASK (1 << 11)
190# define RADEON_DFP1_CRTC_SHIFT 11
191# define RADEON_CRT2_CRTC_MASK (1 << 12)
192# define RADEON_CRT2_CRTC_SHIFT 12
193# define RADEON_CV1_CRTC_MASK (1 << 13)
194# define RADEON_CV1_CRTC_SHIFT 13
195# define RADEON_DFP2_CRTC_MASK (1 << 14)
196# define RADEON_DFP2_CRTC_SHIFT 14
197# define RADEON_ACC_REQ_LCD1 (1 << 16)
198# define RADEON_ACC_REQ_CRT1 (1 << 17)
199# define RADEON_ACC_REQ_TV1 (1 << 18)
200# define RADEON_ACC_REQ_DFP1 (1 << 19)
201# define RADEON_ACC_REQ_CRT2 (1 << 21)
202# define RADEON_ACC_REQ_TV2 (1 << 22)
203# define RADEON_ACC_REQ_DFP2 (1 << 23)
204#define RADEON_BIOS_6_SCRATCH 0x0028
205# define RADEON_ACC_MODE_CHANGE (1 << 2)
206# define RADEON_EXT_DESKTOP_MODE (1 << 3)
207# define RADEON_LCD_DPMS_ON (1 << 20)
208# define RADEON_CRT_DPMS_ON (1 << 21)
209# define RADEON_TV_DPMS_ON (1 << 22)
210# define RADEON_DFP_DPMS_ON (1 << 23)
211# define RADEON_DPMS_MASK (3 << 24)
212# define RADEON_DPMS_ON (0 << 24)
213# define RADEON_DPMS_STANDBY (1 << 24)
214# define RADEON_DPMS_SUSPEND (2 << 24)
215# define RADEON_DPMS_OFF (3 << 24)
216# define RADEON_SCREEN_BLANKING (1 << 26)
217# define RADEON_DRIVER_CRITICAL (1 << 27)
218# define RADEON_DISPLAY_SWITCHING_DIS (1 << 30)
/* BIOS scratch register 7, PCI config-space mirror offsets (marked "PCI"),
 * 2D engine brush-pattern data registers, and host-bus / PCIe link control.
 * NOTE: RADEON_BRUSH_DATAn lives at 0x1480 + 4*n; the list below is sorted
 * alphabetically (DATA1, DATA10, DATA11, ... DATA2, ...), so the numeric
 * order is interleaved — the addresses themselves are consistent. */
#define RADEON_BIOS_7_SCRATCH 0x002c
# define RADEON_SYS_HOTKEY (1 << 10)
# define RADEON_DRV_LOADED (1 << 12)
#define RADEON_BIOS_ROM 0x0f30 /* PCI */
#define RADEON_BIST 0x0f0f /* PCI */
#define RADEON_BRUSH_DATA0 0x1480
#define RADEON_BRUSH_DATA1 0x1484
#define RADEON_BRUSH_DATA10 0x14a8
#define RADEON_BRUSH_DATA11 0x14ac
#define RADEON_BRUSH_DATA12 0x14b0
#define RADEON_BRUSH_DATA13 0x14b4
#define RADEON_BRUSH_DATA14 0x14b8
#define RADEON_BRUSH_DATA15 0x14bc
#define RADEON_BRUSH_DATA16 0x14c0
#define RADEON_BRUSH_DATA17 0x14c4
#define RADEON_BRUSH_DATA18 0x14c8
#define RADEON_BRUSH_DATA19 0x14cc
#define RADEON_BRUSH_DATA2 0x1488
#define RADEON_BRUSH_DATA20 0x14d0
#define RADEON_BRUSH_DATA21 0x14d4
#define RADEON_BRUSH_DATA22 0x14d8
#define RADEON_BRUSH_DATA23 0x14dc
#define RADEON_BRUSH_DATA24 0x14e0
#define RADEON_BRUSH_DATA25 0x14e4
#define RADEON_BRUSH_DATA26 0x14e8
#define RADEON_BRUSH_DATA27 0x14ec
#define RADEON_BRUSH_DATA28 0x14f0
#define RADEON_BRUSH_DATA29 0x14f4
#define RADEON_BRUSH_DATA3 0x148c
#define RADEON_BRUSH_DATA30 0x14f8
#define RADEON_BRUSH_DATA31 0x14fc
#define RADEON_BRUSH_DATA32 0x1500
#define RADEON_BRUSH_DATA33 0x1504
#define RADEON_BRUSH_DATA34 0x1508
#define RADEON_BRUSH_DATA35 0x150c
#define RADEON_BRUSH_DATA36 0x1510
#define RADEON_BRUSH_DATA37 0x1514
#define RADEON_BRUSH_DATA38 0x1518
#define RADEON_BRUSH_DATA39 0x151c
#define RADEON_BRUSH_DATA4 0x1490
#define RADEON_BRUSH_DATA40 0x1520
#define RADEON_BRUSH_DATA41 0x1524
#define RADEON_BRUSH_DATA42 0x1528
#define RADEON_BRUSH_DATA43 0x152c
#define RADEON_BRUSH_DATA44 0x1530
#define RADEON_BRUSH_DATA45 0x1534
#define RADEON_BRUSH_DATA46 0x1538
#define RADEON_BRUSH_DATA47 0x153c
#define RADEON_BRUSH_DATA48 0x1540
#define RADEON_BRUSH_DATA49 0x1544
#define RADEON_BRUSH_DATA5 0x1494
#define RADEON_BRUSH_DATA50 0x1548
#define RADEON_BRUSH_DATA51 0x154c
#define RADEON_BRUSH_DATA52 0x1550
#define RADEON_BRUSH_DATA53 0x1554
#define RADEON_BRUSH_DATA54 0x1558
#define RADEON_BRUSH_DATA55 0x155c
#define RADEON_BRUSH_DATA56 0x1560
#define RADEON_BRUSH_DATA57 0x1564
#define RADEON_BRUSH_DATA58 0x1568
#define RADEON_BRUSH_DATA59 0x156c
#define RADEON_BRUSH_DATA6 0x1498
#define RADEON_BRUSH_DATA60 0x1570
#define RADEON_BRUSH_DATA61 0x1574
#define RADEON_BRUSH_DATA62 0x1578
#define RADEON_BRUSH_DATA63 0x157c
#define RADEON_BRUSH_DATA7 0x149c
#define RADEON_BRUSH_DATA8 0x14a0
#define RADEON_BRUSH_DATA9 0x14a4
#define RADEON_BRUSH_SCALE 0x1470
#define RADEON_BRUSH_Y_X 0x1474
#define RADEON_BUS_CNTL 0x0030
# define RADEON_BUS_MASTER_DIS (1 << 6)
# define RADEON_BUS_BIOS_DIS_ROM (1 << 12)
# define RADEON_BUS_RD_DISCARD_EN (1 << 24)
# define RADEON_BUS_RD_ABORT_EN (1 << 25)
# define RADEON_BUS_MSTR_DISCONNECT_EN (1 << 28)
# define RADEON_BUS_WRT_BURST (1 << 29)
# define RADEON_BUS_READ_BURST (1 << 30)
#define RADEON_BUS_CNTL1 0x0034
# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)

/* The PCIE index/data pair below is kept commented out because its offsets
 * (0x0030/0x0034) collide with RADEON_BUS_CNTL/RADEON_BUS_CNTL1 above. */
/* #define RADEON_PCIE_INDEX 0x0030 */
/* #define RADEON_PCIE_DATA 0x0034 */
#define RADEON_PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE */
# define RADEON_PCIE_LC_LINK_WIDTH_SHIFT 0
# define RADEON_PCIE_LC_LINK_WIDTH_MASK 0x7
# define RADEON_PCIE_LC_LINK_WIDTH_X0 0
# define RADEON_PCIE_LC_LINK_WIDTH_X1 1
# define RADEON_PCIE_LC_LINK_WIDTH_X2 2
# define RADEON_PCIE_LC_LINK_WIDTH_X4 3
# define RADEON_PCIE_LC_LINK_WIDTH_X8 4
# define RADEON_PCIE_LC_LINK_WIDTH_X12 5
# define RADEON_PCIE_LC_LINK_WIDTH_X16 6
# define RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT 4
# define RADEON_PCIE_LC_LINK_WIDTH_RD_MASK 0x70
# define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8)
# define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9)
# define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10)

/* Cache/PCI capability offsets, clock & power-management PLL registers
 * (offsets marked "PLL" are indirect, accessed via CLOCK_CNTL_INDEX/DATA),
 * 2D color-compare registers, config-space aperture/strap registers, and
 * the graphics FIFO watermark controls for both CRTCs. */
#define RADEON_CACHE_CNTL 0x1724
#define RADEON_CACHE_LINE 0x0f0c /* PCI */
#define RADEON_CAPABILITIES_ID 0x0f50 /* PCI */
#define RADEON_CAPABILITIES_PTR 0x0f34 /* PCI */
#define RADEON_CLK_PIN_CNTL 0x0001 /* PLL */
# define RADEON_DONT_USE_XTALIN (1 << 4)
# define RADEON_SCLK_DYN_START_CNTL (1 << 15)
#define RADEON_CLOCK_CNTL_DATA 0x000c
#define RADEON_CLOCK_CNTL_INDEX 0x0008
# define RADEON_PLL_WR_EN (1 << 7)
# define RADEON_PLL_DIV_SEL (3 << 8)
# define RADEON_PLL2_DIV_SEL_MASK (~(3 << 8))
#define RADEON_CLK_PWRMGT_CNTL 0x0014
# define RADEON_ENGIN_DYNCLK_MODE (1 << 12)
# define RADEON_ACTIVE_HILO_LAT_MASK (3 << 13)
# define RADEON_ACTIVE_HILO_LAT_SHIFT 13
/* NOTE(review): DISP_DYN_STOP_LAT_MASK below shares bit 12 with
 * ENGIN_DYNCLK_MODE above — this mirrors upstream, but confirm against the
 * register spec before relying on either as a standalone mask. */
# define RADEON_DISP_DYN_STOP_LAT_MASK (1 << 12)
# define RADEON_MC_BUSY (1 << 16)
# define RADEON_DLL_READY (1 << 19)
# define RADEON_CG_NO1_DEBUG_0 (1 << 24)
# define RADEON_CG_NO1_DEBUG_MASK (0x1f << 24)
# define RADEON_DYN_STOP_MODE_MASK (7 << 21)
# define RADEON_TVPLL_PWRMGT_OFF (1 << 30)
# define RADEON_TVCLK_TURNOFF (1 << 31)
#define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */
# define RADEON_TCL_BYPASS_DISABLE (1 << 20)
#define RADEON_CLR_CMP_CLR_3D 0x1a24
#define RADEON_CLR_CMP_CLR_DST 0x15c8
#define RADEON_CLR_CMP_CLR_SRC 0x15c4
#define RADEON_CLR_CMP_CNTL 0x15c0
# define RADEON_SRC_CMP_EQ_COLOR (4 << 0)
# define RADEON_SRC_CMP_NEQ_COLOR (5 << 0)
# define RADEON_CLR_CMP_SRC_SOURCE (1 << 24)
#define RADEON_CLR_CMP_MASK 0x15cc
# define RADEON_CLR_CMP_MSK 0xffffffff
#define RADEON_CLR_CMP_MASK_3D 0x1A28
#define RADEON_COMMAND 0x0f04 /* PCI */
#define RADEON_COMPOSITE_SHADOW_ID 0x1a0c
#define RADEON_CONFIG_APER_0_BASE 0x0100
#define RADEON_CONFIG_APER_1_BASE 0x0104
#define RADEON_CONFIG_APER_SIZE 0x0108
#define RADEON_CONFIG_BONDS 0x00e8
#define RADEON_CONFIG_CNTL 0x00e0
# define RADEON_CFG_ATI_REV_A11 (0 << 16)
# define RADEON_CFG_ATI_REV_A12 (1 << 16)
# define RADEON_CFG_ATI_REV_A13 (2 << 16)
# define RADEON_CFG_ATI_REV_ID_MASK (0xf << 16)
#define RADEON_CONFIG_MEMSIZE 0x00f8
#define RADEON_CONFIG_MEMSIZE_EMBEDDED 0x0114
#define RADEON_CONFIG_REG_1_BASE 0x010c
#define RADEON_CONFIG_REG_APER_SIZE 0x0110
#define RADEON_CONFIG_XSTRAP 0x00e4
#define RADEON_CONSTANT_COLOR_C 0x1d34
# define RADEON_CONSTANT_COLOR_MASK 0x00ffffff
# define RADEON_CONSTANT_COLOR_ONE 0x00ffffff
# define RADEON_CONSTANT_COLOR_ZERO 0x00000000
#define RADEON_CRC_CMDFIFO_ADDR 0x0740
#define RADEON_CRC_CMDFIFO_DOUT 0x0744
/* GRPH/GRPH2 buffer controls carry identical bit layouts, one per CRTC. */
#define RADEON_GRPH_BUFFER_CNTL 0x02f0
# define RADEON_GRPH_START_REQ_MASK (0x7f)
# define RADEON_GRPH_START_REQ_SHIFT 0
# define RADEON_GRPH_STOP_REQ_MASK (0x7f<<8)
# define RADEON_GRPH_STOP_REQ_SHIFT 8
# define RADEON_GRPH_CRITICAL_POINT_MASK (0x7f<<16)
# define RADEON_GRPH_CRITICAL_POINT_SHIFT 16
# define RADEON_GRPH_CRITICAL_CNTL (1<<28)
# define RADEON_GRPH_BUFFER_SIZE (1<<29)
# define RADEON_GRPH_CRITICAL_AT_SOF (1<<30)
# define RADEON_GRPH_STOP_CNTL (1<<31)
#define RADEON_GRPH2_BUFFER_CNTL 0x03f0
# define RADEON_GRPH2_START_REQ_MASK (0x7f)
# define RADEON_GRPH2_START_REQ_SHIFT 0
# define RADEON_GRPH2_STOP_REQ_MASK (0x7f<<8)
# define RADEON_GRPH2_STOP_REQ_SHIFT 8
# define RADEON_GRPH2_CRITICAL_POINT_MASK (0x7f<<16)
# define RADEON_GRPH2_CRITICAL_POINT_SHIFT 16
# define RADEON_GRPH2_CRITICAL_CNTL (1<<28)
# define RADEON_GRPH2_BUFFER_SIZE (1<<29)
# define RADEON_GRPH2_CRITICAL_AT_SOF (1<<30)
# define RADEON_GRPH2_STOP_CNTL (1<<31)
/* CRTC / CRTC2 control and timing registers. Throughout, the CRTC2 variant
 * mirrors the CRTC bit layout at a parallel register offset.
 * NOTE(review): several masks here use (1 << 31) / (0x7f<<31-style shifts
 * into the sign bit of int, which is UB in strict C; the kernel relies on
 * the usual two's-complement behavior — consider 1U << 31 if this header
 * is ever reused outside that context. */
#define RADEON_CRTC_CRNT_FRAME 0x0214
#define RADEON_CRTC_EXT_CNTL 0x0054
# define RADEON_CRTC_VGA_XOVERSCAN (1 << 0)
# define RADEON_VGA_ATI_LINEAR (1 << 3)
# define RADEON_XCRT_CNT_EN (1 << 6)
# define RADEON_CRTC_HSYNC_DIS (1 << 8)
# define RADEON_CRTC_VSYNC_DIS (1 << 9)
# define RADEON_CRTC_DISPLAY_DIS (1 << 10)
# define RADEON_CRTC_SYNC_TRISTAT (1 << 11)
# define RADEON_CRTC_CRT_ON (1 << 15)
/* Byte-wide alias of CRTC_EXT_CNTL's DPMS bits (offset 0x55 = 0x54 + 1). */
#define RADEON_CRTC_EXT_CNTL_DPMS_BYTE 0x0055
# define RADEON_CRTC_HSYNC_DIS_BYTE (1 << 0)
# define RADEON_CRTC_VSYNC_DIS_BYTE (1 << 1)
# define RADEON_CRTC_DISPLAY_DIS_BYTE (1 << 2)
#define RADEON_CRTC_GEN_CNTL 0x0050
# define RADEON_CRTC_DBL_SCAN_EN (1 << 0)
# define RADEON_CRTC_INTERLACE_EN (1 << 1)
# define RADEON_CRTC_CSYNC_EN (1 << 4)
# define RADEON_CRTC_ICON_EN (1 << 15)
# define RADEON_CRTC_CUR_EN (1 << 16)
# define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
# define RADEON_CRTC_CUR_MODE_SHIFT 20
# define RADEON_CRTC_CUR_MODE_MONO 0
# define RADEON_CRTC_CUR_MODE_24BPP 2
# define RADEON_CRTC_EXT_DISP_EN (1 << 24)
# define RADEON_CRTC_EN (1 << 25)
# define RADEON_CRTC_DISP_REQ_EN_B (1 << 26)
#define RADEON_CRTC2_GEN_CNTL 0x03f8
# define RADEON_CRTC2_DBL_SCAN_EN (1 << 0)
# define RADEON_CRTC2_INTERLACE_EN (1 << 1)
# define RADEON_CRTC2_SYNC_TRISTAT (1 << 4)
# define RADEON_CRTC2_HSYNC_TRISTAT (1 << 5)
# define RADEON_CRTC2_VSYNC_TRISTAT (1 << 6)
# define RADEON_CRTC2_CRT2_ON (1 << 7)
# define RADEON_CRTC2_PIX_WIDTH_SHIFT 8
# define RADEON_CRTC2_PIX_WIDTH_MASK (0xf << 8)
# define RADEON_CRTC2_ICON_EN (1 << 15)
# define RADEON_CRTC2_CUR_EN (1 << 16)
# define RADEON_CRTC2_CUR_MODE_MASK (7 << 20)
# define RADEON_CRTC2_DISP_DIS (1 << 23)
# define RADEON_CRTC2_EN (1 << 25)
# define RADEON_CRTC2_DISP_REQ_EN_B (1 << 26)
# define RADEON_CRTC2_CSYNC_EN (1 << 27)
# define RADEON_CRTC2_HSYNC_DIS (1 << 28)
# define RADEON_CRTC2_VSYNC_DIS (1 << 29)
#define RADEON_CRTC_MORE_CNTL 0x27c
# define RADEON_CRTC_AUTO_HORZ_CENTER_EN (1<<2)
# define RADEON_CRTC_AUTO_VERT_CENTER_EN (1<<3)
# define RADEON_CRTC_H_CUTOFF_ACTIVE_EN (1<<4)
# define RADEON_CRTC_V_CUTOFF_ACTIVE_EN (1<<5)
#define RADEON_CRTC_GUI_TRIG_VLINE 0x0218
#define RADEON_CRTC_H_SYNC_STRT_WID 0x0204
# define RADEON_CRTC_H_SYNC_STRT_PIX (0x07 << 0)
# define RADEON_CRTC_H_SYNC_STRT_CHAR (0x3ff << 3)
# define RADEON_CRTC_H_SYNC_STRT_CHAR_SHIFT 3
# define RADEON_CRTC_H_SYNC_WID (0x3f << 16)
# define RADEON_CRTC_H_SYNC_WID_SHIFT 16
# define RADEON_CRTC_H_SYNC_POL (1 << 23)
#define RADEON_CRTC2_H_SYNC_STRT_WID 0x0304
# define RADEON_CRTC2_H_SYNC_STRT_PIX (0x07 << 0)
# define RADEON_CRTC2_H_SYNC_STRT_CHAR (0x3ff << 3)
# define RADEON_CRTC2_H_SYNC_STRT_CHAR_SHIFT 3
# define RADEON_CRTC2_H_SYNC_WID (0x3f << 16)
# define RADEON_CRTC2_H_SYNC_WID_SHIFT 16
# define RADEON_CRTC2_H_SYNC_POL (1 << 23)
#define RADEON_CRTC_H_TOTAL_DISP 0x0200
# define RADEON_CRTC_H_TOTAL (0x03ff << 0)
# define RADEON_CRTC_H_TOTAL_SHIFT 0
# define RADEON_CRTC_H_DISP (0x01ff << 16)
# define RADEON_CRTC_H_DISP_SHIFT 16
#define RADEON_CRTC2_H_TOTAL_DISP 0x0300
# define RADEON_CRTC2_H_TOTAL (0x03ff << 0)
# define RADEON_CRTC2_H_TOTAL_SHIFT 0
# define RADEON_CRTC2_H_DISP (0x01ff << 16)
# define RADEON_CRTC2_H_DISP_SHIFT 16

#define RADEON_CRTC_OFFSET_RIGHT 0x0220
#define RADEON_CRTC_OFFSET 0x0224
# define RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET (1<<30)
# define RADEON_CRTC_OFFSET__OFFSET_LOCK (1<<31)

#define RADEON_CRTC2_OFFSET 0x0324
# define RADEON_CRTC2_OFFSET__GUI_TRIG_OFFSET (1<<30)
# define RADEON_CRTC2_OFFSET__OFFSET_LOCK (1<<31)
#define RADEON_CRTC_OFFSET_CNTL 0x0228
# define RADEON_CRTC_TILE_LINE_SHIFT 0
# define RADEON_CRTC_TILE_LINE_RIGHT_SHIFT 4
# define R300_CRTC_X_Y_MODE_EN_RIGHT (1 << 6)
# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_MASK (3 << 7)
# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_AUTO (0 << 7)
# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_SINGLE (1 << 7)
# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DOUBLE (2 << 7)
# define R300_CRTC_MICRO_TILE_BUFFER_RIGHT_DIS (3 << 7)
# define R300_CRTC_X_Y_MODE_EN (1 << 9)
# define R300_CRTC_MICRO_TILE_BUFFER_MASK (3 << 10)
# define R300_CRTC_MICRO_TILE_BUFFER_AUTO (0 << 10)
# define R300_CRTC_MICRO_TILE_BUFFER_SINGLE (1 << 10)
# define R300_CRTC_MICRO_TILE_BUFFER_DOUBLE (2 << 10)
# define R300_CRTC_MICRO_TILE_BUFFER_DIS (3 << 10)
# define R300_CRTC_MICRO_TILE_EN_RIGHT (1 << 12)
# define R300_CRTC_MICRO_TILE_EN (1 << 13)
# define R300_CRTC_MACRO_TILE_EN_RIGHT (1 << 14)
# define R300_CRTC_MACRO_TILE_EN (1 << 15)
/* Pre-R300 parts use bits 14/15 for the combined tile enables instead. */
# define RADEON_CRTC_TILE_EN_RIGHT (1 << 14)
# define RADEON_CRTC_TILE_EN (1 << 15)
# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
# define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17)

#define R300_CRTC_TILE_X0_Y0 0x0350
#define R300_CRTC2_TILE_X0_Y0 0x0358

#define RADEON_CRTC2_OFFSET_CNTL 0x0328
# define RADEON_CRTC2_OFFSET_FLIP_CNTL (1 << 16)
# define RADEON_CRTC2_TILE_EN (1 << 15)
#define RADEON_CRTC_PITCH 0x022c
# define RADEON_CRTC_PITCH__SHIFT 0
# define RADEON_CRTC_PITCH__RIGHT_SHIFT 16

#define RADEON_CRTC2_PITCH 0x032c
#define RADEON_CRTC_STATUS 0x005c
/* NOTE(review): SAVE and SAVE_CLEAR intentionally name the same bit —
 * presumably read-for-status vs. write-to-acknowledge; confirm against the
 * register spec. */
# define RADEON_CRTC_VBLANK_SAVE (1 << 1)
# define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1)
#define RADEON_CRTC2_STATUS 0x03fc
# define RADEON_CRTC2_VBLANK_SAVE (1 << 1)
# define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1)
#define RADEON_CRTC_V_SYNC_STRT_WID 0x020c
# define RADEON_CRTC_V_SYNC_STRT (0x7ff << 0)
# define RADEON_CRTC_V_SYNC_STRT_SHIFT 0
# define RADEON_CRTC_V_SYNC_WID (0x1f << 16)
# define RADEON_CRTC_V_SYNC_WID_SHIFT 16
# define RADEON_CRTC_V_SYNC_POL (1 << 23)
#define RADEON_CRTC2_V_SYNC_STRT_WID 0x030c
# define RADEON_CRTC2_V_SYNC_STRT (0x7ff << 0)
# define RADEON_CRTC2_V_SYNC_STRT_SHIFT 0
# define RADEON_CRTC2_V_SYNC_WID (0x1f << 16)
# define RADEON_CRTC2_V_SYNC_WID_SHIFT 16
# define RADEON_CRTC2_V_SYNC_POL (1 << 23)
#define RADEON_CRTC_V_TOTAL_DISP 0x0208
# define RADEON_CRTC_V_TOTAL (0x07ff << 0)
# define RADEON_CRTC_V_TOTAL_SHIFT 0
# define RADEON_CRTC_V_DISP (0x07ff << 16)
# define RADEON_CRTC_V_DISP_SHIFT 16
#define RADEON_CRTC2_V_TOTAL_DISP 0x0308
# define RADEON_CRTC2_V_TOTAL (0x07ff << 0)
# define RADEON_CRTC2_V_TOTAL_SHIFT 0
# define RADEON_CRTC2_V_DISP (0x07ff << 16)
# define RADEON_CRTC2_V_DISP_SHIFT 16
#define RADEON_CRTC_VLINE_CRNT_VLINE 0x0210
# define RADEON_CRTC_CRNT_VLINE_MASK (0x7ff << 16)
/* CRTC2 frame counter and scanline trigger/status registers.
 * The redundant second definition of RADEON_CRTC2_STATUS (0x03fc) has been
 * dropped; it is already defined (with the same value) alongside its
 * VBLANK_SAVE bits earlier in this header. */
#define RADEON_CRTC2_CRNT_FRAME 0x0314
#define RADEON_CRTC2_GUI_TRIG_VLINE 0x0318
#define RADEON_CRTC2_VLINE_CRNT_VLINE 0x0310
/* Legacy VGA CRTC index/data ports, hardware cursor registers (CUR for
 * CRTC1, CUR2 for CRTC2), primary DAC control, display power management,
 * and the TV DAC control register with its R420 bit-position variants. */
#define RADEON_CRTC8_DATA 0x03d5 /* VGA, 0x3b5 */
#define RADEON_CRTC8_IDX 0x03d4 /* VGA, 0x3b4 */
#define RADEON_CUR_CLR0 0x026c
#define RADEON_CUR_CLR1 0x0270
#define RADEON_CUR_HORZ_VERT_OFF 0x0268
#define RADEON_CUR_HORZ_VERT_POSN 0x0264
#define RADEON_CUR_OFFSET 0x0260
# define RADEON_CUR_LOCK (1 << 31)
#define RADEON_CUR2_CLR0 0x036c
#define RADEON_CUR2_CLR1 0x0370
#define RADEON_CUR2_HORZ_VERT_OFF 0x0368
#define RADEON_CUR2_HORZ_VERT_POSN 0x0364
#define RADEON_CUR2_OFFSET 0x0360
# define RADEON_CUR2_LOCK (1 << 31)

#define RADEON_DAC_CNTL 0x0058
# define RADEON_DAC_RANGE_CNTL (3 << 0)
# define RADEON_DAC_RANGE_CNTL_PS2 (2 << 0)
# define RADEON_DAC_RANGE_CNTL_MASK 0x03
# define RADEON_DAC_BLANKING (1 << 2)
# define RADEON_DAC_CMP_EN (1 << 3)
# define RADEON_DAC_CMP_OUTPUT (1 << 7)
# define RADEON_DAC_8BIT_EN (1 << 8)
# define RADEON_DAC_TVO_EN (1 << 10)
# define RADEON_DAC_VGA_ADR_EN (1 << 13)
# define RADEON_DAC_PDWN (1 << 15)
# define RADEON_DAC_MASK_ALL (0xff << 24)
#define RADEON_DAC_CNTL2 0x007c
# define RADEON_DAC2_TV_CLK_SEL (0 << 1)
# define RADEON_DAC2_DAC_CLK_SEL (1 << 0)
# define RADEON_DAC2_DAC2_CLK_SEL (1 << 1)
# define RADEON_DAC2_PALETTE_ACC_CTL (1 << 5)
# define RADEON_DAC2_CMP_EN (1 << 7)
# define RADEON_DAC2_CMP_OUT_R (1 << 8)
# define RADEON_DAC2_CMP_OUT_G (1 << 9)
# define RADEON_DAC2_CMP_OUT_B (1 << 10)
# define RADEON_DAC2_CMP_OUTPUT (1 << 11)
#define RADEON_DAC_EXT_CNTL 0x0280
# define RADEON_DAC2_FORCE_BLANK_OFF_EN (1 << 0)
# define RADEON_DAC2_FORCE_DATA_EN (1 << 1)
# define RADEON_DAC_FORCE_BLANK_OFF_EN (1 << 4)
# define RADEON_DAC_FORCE_DATA_EN (1 << 5)
# define RADEON_DAC_FORCE_DATA_SEL_MASK (3 << 6)
# define RADEON_DAC_FORCE_DATA_SEL_R (0 << 6)
# define RADEON_DAC_FORCE_DATA_SEL_G (1 << 6)
# define RADEON_DAC_FORCE_DATA_SEL_B (2 << 6)
# define RADEON_DAC_FORCE_DATA_SEL_RGB (3 << 6)
# define RADEON_DAC_FORCE_DATA_MASK 0x0003ff00
# define RADEON_DAC_FORCE_DATA_SHIFT 8
#define RADEON_DAC_MACRO_CNTL 0x0d04
# define RADEON_DAC_PDWN_R (1 << 16)
# define RADEON_DAC_PDWN_G (1 << 17)
# define RADEON_DAC_PDWN_B (1 << 18)
#define RADEON_DISP_PWR_MAN 0x0d08
# define RADEON_DISP_PWR_MAN_D3_CRTC_EN (1 << 0)
# define RADEON_DISP_PWR_MAN_D3_CRTC2_EN (1 << 4)
# define RADEON_DISP_PWR_MAN_DPMS_ON (0 << 8)
# define RADEON_DISP_PWR_MAN_DPMS_STANDBY (1 << 8)
# define RADEON_DISP_PWR_MAN_DPMS_SUSPEND (2 << 8)
# define RADEON_DISP_PWR_MAN_DPMS_OFF (3 << 8)
# define RADEON_DISP_D3_RST (1 << 16)
# define RADEON_DISP_D3_REG_RST (1 << 17)
# define RADEON_DISP_D3_GRPH_RST (1 << 18)
# define RADEON_DISP_D3_SUBPIC_RST (1 << 19)
# define RADEON_DISP_D3_OV0_RST (1 << 20)
# define RADEON_DISP_D1D2_GRPH_RST (1 << 21)
# define RADEON_DISP_D1D2_SUBPIC_RST (1 << 22)
# define RADEON_DISP_D1D2_OV0_RST (1 << 23)
# define RADEON_DIG_TMDS_ENABLE_RST (1 << 24)
# define RADEON_TV_ENABLE_RST (1 << 25)
# define RADEON_AUTO_PWRUP_EN (1 << 26)
#define RADEON_TV_DAC_CNTL 0x088c
# define RADEON_TV_DAC_NBLANK (1 << 0)
# define RADEON_TV_DAC_NHOLD (1 << 1)
# define RADEON_TV_DAC_PEDESTAL (1 << 2)
# define RADEON_TV_MONITOR_DETECT_EN (1 << 4)
# define RADEON_TV_DAC_CMPOUT (1 << 5)
# define RADEON_TV_DAC_STD_MASK (3 << 8)
# define RADEON_TV_DAC_STD_PAL (0 << 8)
# define RADEON_TV_DAC_STD_NTSC (1 << 8)
# define RADEON_TV_DAC_STD_PS2 (2 << 8)
# define RADEON_TV_DAC_STD_RS343 (3 << 8)
# define RADEON_TV_DAC_BGSLEEP (1 << 6)
# define RADEON_TV_DAC_BGADJ_MASK (0xf << 16)
# define RADEON_TV_DAC_BGADJ_SHIFT 16
# define RADEON_TV_DAC_DACADJ_MASK (0xf << 20)
# define RADEON_TV_DAC_DACADJ_SHIFT 20
# define RADEON_TV_DAC_RDACPD (1 << 24)
# define RADEON_TV_DAC_GDACPD (1 << 25)
# define RADEON_TV_DAC_BDACPD (1 << 26)
# define RADEON_TV_DAC_RDACDET (1 << 29)
# define RADEON_TV_DAC_GDACDET (1 << 30)
# define RADEON_TV_DAC_BDACDET (1 << 31)
/* R420 widens DACADJ and shifts the power-down bits up by one. */
# define R420_TV_DAC_DACADJ_MASK (0x1f << 20)
# define R420_TV_DAC_RDACPD (1 << 25)
# define R420_TV_DAC_GDACPD (1 << 26)
# define R420_TV_DAC_BDACPD (1 << 27)
# define R420_TV_DAC_TVENABLE (1 << 28)
/* Display output routing (DAC/TVDAC source selection), merge/alpha control,
 * linear-transform coefficients, and the 2D drawing-processor (DP) control
 * registers, including the heavily-used DP_GUI_MASTER_CNTL bit fields. */
#define RADEON_DISP_HW_DEBUG 0x0d14
# define RADEON_CRT2_DISP1_SEL (1 << 5)
#define RADEON_DISP_OUTPUT_CNTL 0x0d64
# define RADEON_DISP_DAC_SOURCE_MASK 0x03
# define RADEON_DISP_DAC2_SOURCE_MASK 0x0c
# define RADEON_DISP_DAC_SOURCE_CRTC2 0x01
# define RADEON_DISP_DAC_SOURCE_RMX 0x02
# define RADEON_DISP_DAC_SOURCE_LTU 0x03
# define RADEON_DISP_DAC2_SOURCE_CRTC2 0x04
# define RADEON_DISP_TVDAC_SOURCE_MASK (0x03 << 2)
# define RADEON_DISP_TVDAC_SOURCE_CRTC 0x0
# define RADEON_DISP_TVDAC_SOURCE_CRTC2 (0x01 << 2)
# define RADEON_DISP_TVDAC_SOURCE_RMX (0x02 << 2)
# define RADEON_DISP_TVDAC_SOURCE_LTU (0x03 << 2)
# define RADEON_DISP_TRANS_MATRIX_MASK (0x03 << 4)
# define RADEON_DISP_TRANS_MATRIX_ALPHA_MSB (0x00 << 4)
# define RADEON_DISP_TRANS_MATRIX_GRAPHICS (0x01 << 4)
# define RADEON_DISP_TRANS_MATRIX_VIDEO (0x02 << 4)
# define RADEON_DISP_TV_SOURCE_CRTC (1 << 16) /* crtc1 or crtc2 */
# define RADEON_DISP_TV_SOURCE_LTU (0 << 16) /* linear transform unit */
#define RADEON_DISP_TV_OUT_CNTL 0x0d6c
# define RADEON_DISP_TV_PATH_SRC_CRTC2 (1 << 16)
# define RADEON_DISP_TV_PATH_SRC_CRTC1 (0 << 16)
#define RADEON_DAC_CRC_SIG 0x02cc
#define RADEON_DAC_DATA 0x03c9 /* VGA */
#define RADEON_DAC_MASK 0x03c6 /* VGA */
#define RADEON_DAC_R_INDEX 0x03c7 /* VGA */
#define RADEON_DAC_W_INDEX 0x03c8 /* VGA */
#define RADEON_DDA_CONFIG 0x02e0
#define RADEON_DDA_ON_OFF 0x02e4
#define RADEON_DEFAULT_OFFSET 0x16e0
#define RADEON_DEFAULT_PITCH 0x16e4
#define RADEON_DEFAULT_SC_BOTTOM_RIGHT 0x16e8
# define RADEON_DEFAULT_SC_RIGHT_MAX (0x1fff << 0)
# define RADEON_DEFAULT_SC_BOTTOM_MAX (0x1fff << 16)
#define RADEON_DESTINATION_3D_CLR_CMP_VAL 0x1820
#define RADEON_DESTINATION_3D_CLR_CMP_MSK 0x1824
#define RADEON_DEVICE_ID 0x0f02 /* PCI */
#define RADEON_DISP_MISC_CNTL 0x0d00
# define RADEON_SOFT_RESET_GRPH_PP (1 << 0)
#define RADEON_DISP_MERGE_CNTL 0x0d60
# define RADEON_DISP_ALPHA_MODE_MASK 0x03
# define RADEON_DISP_ALPHA_MODE_KEY 0
# define RADEON_DISP_ALPHA_MODE_PER_PIXEL 1
# define RADEON_DISP_ALPHA_MODE_GLOBAL 2
# define RADEON_DISP_RGB_OFFSET_EN (1 << 8)
# define RADEON_DISP_GRPH_ALPHA_MASK (0xff << 16)
# define RADEON_DISP_OV0_ALPHA_MASK (0xff << 24)
# define RADEON_DISP_LIN_TRANS_BYPASS (0x01 << 9)
#define RADEON_DISP2_MERGE_CNTL 0x0d68
# define RADEON_DISP2_RGB_OFFSET_EN (1 << 8)
#define RADEON_DISP_LIN_TRANS_GRPH_A 0x0d80
#define RADEON_DISP_LIN_TRANS_GRPH_B 0x0d84
#define RADEON_DISP_LIN_TRANS_GRPH_C 0x0d88
#define RADEON_DISP_LIN_TRANS_GRPH_D 0x0d8c
#define RADEON_DISP_LIN_TRANS_GRPH_E 0x0d90
/* NOTE(review): GRPH_F is 0x0d98, not the expected 0x0d94 — matches
 * upstream; confirm against the register spec before "fixing". */
#define RADEON_DISP_LIN_TRANS_GRPH_F 0x0d98
#define RADEON_DP_BRUSH_BKGD_CLR 0x1478
#define RADEON_DP_BRUSH_FRGD_CLR 0x147c
#define RADEON_DP_CNTL 0x16c0
# define RADEON_DST_X_LEFT_TO_RIGHT (1 << 0)
# define RADEON_DST_Y_TOP_TO_BOTTOM (1 << 1)
# define RADEON_DP_DST_TILE_LINEAR (0 << 3)
# define RADEON_DP_DST_TILE_MACRO (1 << 3)
# define RADEON_DP_DST_TILE_MICRO (2 << 3)
# define RADEON_DP_DST_TILE_BOTH (3 << 3)
#define RADEON_DP_CNTL_XDIR_YDIR_YMAJOR 0x16d0
# define RADEON_DST_Y_MAJOR (1 << 2)
# define RADEON_DST_Y_DIR_TOP_TO_BOTTOM (1 << 15)
# define RADEON_DST_X_DIR_LEFT_TO_RIGHT (1 << 31)
#define RADEON_DP_DATATYPE 0x16c4
# define RADEON_HOST_BIG_ENDIAN_EN (1 << 29)
#define RADEON_DP_GUI_MASTER_CNTL 0x146c
# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
# define RADEON_GMC_SRC_CLIPPING (1 << 2)
# define RADEON_GMC_DST_CLIPPING (1 << 3)
# define RADEON_GMC_BRUSH_DATATYPE_MASK (0x0f << 4)
# define RADEON_GMC_BRUSH_8X8_MONO_FG_BG (0 << 4)
# define RADEON_GMC_BRUSH_8X8_MONO_FG_LA (1 << 4)
# define RADEON_GMC_BRUSH_1X8_MONO_FG_BG (4 << 4)
# define RADEON_GMC_BRUSH_1X8_MONO_FG_LA (5 << 4)
# define RADEON_GMC_BRUSH_32x1_MONO_FG_BG (6 << 4)
# define RADEON_GMC_BRUSH_32x1_MONO_FG_LA (7 << 4)
# define RADEON_GMC_BRUSH_32x32_MONO_FG_BG (8 << 4)
# define RADEON_GMC_BRUSH_32x32_MONO_FG_LA (9 << 4)
# define RADEON_GMC_BRUSH_8x8_COLOR (10 << 4)
# define RADEON_GMC_BRUSH_1X8_COLOR (12 << 4)
# define RADEON_GMC_BRUSH_SOLID_COLOR (13 << 4)
# define RADEON_GMC_BRUSH_NONE (15 << 4)
# define RADEON_GMC_DST_8BPP_CI (2 << 8)
# define RADEON_GMC_DST_15BPP (3 << 8)
# define RADEON_GMC_DST_16BPP (4 << 8)
# define RADEON_GMC_DST_24BPP (5 << 8)
# define RADEON_GMC_DST_32BPP (6 << 8)
# define RADEON_GMC_DST_8BPP_RGB (7 << 8)
# define RADEON_GMC_DST_Y8 (8 << 8)
# define RADEON_GMC_DST_RGB8 (9 << 8)
# define RADEON_GMC_DST_VYUY (11 << 8)
# define RADEON_GMC_DST_YVYU (12 << 8)
# define RADEON_GMC_DST_AYUV444 (14 << 8)
# define RADEON_GMC_DST_ARGB4444 (15 << 8)
# define RADEON_GMC_DST_DATATYPE_MASK (0x0f << 8)
# define RADEON_GMC_DST_DATATYPE_SHIFT 8
# define RADEON_GMC_SRC_DATATYPE_MASK (3 << 12)
# define RADEON_GMC_SRC_DATATYPE_MONO_FG_BG (0 << 12)
# define RADEON_GMC_SRC_DATATYPE_MONO_FG_LA (1 << 12)
# define RADEON_GMC_SRC_DATATYPE_COLOR (3 << 12)
# define RADEON_GMC_BYTE_PIX_ORDER (1 << 14)
# define RADEON_GMC_BYTE_MSB_TO_LSB (0 << 14)
# define RADEON_GMC_BYTE_LSB_TO_MSB (1 << 14)
# define RADEON_GMC_CONVERSION_TEMP (1 << 15)
# define RADEON_GMC_CONVERSION_TEMP_6500 (0 << 15)
# define RADEON_GMC_CONVERSION_TEMP_9300 (1 << 15)
# define RADEON_GMC_ROP3_MASK (0xff << 16)
# define RADEON_DP_SRC_SOURCE_MASK (7 << 24)
# define RADEON_DP_SRC_SOURCE_MEMORY (2 << 24)
# define RADEON_DP_SRC_SOURCE_HOST_DATA (3 << 24)
# define RADEON_GMC_3D_FCN_EN (1 << 27)
# define RADEON_GMC_CLR_CMP_CNTL_DIS (1 << 28)
# define RADEON_GMC_AUX_CLIP_DIS (1 << 29)
# define RADEON_GMC_WR_MSK_DIS (1 << 30)
# define RADEON_GMC_LD_BRUSH_Y_X (1 << 31)
/* Ternary raster operation (ROP3) codes for DP_GUI_MASTER_CNTL. The
 * standard 8-bit ROP3 code sits in bits 16-23, matching
 * RADEON_GMC_ROP3_MASK (0xff << 16). Suffix letters follow the usual
 * reverse-Polish ROP naming: S = source, D = destination, P = pattern
 * (brush), a/o/x/n = and/or/xor/not.
 * Cleanup: the second, identical definition of RADEON_ROP3_D that upstream
 * carried in the pattern-ROP group has been removed — the value is already
 * defined in the source-ROP group above. */
# define RADEON_ROP3_ZERO 0x00000000
# define RADEON_ROP3_DSa 0x00880000
# define RADEON_ROP3_SDna 0x00440000
# define RADEON_ROP3_S 0x00cc0000
# define RADEON_ROP3_DSna 0x00220000
# define RADEON_ROP3_D 0x00aa0000
# define RADEON_ROP3_DSx 0x00660000
# define RADEON_ROP3_DSo 0x00ee0000
# define RADEON_ROP3_DSon 0x00110000
# define RADEON_ROP3_DSxn 0x00990000
# define RADEON_ROP3_Dn 0x00550000
# define RADEON_ROP3_SDno 0x00dd0000
# define RADEON_ROP3_Sn 0x00330000
# define RADEON_ROP3_DSno 0x00bb0000
# define RADEON_ROP3_DSan 0x00770000
# define RADEON_ROP3_ONE 0x00ff0000
# define RADEON_ROP3_DPa 0x00a00000
# define RADEON_ROP3_PDna 0x00500000
# define RADEON_ROP3_P 0x00f00000
# define RADEON_ROP3_DPna 0x000a0000
# define RADEON_ROP3_DPx 0x005a0000
# define RADEON_ROP3_DPo 0x00fa0000
# define RADEON_ROP3_DPon 0x00050000
# define RADEON_ROP3_PDxn 0x00a50000
# define RADEON_ROP3_PDno 0x00f50000
# define RADEON_ROP3_Pn 0x000f0000
# define RADEON_ROP3_DPno 0x00af0000
# define RADEON_ROP3_DPan 0x005f0000
/* 2D destination/Bresenham line registers, flat-panel capture (FCP),
 * engine flush registers, fog tables, and the flat-panel CRTC shadow
 * timing masks/shifts (read back from FP_CRTC_*_TOTAL_DISP). */
#define RADEON_DP_GUI_MASTER_CNTL_C 0x1c84
#define RADEON_DP_MIX 0x16c8
#define RADEON_DP_SRC_BKGD_CLR 0x15dc
#define RADEON_DP_SRC_FRGD_CLR 0x15d8
#define RADEON_DP_WRITE_MASK 0x16cc
#define RADEON_DST_BRES_DEC 0x1630
#define RADEON_DST_BRES_ERR 0x1628
#define RADEON_DST_BRES_INC 0x162c
#define RADEON_DST_BRES_LNTH 0x1634
#define RADEON_DST_BRES_LNTH_SUB 0x1638
#define RADEON_DST_HEIGHT 0x1410
#define RADEON_DST_HEIGHT_WIDTH 0x143c
#define RADEON_DST_HEIGHT_WIDTH_8 0x158c
#define RADEON_DST_HEIGHT_WIDTH_BW 0x15b4
#define RADEON_DST_HEIGHT_Y 0x15a0
#define RADEON_DST_LINE_START 0x1600
#define RADEON_DST_LINE_END 0x1604
#define RADEON_DST_LINE_PATCOUNT 0x1608
# define RADEON_BRES_CNTL_SHIFT 8
#define RADEON_DST_OFFSET 0x1404
#define RADEON_DST_PITCH 0x1408
#define RADEON_DST_PITCH_OFFSET 0x142c
#define RADEON_DST_PITCH_OFFSET_C 0x1c80
# define RADEON_PITCH_SHIFT 21
# define RADEON_DST_TILE_LINEAR (0 << 30)
# define RADEON_DST_TILE_MACRO (1 << 30)
# define RADEON_DST_TILE_MICRO (2 << 30)
# define RADEON_DST_TILE_BOTH (3 << 30)
#define RADEON_DST_WIDTH 0x140c
#define RADEON_DST_WIDTH_HEIGHT 0x1598
#define RADEON_DST_WIDTH_X 0x1588
#define RADEON_DST_WIDTH_X_INCY 0x159c
#define RADEON_DST_X 0x141c
#define RADEON_DST_X_SUB 0x15a4
#define RADEON_DST_X_Y 0x1594
#define RADEON_DST_Y 0x1420
#define RADEON_DST_Y_SUB 0x15a8
#define RADEON_DST_Y_X 0x1438

#define RADEON_FCP_CNTL 0x0910
# define RADEON_FCP0_SRC_PCICLK 0
# define RADEON_FCP0_SRC_PCLK 1
# define RADEON_FCP0_SRC_PCLKb 2
# define RADEON_FCP0_SRC_HREF 3
# define RADEON_FCP0_SRC_GND 4
# define RADEON_FCP0_SRC_HREFb 5
#define RADEON_FLUSH_1 0x1704
#define RADEON_FLUSH_2 0x1708
#define RADEON_FLUSH_3 0x170c
#define RADEON_FLUSH_4 0x1710
#define RADEON_FLUSH_5 0x1714
#define RADEON_FLUSH_6 0x1718
#define RADEON_FLUSH_7 0x171c
#define RADEON_FOG_3D_TABLE_START 0x1810
#define RADEON_FOG_3D_TABLE_END 0x1814
#define RADEON_FOG_3D_TABLE_DENSITY 0x181c
#define RADEON_FOG_TABLE_INDEX 0x1a14
#define RADEON_FOG_TABLE_DATA 0x1a18
#define RADEON_FP_CRTC_H_TOTAL_DISP 0x0250
#define RADEON_FP_CRTC_V_TOTAL_DISP 0x0254
# define RADEON_FP_CRTC_H_TOTAL_MASK 0x000003ff
# define RADEON_FP_CRTC_H_DISP_MASK 0x01ff0000
# define RADEON_FP_CRTC_V_TOTAL_MASK 0x00000fff
# define RADEON_FP_CRTC_V_DISP_MASK 0x0fff0000
# define RADEON_FP_H_SYNC_STRT_CHAR_MASK 0x00001ff8
# define RADEON_FP_H_SYNC_WID_MASK 0x003f0000
# define RADEON_FP_V_SYNC_STRT_MASK 0x00000fff
# define RADEON_FP_V_SYNC_WID_MASK 0x001f0000
# define RADEON_FP_CRTC_H_TOTAL_SHIFT 0x00000000
# define RADEON_FP_CRTC_H_DISP_SHIFT 0x00000010
# define RADEON_FP_CRTC_V_TOTAL_SHIFT 0x00000000
# define RADEON_FP_CRTC_V_DISP_SHIFT 0x00000010
# define RADEON_FP_H_SYNC_STRT_CHAR_SHIFT 0x00000003
# define RADEON_FP_H_SYNC_WID_SHIFT 0x00000010
# define RADEON_FP_V_SYNC_STRT_SHIFT 0x00000000
# define RADEON_FP_V_SYNC_WID_SHIFT 0x00000010
/* Flat-panel (LVDS/TMDS/DVO) control: FP_GEN_CNTL for the first panel,
 * FP2_GEN_CNTL for the second, horizontal/vertical stretch (RMX) scalers,
 * and RS400-specific second-flat-panel registers. */
#define RADEON_FP_GEN_CNTL 0x0284
# define RADEON_FP_FPON (1 << 0)
# define RADEON_FP_BLANK_EN (1 << 1)
# define RADEON_FP_TMDS_EN (1 << 2)
# define RADEON_FP_PANEL_FORMAT (1 << 3)
# define RADEON_FP_EN_TMDS (1 << 7)
# define RADEON_FP_DETECT_SENSE (1 << 8)
# define R200_FP_SOURCE_SEL_MASK (3 << 10)
# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10)
# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10)
# define R200_FP_SOURCE_SEL_RMX (2 << 10)
# define R200_FP_SOURCE_SEL_TRANS (3 << 10)
# define RADEON_FP_SEL_CRTC1 (0 << 13)
# define RADEON_FP_SEL_CRTC2 (1 << 13)
# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
# define RADEON_FP_CRTC_USE_SHADOW_VEND (1 << 18)
# define RADEON_FP_RMX_HVSYNC_CONTROL_EN (1 << 20)
# define RADEON_FP_DFP_SYNC_SEL (1 << 21)
# define RADEON_FP_CRTC_LOCK_8DOT (1 << 22)
# define RADEON_FP_CRT_SYNC_SEL (1 << 23)
# define RADEON_FP_USE_SHADOW_EN (1 << 24)
# define RADEON_FP_CRT_SYNC_ALT (1 << 26)
#define RADEON_FP2_GEN_CNTL 0x0288
# define RADEON_FP2_BLANK_EN (1 << 1)
# define RADEON_FP2_ON (1 << 2)
# define RADEON_FP2_PANEL_FORMAT (1 << 3)
# define RADEON_FP2_DETECT_SENSE (1 << 8)
# define R200_FP2_SOURCE_SEL_MASK (3 << 10)
# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10)
# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10)
# define R200_FP2_SOURCE_SEL_RMX (2 << 10)
# define R200_FP2_SOURCE_SEL_TRANS_UNIT (3 << 10)
# define RADEON_FP2_SRC_SEL_MASK (3 << 13)
# define RADEON_FP2_SRC_SEL_CRTC2 (1 << 13)
# define RADEON_FP2_FP_POL (1 << 16)
# define RADEON_FP2_LP_POL (1 << 17)
# define RADEON_FP2_SCK_POL (1 << 18)
# define RADEON_FP2_LCD_CNTL_MASK (7 << 19)
# define RADEON_FP2_PAD_FLOP_EN (1 << 22)
# define RADEON_FP2_CRC_EN (1 << 23)
# define RADEON_FP2_CRC_READ_EN (1 << 24)
# define RADEON_FP2_DVO_EN (1 << 25)
# define RADEON_FP2_DVO_RATE_SEL_SDR (1 << 26)
# define R200_FP2_DVO_RATE_SEL_SDR (1 << 27)
# define R300_FP2_DVO_CLOCK_MODE_SINGLE (1 << 28)
# define R300_FP2_DVO_DUAL_CHANNEL_EN (1 << 29)
#define RADEON_FP_H_SYNC_STRT_WID 0x02c4
#define RADEON_FP_H2_SYNC_STRT_WID 0x03c4
#define RADEON_FP_HORZ_STRETCH 0x028c
#define RADEON_FP_HORZ2_STRETCH 0x038c
# define RADEON_HORZ_STRETCH_RATIO_MASK 0xffff
# define RADEON_HORZ_STRETCH_RATIO_MAX 4096
# define RADEON_HORZ_PANEL_SIZE (0x1ff << 16)
# define RADEON_HORZ_PANEL_SHIFT 16
# define RADEON_HORZ_STRETCH_PIXREP (0 << 25)
# define RADEON_HORZ_STRETCH_BLEND (1 << 26)
# define RADEON_HORZ_STRETCH_ENABLE (1 << 25)
# define RADEON_HORZ_AUTO_RATIO (1 << 27)
# define RADEON_HORZ_FP_LOOP_STRETCH (0x7 << 28)
# define RADEON_HORZ_AUTO_RATIO_INC (1 << 31)
#define RADEON_FP_HORZ_VERT_ACTIVE 0x0278
#define RADEON_FP_V_SYNC_STRT_WID 0x02c8
#define RADEON_FP_VERT_STRETCH 0x0290
#define RADEON_FP_V2_SYNC_STRT_WID 0x03c8
#define RADEON_FP_VERT2_STRETCH 0x0390
# define RADEON_VERT_PANEL_SIZE (0xfff << 12)
# define RADEON_VERT_PANEL_SHIFT 12
# define RADEON_VERT_STRETCH_RATIO_MASK 0xfff
# define RADEON_VERT_STRETCH_RATIO_SHIFT 0
# define RADEON_VERT_STRETCH_RATIO_MAX 4096
# define RADEON_VERT_STRETCH_ENABLE (1 << 25)
# define RADEON_VERT_STRETCH_LINEREP (0 << 26)
# define RADEON_VERT_STRETCH_BLEND (1 << 26)
# define RADEON_VERT_AUTO_RATIO_EN (1 << 27)
# define RADEON_VERT_AUTO_RATIO_INC (1 << 31)
# define RADEON_VERT_STRETCH_RESERVED 0x71000000
#define RS400_FP_2ND_GEN_CNTL 0x0384
# define RS400_FP_2ND_ON (1 << 0)
# define RS400_FP_2ND_BLANK_EN (1 << 1)
# define RS400_TMDS_2ND_EN (1 << 2)
# define RS400_PANEL_FORMAT_2ND (1 << 3)
# define RS400_FP_2ND_EN_TMDS (1 << 7)
# define RS400_FP_2ND_DETECT_SENSE (1 << 8)
# define RS400_FP_2ND_SOURCE_SEL_MASK (3 << 10)
# define RS400_FP_2ND_SOURCE_SEL_CRTC1 (0 << 10)
# define RS400_FP_2ND_SOURCE_SEL_CRTC2 (1 << 10)
# define RS400_FP_2ND_SOURCE_SEL_RMX (2 << 10)
# define RS400_FP_2ND_DETECT_EN (1 << 12)
# define RS400_HPD_2ND_SEL (1 << 13)
#define RS400_FP2_2_GEN_CNTL 0x0388
# define RS400_FP2_2_BLANK_EN (1 << 1)
# define RS400_FP2_2_ON (1 << 2)
# define RS400_FP2_2_PANEL_FORMAT (1 << 3)
# define RS400_FP2_2_DETECT_SENSE (1 << 8)
# define RS400_FP2_2_SOURCE_SEL_MASK (3 << 10)
# define RS400_FP2_2_SOURCE_SEL_CRTC1 (0 << 10)
# define RS400_FP2_2_SOURCE_SEL_CRTC2 (1 << 10)
# define RS400_FP2_2_SOURCE_SEL_RMX (2 << 10)
# define RS400_FP2_2_DVO2_EN (1 << 25)
#define RS400_TMDS2_CNTL 0x0394
#define RS400_TMDS2_TRANSMITTER_CNTL 0x03a4
# define RS400_TMDS2_PLLEN (1 << 0)
# define RS400_TMDS2_PLLRST (1 << 1)

984#define RADEON_GEN_INT_CNTL 0x0040
985# define RADEON_SW_INT_ENABLE (1 << 25)
986#define RADEON_GEN_INT_STATUS 0x0044
987# define RADEON_VSYNC_INT_AK (1 << 2)
988# define RADEON_VSYNC_INT (1 << 2)
989# define RADEON_VSYNC2_INT_AK (1 << 6)
990# define RADEON_VSYNC2_INT (1 << 6)
991# define RADEON_SW_INT_FIRE (1 << 26)
992# define RADEON_SW_INT_TEST (1 << 25)
993# define RADEON_SW_INT_TEST_ACK (1 << 25)
994#define RADEON_GENENB 0x03c3 /* VGA */
995#define RADEON_GENFC_RD 0x03ca /* VGA */
996#define RADEON_GENFC_WT 0x03da /* VGA, 0x03ba */
997#define RADEON_GENMO_RD 0x03cc /* VGA */
998#define RADEON_GENMO_WT 0x03c2 /* VGA */
999#define RADEON_GENS0 0x03c2 /* VGA */
1000#define RADEON_GENS1 0x03da /* VGA, 0x03ba */
1001#define RADEON_GPIO_MONID 0x0068 /* DDC interface via I2C */ /* DDC3 */
1002#define RADEON_GPIO_MONIDB 0x006c
1003#define RADEON_GPIO_CRT2_DDC 0x006c
1004#define RADEON_GPIO_DVI_DDC 0x0064 /* DDC2 */
1005#define RADEON_GPIO_VGA_DDC 0x0060 /* DDC1 */
1006# define RADEON_GPIO_A_0 (1 << 0)
1007# define RADEON_GPIO_A_1 (1 << 1)
1008# define RADEON_GPIO_Y_0 (1 << 8)
1009# define RADEON_GPIO_Y_1 (1 << 9)
1010# define RADEON_GPIO_Y_SHIFT_0 8
1011# define RADEON_GPIO_Y_SHIFT_1 9
1012# define RADEON_GPIO_EN_0 (1 << 16)
1013# define RADEON_GPIO_EN_1 (1 << 17)
1014# define RADEON_GPIO_MASK_0 (1 << 24) /*??*/
1015# define RADEON_GPIO_MASK_1 (1 << 25) /*??*/
1016#define RADEON_GRPH8_DATA 0x03cf /* VGA */
1017#define RADEON_GRPH8_IDX 0x03ce /* VGA */
1018#define RADEON_GUI_SCRATCH_REG0 0x15e0
1019#define RADEON_GUI_SCRATCH_REG1 0x15e4
1020#define RADEON_GUI_SCRATCH_REG2 0x15e8
1021#define RADEON_GUI_SCRATCH_REG3 0x15ec
1022#define RADEON_GUI_SCRATCH_REG4 0x15f0
1023#define RADEON_GUI_SCRATCH_REG5 0x15f4
1024
/* PCI header fields, HOST_DATA FIFO ports, host-path control, H-total
 * PLL regs, the multimedia and DVI I2C controllers, and PCI interrupt regs. */
#define RADEON_HEADER 0x0f0e /* PCI */
#define RADEON_HOST_DATA0 0x17c0
#define RADEON_HOST_DATA1 0x17c4
#define RADEON_HOST_DATA2 0x17c8
#define RADEON_HOST_DATA3 0x17cc
#define RADEON_HOST_DATA4 0x17d0
#define RADEON_HOST_DATA5 0x17d4
#define RADEON_HOST_DATA6 0x17d8
#define RADEON_HOST_DATA7 0x17dc
#define RADEON_HOST_DATA_LAST 0x17e0
#define RADEON_HOST_PATH_CNTL 0x0130
# define RADEON_HP_LIN_RD_CACHE_DIS (1 << 24)
# define RADEON_HDP_READ_BUFFER_INVALIDATE (1 << 27)
# define RADEON_HDP_SOFT_RESET (1 << 26)
# define RADEON_HDP_APER_CNTL (1 << 23)
#define RADEON_HTOTAL_CNTL 0x0009 /* PLL */
# define RADEON_HTOT_CNTL_VGA_EN (1 << 28)
#define RADEON_HTOTAL2_CNTL 0x002e /* PLL */

 /* Multimedia I2C bus */
#define RADEON_I2C_CNTL_0 0x0090
#define RADEON_I2C_DONE (1<<0)
#define RADEON_I2C_NACK (1<<1)
#define RADEON_I2C_HALT (1<<2)
#define RADEON_I2C_SOFT_RST (1<<5)
#define RADEON_I2C_DRIVE_EN (1<<6)
#define RADEON_I2C_DRIVE_SEL (1<<7)
#define RADEON_I2C_START (1<<8)
#define RADEON_I2C_STOP (1<<9)
#define RADEON_I2C_RECEIVE (1<<10)
#define RADEON_I2C_ABORT (1<<11)
#define RADEON_I2C_GO (1<<12)
#define RADEON_I2C_CNTL_1 0x0094
#define RADEON_I2C_SEL (1<<16)
#define RADEON_I2C_EN (1<<17)
#define RADEON_I2C_DATA 0x0098

#define RADEON_DVI_I2C_CNTL_0 0x02e0
# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3)
# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */
#define RADEON_DVI_I2C_DATA 0x02e8

#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */
#define RADEON_INTERRUPT_PIN 0x0f3d /* PCI */
#define RADEON_IO_BASE 0x0f14 /* PCI */

/* PCI latency, Bresenham lead regs, and the LVDS panel control/PLL/
 * power-sequencing registers. */
#define RADEON_LATENCY 0x0f0d /* PCI */
#define RADEON_LEAD_BRES_DEC 0x1608
#define RADEON_LEAD_BRES_LNTH 0x161c
#define RADEON_LEAD_BRES_LNTH_SUB 0x1624
#define RADEON_LVDS_GEN_CNTL 0x02d0
# define RADEON_LVDS_ON (1 << 0)
# define RADEON_LVDS_DISPLAY_DIS (1 << 1)
# define RADEON_LVDS_PANEL_TYPE (1 << 2)
# define RADEON_LVDS_PANEL_FORMAT (1 << 3)
# define RADEON_LVDS_NO_FM (0 << 4)
# define RADEON_LVDS_2_GREY (1 << 4)
# define RADEON_LVDS_4_GREY (2 << 4)
# define RADEON_LVDS_RST_FM (1 << 6)
# define RADEON_LVDS_EN (1 << 7)
# define RADEON_LVDS_BL_MOD_LEVEL_SHIFT 8
# define RADEON_LVDS_BL_MOD_LEVEL_MASK (0xff << 8)
# define RADEON_LVDS_BL_MOD_EN (1 << 16)
# define RADEON_LVDS_BL_CLK_SEL (1 << 17)
# define RADEON_LVDS_DIGON (1 << 18)
# define RADEON_LVDS_BLON (1 << 19)
# define RADEON_LVDS_FP_POL_LOW (1 << 20)
# define RADEON_LVDS_LP_POL_LOW (1 << 21)
# define RADEON_LVDS_DTM_POL_LOW (1 << 22)
# define RADEON_LVDS_SEL_CRTC2 (1 << 23)
# define RADEON_LVDS_FPDI_EN (1 << 27)
# define RADEON_LVDS_HSYNC_DELAY_SHIFT 28
#define RADEON_LVDS_PLL_CNTL 0x02d4
# define RADEON_HSYNC_DELAY_SHIFT 28
# define RADEON_HSYNC_DELAY_MASK (0xf << 28)
# define RADEON_LVDS_PLL_EN (1 << 16)
# define RADEON_LVDS_PLL_RESET (1 << 17)
# define R300_LVDS_SRC_SEL_MASK (3 << 18)
# define R300_LVDS_SRC_SEL_CRTC1 (0 << 18)
# define R300_LVDS_SRC_SEL_CRTC2 (1 << 18)
# define R300_LVDS_SRC_SEL_RMX (2 << 18)
#define RADEON_LVDS_SS_GEN_CNTL 0x02ec
# define RADEON_LVDS_PWRSEQ_DELAY1_SHIFT 16
# define RADEON_LVDS_PWRSEQ_DELAY2_SHIFT 20

/* Memory controller / memory-clock registers: display base addresses,
 * MCLK control, LCD/MD GPIO pads, memory config and power-up status,
 * MM register-index window, SEPROM, and R300 MC indirect access regs. */
#define RADEON_MAX_LATENCY 0x0f3f /* PCI */
#define RADEON_DISPLAY_BASE_ADDR 0x23c
#define RADEON_DISPLAY2_BASE_ADDR 0x33c
#define RADEON_OV0_BASE_ADDR 0x43c
#define RADEON_NB_TOM 0x15c
#define R300_MC_INIT_MISC_LAT_TIMER 0x180
# define R300_MC_DISP0R_INIT_LAT_SHIFT 8
# define R300_MC_DISP0R_INIT_LAT_MASK 0xf
# define R300_MC_DISP1R_INIT_LAT_SHIFT 12
# define R300_MC_DISP1R_INIT_LAT_MASK 0xf
#define RADEON_MCLK_CNTL 0x0012 /* PLL */
# define RADEON_MCLKA_SRC_SEL_MASK 0x7
# define RADEON_FORCEON_MCLKA (1 << 16)
# define RADEON_FORCEON_MCLKB (1 << 17)
# define RADEON_FORCEON_YCLKA (1 << 18)
# define RADEON_FORCEON_YCLKB (1 << 19)
# define RADEON_FORCEON_MC (1 << 20)
# define RADEON_FORCEON_AIC (1 << 21)
# define R300_DISABLE_MC_MCLKA (1 << 21)
# define R300_DISABLE_MC_MCLKB (1 << 21)
#define RADEON_MCLK_MISC 0x001f /* PLL */
# define RADEON_MC_MCLK_MAX_DYN_STOP_LAT (1 << 12)
# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14)
# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15)
#define RADEON_LCD_GPIO_MASK 0x01a0
#define RADEON_GPIOPAD_EN 0x01a0
#define RADEON_LCD_GPIO_Y_REG 0x01a4
#define RADEON_MDGPIO_A_REG 0x01ac
#define RADEON_MDGPIO_EN_REG 0x01b0
#define RADEON_MDGPIO_MASK 0x0198
#define RADEON_GPIOPAD_MASK 0x0198
#define RADEON_GPIOPAD_A 0x019c
#define RADEON_MDGPIO_Y_REG 0x01b4
#define RADEON_MEM_ADDR_CONFIG 0x0148
#define RADEON_MEM_BASE 0x0f10 /* PCI */
#define RADEON_MEM_CNTL 0x0140
# define RADEON_MEM_NUM_CHANNELS_MASK 0x01
# define RADEON_MEM_USE_B_CH_ONLY (1 << 1)
# define RV100_HALF_MODE (1 << 3)
# define R300_MEM_NUM_CHANNELS_MASK 0x03
# define R300_MEM_USE_CD_CH_ONLY (1 << 2)
#define RADEON_MEM_TIMING_CNTL 0x0144 /* EXT_MEM_CNTL */
#define RADEON_MEM_INIT_LAT_TIMER 0x0154
#define RADEON_MEM_INTF_CNTL 0x014c
#define RADEON_MEM_SDRAM_MODE_REG 0x0158
# define RADEON_SDRAM_MODE_MASK 0xffff0000
# define RADEON_B3MEM_RESET_MASK 0x6fffffff
# define RADEON_MEM_CFG_TYPE_DDR (1 << 30)
#define RADEON_MEM_STR_CNTL 0x0150
# define RADEON_MEM_PWRUP_COMPL_A (1 << 0)
# define RADEON_MEM_PWRUP_COMPL_B (1 << 1)
# define R300_MEM_PWRUP_COMPL_C (1 << 2)
# define R300_MEM_PWRUP_COMPL_D (1 << 3)
# define RADEON_MEM_PWRUP_COMPLETE 0x03
# define R300_MEM_PWRUP_COMPLETE 0x0f
#define RADEON_MC_STATUS 0x0150
# define RADEON_MC_IDLE (1 << 2)
# define R300_MC_IDLE (1 << 4)
#define RADEON_MEM_VGA_RP_SEL 0x003c
#define RADEON_MEM_VGA_WP_SEL 0x0038
#define RADEON_MIN_GRANT 0x0f3e /* PCI */
#define RADEON_MM_DATA 0x0004
#define RADEON_MM_INDEX 0x0000
# define RADEON_MM_APER (1 << 31)
#define RADEON_MPLL_CNTL 0x000e /* PLL */
#define RADEON_MPP_TB_CONFIG 0x01c0 /* ? */
#define RADEON_MPP_GP_CONFIG 0x01c8 /* ? */
#define RADEON_SEPROM_CNTL1 0x01c0
# define RADEON_SCK_PRESCALE_SHIFT 24
# define RADEON_SCK_PRESCALE_MASK (0xff << 24)
#define R300_MC_IND_INDEX 0x01f8
# define R300_MC_IND_ADDR_MASK 0x3f
# define R300_MC_IND_WR_EN (1 << 8)
#define R300_MC_IND_DATA 0x01fc
#define R300_MC_READ_CNTL_AB 0x017c
# define R300_MEM_RBS_POSITION_A_MASK 0x03
#define R300_MC_READ_CNTL_CD_mcind 0x24
# define R300_MEM_RBS_POSITION_C_MASK 0x03

#define RADEON_N_VIF_COUNT 0x0248

/* OV0/OV1 video overlay scaler registers: auto-flip, exclusive regions,
 * filter coefficients, gamma tables, color keying, linear transform,
 * per-plane accumulators, scaler control and video buffer addressing. */
#define RADEON_OV0_AUTO_FLIP_CNTL 0x0470
# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_NUM 0x00000007
# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_REPEAT_FIELD 0x00000008
# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_BUF_ODD 0x00000010
# define RADEON_OV0_AUTO_FLIP_CNTL_IGNORE_REPEAT_FIELD 0x00000020
# define RADEON_OV0_AUTO_FLIP_CNTL_SOFT_EOF_TOGGLE 0x00000040
# define RADEON_OV0_AUTO_FLIP_CNTL_VID_PORT_SELECT 0x00000300
# define RADEON_OV0_AUTO_FLIP_CNTL_P1_FIRST_LINE_EVEN 0x00010000
# define RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_EVEN_DOWN 0x00040000
# define RADEON_OV0_AUTO_FLIP_CNTL_SHIFT_ODD_DOWN 0x00080000
# define RADEON_OV0_AUTO_FLIP_CNTL_FIELD_POL_SOURCE 0x00800000

#define RADEON_OV0_COLOUR_CNTL 0x04E0
#define RADEON_OV0_DEINTERLACE_PATTERN 0x0474
#define RADEON_OV0_EXCLUSIVE_HORZ 0x0408
# define RADEON_EXCL_HORZ_START_MASK 0x000000ff
# define RADEON_EXCL_HORZ_END_MASK 0x0000ff00
# define RADEON_EXCL_HORZ_BACK_PORCH_MASK 0x00ff0000
# define RADEON_EXCL_HORZ_EXCLUSIVE_EN 0x80000000
#define RADEON_OV0_EXCLUSIVE_VERT 0x040C
# define RADEON_EXCL_VERT_START_MASK 0x000003ff
# define RADEON_EXCL_VERT_END_MASK 0x03ff0000
#define RADEON_OV0_FILTER_CNTL 0x04A0
# define RADEON_FILTER_PROGRAMMABLE_COEF 0x0
# define RADEON_FILTER_HC_COEF_HORZ_Y 0x1
# define RADEON_FILTER_HC_COEF_HORZ_UV 0x2
# define RADEON_FILTER_HC_COEF_VERT_Y 0x4
# define RADEON_FILTER_HC_COEF_VERT_UV 0x8
# define RADEON_FILTER_HARDCODED_COEF 0xf
# define RADEON_FILTER_COEF_MASK 0xf

#define RADEON_OV0_FOUR_TAP_COEF_0 0x04B0
#define RADEON_OV0_FOUR_TAP_COEF_1 0x04B4
#define RADEON_OV0_FOUR_TAP_COEF_2 0x04B8
#define RADEON_OV0_FOUR_TAP_COEF_3 0x04BC
#define RADEON_OV0_FOUR_TAP_COEF_4 0x04C0
#define RADEON_OV0_FLAG_CNTL 0x04DC
#define RADEON_OV0_GAMMA_000_00F 0x0d40
#define RADEON_OV0_GAMMA_010_01F 0x0d44
#define RADEON_OV0_GAMMA_020_03F 0x0d48
#define RADEON_OV0_GAMMA_040_07F 0x0d4c
#define RADEON_OV0_GAMMA_080_0BF 0x0e00
#define RADEON_OV0_GAMMA_0C0_0FF 0x0e04
#define RADEON_OV0_GAMMA_100_13F 0x0e08
#define RADEON_OV0_GAMMA_140_17F 0x0e0c
#define RADEON_OV0_GAMMA_180_1BF 0x0e10
#define RADEON_OV0_GAMMA_1C0_1FF 0x0e14
#define RADEON_OV0_GAMMA_200_23F 0x0e18
#define RADEON_OV0_GAMMA_240_27F 0x0e1c
#define RADEON_OV0_GAMMA_280_2BF 0x0e20
#define RADEON_OV0_GAMMA_2C0_2FF 0x0e24
#define RADEON_OV0_GAMMA_300_33F 0x0e28
#define RADEON_OV0_GAMMA_340_37F 0x0e2c
#define RADEON_OV0_GAMMA_380_3BF 0x0d50
#define RADEON_OV0_GAMMA_3C0_3FF 0x0d54
#define RADEON_OV0_GRAPHICS_KEY_CLR_LOW 0x04EC
#define RADEON_OV0_GRAPHICS_KEY_CLR_HIGH 0x04F0
#define RADEON_OV0_H_INC 0x0480
#define RADEON_OV0_KEY_CNTL 0x04F4
# define RADEON_VIDEO_KEY_FN_MASK 0x00000003L
# define RADEON_VIDEO_KEY_FN_FALSE 0x00000000L
# define RADEON_VIDEO_KEY_FN_TRUE 0x00000001L
# define RADEON_VIDEO_KEY_FN_EQ 0x00000002L
# define RADEON_VIDEO_KEY_FN_NE 0x00000003L
# define RADEON_GRAPHIC_KEY_FN_MASK 0x00000030L
# define RADEON_GRAPHIC_KEY_FN_FALSE 0x00000000L
# define RADEON_GRAPHIC_KEY_FN_TRUE 0x00000010L
# define RADEON_GRAPHIC_KEY_FN_EQ 0x00000020L
# define RADEON_GRAPHIC_KEY_FN_NE 0x00000030L
# define RADEON_CMP_MIX_MASK 0x00000100L
# define RADEON_CMP_MIX_OR 0x00000000L
# define RADEON_CMP_MIX_AND 0x00000100L
#define RADEON_OV0_LIN_TRANS_A 0x0d20
#define RADEON_OV0_LIN_TRANS_B 0x0d24
#define RADEON_OV0_LIN_TRANS_C 0x0d28
#define RADEON_OV0_LIN_TRANS_D 0x0d2c
#define RADEON_OV0_LIN_TRANS_E 0x0d30
#define RADEON_OV0_LIN_TRANS_F 0x0d34
#define RADEON_OV0_P1_BLANK_LINES_AT_TOP 0x0430
# define RADEON_P1_BLNK_LN_AT_TOP_M1_MASK 0x00000fffL
# define RADEON_P1_ACTIVE_LINES_M1 0x0fff0000L
#define RADEON_OV0_P1_H_ACCUM_INIT 0x0488
#define RADEON_OV0_P1_V_ACCUM_INIT 0x0428
# define RADEON_OV0_P1_MAX_LN_IN_PER_LN_OUT 0x00000003L
# define RADEON_OV0_P1_V_ACCUM_INIT_MASK 0x01ff8000L
#define RADEON_OV0_P1_X_START_END 0x0494
#define RADEON_OV0_P2_X_START_END 0x0498
#define RADEON_OV0_P23_BLANK_LINES_AT_TOP 0x0434
# define RADEON_P23_BLNK_LN_AT_TOP_M1_MASK 0x000007ffL
# define RADEON_P23_ACTIVE_LINES_M1 0x07ff0000L
#define RADEON_OV0_P23_H_ACCUM_INIT 0x048C
#define RADEON_OV0_P23_V_ACCUM_INIT 0x042C
#define RADEON_OV0_P3_X_START_END 0x049C
#define RADEON_OV0_REG_LOAD_CNTL 0x0410
# define RADEON_REG_LD_CTL_LOCK 0x00000001L
# define RADEON_REG_LD_CTL_VBLANK_DURING_LOCK 0x00000002L
# define RADEON_REG_LD_CTL_STALL_GUI_UNTIL_FLIP 0x00000004L
# define RADEON_REG_LD_CTL_LOCK_READBACK 0x00000008L
# define RADEON_REG_LD_CTL_FLIP_READBACK 0x00000010L
#define RADEON_OV0_SCALE_CNTL 0x0420
# define RADEON_SCALER_HORZ_PICK_NEAREST 0x00000004L
# define RADEON_SCALER_VERT_PICK_NEAREST 0x00000008L
# define RADEON_SCALER_SIGNED_UV 0x00000010L
# define RADEON_SCALER_GAMMA_SEL_MASK 0x00000060L
# define RADEON_SCALER_GAMMA_SEL_BRIGHT 0x00000000L
# define RADEON_SCALER_GAMMA_SEL_G22 0x00000020L
# define RADEON_SCALER_GAMMA_SEL_G18 0x00000040L
# define RADEON_SCALER_GAMMA_SEL_G14 0x00000060L
# define RADEON_SCALER_COMCORE_SHIFT_UP_ONE 0x00000080L
# define RADEON_SCALER_SURFAC_FORMAT 0x00000f00L
# define RADEON_SCALER_SOURCE_15BPP 0x00000300L
# define RADEON_SCALER_SOURCE_16BPP 0x00000400L
# define RADEON_SCALER_SOURCE_32BPP 0x00000600L
# define RADEON_SCALER_SOURCE_YUV9 0x00000900L
# define RADEON_SCALER_SOURCE_YUV12 0x00000A00L
# define RADEON_SCALER_SOURCE_VYUY422 0x00000B00L
# define RADEON_SCALER_SOURCE_YVYU422 0x00000C00L
# define RADEON_SCALER_ADAPTIVE_DEINT 0x00001000L
# define RADEON_SCALER_TEMPORAL_DEINT 0x00002000L
# define RADEON_SCALER_CRTC_SEL 0x00004000L
# define RADEON_SCALER_SMART_SWITCH 0x00008000L
# define RADEON_SCALER_BURST_PER_PLANE 0x007F0000L
# define RADEON_SCALER_DOUBLE_BUFFER 0x01000000L
# define RADEON_SCALER_DIS_LIMIT 0x08000000L
# define RADEON_SCALER_LIN_TRANS_BYPASS 0x10000000L
# define RADEON_SCALER_INT_EMU 0x20000000L
# define RADEON_SCALER_ENABLE 0x40000000L
# define RADEON_SCALER_SOFT_RESET 0x80000000L
#define RADEON_OV0_STEP_BY 0x0484
#define RADEON_OV0_TEST 0x04F8
#define RADEON_OV0_V_INC 0x0424
#define RADEON_OV0_VID_BUF_PITCH0_VALUE 0x0460
#define RADEON_OV0_VID_BUF_PITCH1_VALUE 0x0464
#define RADEON_OV0_VID_BUF0_BASE_ADRS 0x0440
# define RADEON_VIF_BUF0_PITCH_SEL 0x00000001L
# define RADEON_VIF_BUF0_TILE_ADRS 0x00000002L
# define RADEON_VIF_BUF0_BASE_ADRS_MASK 0x03fffff0L
# define RADEON_VIF_BUF0_1ST_LINE_LSBS_MASK 0x48000000L
#define RADEON_OV0_VID_BUF1_BASE_ADRS 0x0444
# define RADEON_VIF_BUF1_PITCH_SEL 0x00000001L
# define RADEON_VIF_BUF1_TILE_ADRS 0x00000002L
# define RADEON_VIF_BUF1_BASE_ADRS_MASK 0x03fffff0L
# define RADEON_VIF_BUF1_1ST_LINE_LSBS_MASK 0x48000000L
#define RADEON_OV0_VID_BUF2_BASE_ADRS 0x0448
# define RADEON_VIF_BUF2_PITCH_SEL 0x00000001L
# define RADEON_VIF_BUF2_TILE_ADRS 0x00000002L
# define RADEON_VIF_BUF2_BASE_ADRS_MASK 0x03fffff0L
# define RADEON_VIF_BUF2_1ST_LINE_LSBS_MASK 0x48000000L
#define RADEON_OV0_VID_BUF3_BASE_ADRS 0x044C
#define RADEON_OV0_VID_BUF4_BASE_ADRS 0x0450
#define RADEON_OV0_VID_BUF5_BASE_ADRS 0x0454
#define RADEON_OV0_VIDEO_KEY_CLR_HIGH 0x04E8
#define RADEON_OV0_VIDEO_KEY_CLR_LOW 0x04E4
#define RADEON_OV0_Y_X_START 0x0400
#define RADEON_OV0_Y_X_END 0x0404
#define RADEON_OV1_Y_X_START 0x0600
#define RADEON_OV1_Y_X_END 0x0604
#define RADEON_OVR_CLR 0x0230
#define RADEON_OVR_WID_LEFT_RIGHT 0x0234
#define RADEON_OVR_WID_TOP_BOTTOM 0x0238

/* first capture unit */

#define RADEON_CAP0_BUF0_OFFSET 0x0920
#define RADEON_CAP0_BUF1_OFFSET 0x0924
#define RADEON_CAP0_BUF0_EVEN_OFFSET 0x0928
#define RADEON_CAP0_BUF1_EVEN_OFFSET 0x092C

#define RADEON_CAP0_BUF_PITCH 0x0930
#define RADEON_CAP0_V_WINDOW 0x0934
#define RADEON_CAP0_H_WINDOW 0x0938
#define RADEON_CAP0_VBI0_OFFSET 0x093C
#define RADEON_CAP0_VBI1_OFFSET 0x0940
#define RADEON_CAP0_VBI_V_WINDOW 0x0944
#define RADEON_CAP0_VBI_H_WINDOW 0x0948
#define RADEON_CAP0_PORT_MODE_CNTL 0x094C
#define RADEON_CAP0_TRIG_CNTL 0x0950
#define RADEON_CAP0_DEBUG 0x0954
#define RADEON_CAP0_CONFIG 0x0958
/* NOTE(review): "CONTINUOS" below is a historical misspelling; the name is
 * part of the existing API surface, so it must not be corrected here. */
# define RADEON_CAP0_CONFIG_CONTINUOS 0x00000001
# define RADEON_CAP0_CONFIG_START_FIELD_EVEN 0x00000002
# define RADEON_CAP0_CONFIG_START_BUF_GET 0x00000004
# define RADEON_CAP0_CONFIG_START_BUF_SET 0x00000008
# define RADEON_CAP0_CONFIG_BUF_TYPE_ALT 0x00000010
# define RADEON_CAP0_CONFIG_BUF_TYPE_FRAME 0x00000020
# define RADEON_CAP0_CONFIG_ONESHOT_MODE_FRAME 0x00000040
# define RADEON_CAP0_CONFIG_BUF_MODE_DOUBLE 0x00000080
# define RADEON_CAP0_CONFIG_BUF_MODE_TRIPLE 0x00000100
# define RADEON_CAP0_CONFIG_MIRROR_EN 0x00000200
# define RADEON_CAP0_CONFIG_ONESHOT_MIRROR_EN 0x00000400
# define RADEON_CAP0_CONFIG_VIDEO_SIGNED_UV 0x00000800
# define RADEON_CAP0_CONFIG_ANC_DECODE_EN 0x00001000
# define RADEON_CAP0_CONFIG_VBI_EN 0x00002000
# define RADEON_CAP0_CONFIG_SOFT_PULL_DOWN_EN 0x00004000
# define RADEON_CAP0_CONFIG_VIP_EXTEND_FLAG_EN 0x00008000
# define RADEON_CAP0_CONFIG_FAKE_FIELD_EN 0x00010000
# define RADEON_CAP0_CONFIG_ODD_ONE_MORE_LINE 0x00020000
# define RADEON_CAP0_CONFIG_EVEN_ONE_MORE_LINE 0x00040000
# define RADEON_CAP0_CONFIG_HORZ_DIVIDE_2 0x00080000
# define RADEON_CAP0_CONFIG_HORZ_DIVIDE_4 0x00100000
# define RADEON_CAP0_CONFIG_VERT_DIVIDE_2 0x00200000
# define RADEON_CAP0_CONFIG_VERT_DIVIDE_4 0x00400000
# define RADEON_CAP0_CONFIG_FORMAT_BROOKTREE 0x00000000
# define RADEON_CAP0_CONFIG_FORMAT_CCIR656 0x00800000
# define RADEON_CAP0_CONFIG_FORMAT_ZV 0x01000000
# define RADEON_CAP0_CONFIG_FORMAT_VIP 0x01800000
# define RADEON_CAP0_CONFIG_FORMAT_TRANSPORT 0x02000000
# define RADEON_CAP0_CONFIG_HORZ_DECIMATOR 0x04000000
# define RADEON_CAP0_CONFIG_VIDEO_IN_YVYU422 0x00000000
# define RADEON_CAP0_CONFIG_VIDEO_IN_VYUY422 0x20000000
# define RADEON_CAP0_CONFIG_VBI_DIVIDE_2 0x40000000
# define RADEON_CAP0_CONFIG_VBI_DIVIDE_4 0x80000000
#define RADEON_CAP0_ANC_ODD_OFFSET 0x095C
#define RADEON_CAP0_ANC_EVEN_OFFSET 0x0960
#define RADEON_CAP0_ANC_H_WINDOW 0x0964
#define RADEON_CAP0_VIDEO_SYNC_TEST 0x0968
#define RADEON_CAP0_ONESHOT_BUF_OFFSET 0x096C
#define RADEON_CAP0_BUF_STATUS 0x0970
/* #define RADEON_CAP0_DWNSC_XRATIO 0x0978 */
/* #define RADEON_CAP0_XSHARPNESS 0x097C */
#define RADEON_CAP0_VBI2_OFFSET 0x0980
#define RADEON_CAP0_VBI3_OFFSET 0x0984
#define RADEON_CAP0_ANC2_OFFSET 0x0988
#define RADEON_CAP0_ANC3_OFFSET 0x098C
#define RADEON_VID_BUFFER_CONTROL 0x0900

/* second capture unit */

#define RADEON_CAP1_BUF0_OFFSET 0x0990
#define RADEON_CAP1_BUF1_OFFSET 0x0994
#define RADEON_CAP1_BUF0_EVEN_OFFSET 0x0998
#define RADEON_CAP1_BUF1_EVEN_OFFSET 0x099C

#define RADEON_CAP1_BUF_PITCH 0x09A0
#define RADEON_CAP1_V_WINDOW 0x09A4
#define RADEON_CAP1_H_WINDOW 0x09A8
#define RADEON_CAP1_VBI_ODD_OFFSET 0x09AC
#define RADEON_CAP1_VBI_EVEN_OFFSET 0x09B0
#define RADEON_CAP1_VBI_V_WINDOW 0x09B4
#define RADEON_CAP1_VBI_H_WINDOW 0x09B8
#define RADEON_CAP1_PORT_MODE_CNTL 0x09BC
#define RADEON_CAP1_TRIG_CNTL 0x09C0
#define RADEON_CAP1_DEBUG 0x09C4
#define RADEON_CAP1_CONFIG 0x09C8
#define RADEON_CAP1_ANC_ODD_OFFSET 0x09CC
#define RADEON_CAP1_ANC_EVEN_OFFSET 0x09D0
#define RADEON_CAP1_ANC_H_WINDOW 0x09D4
#define RADEON_CAP1_VIDEO_SYNC_TEST 0x09D8
#define RADEON_CAP1_ONESHOT_BUF_OFFSET 0x09DC
#define RADEON_CAP1_BUF_STATUS 0x09E0
#define RADEON_CAP1_DWNSC_XRATIO 0x09E8
#define RADEON_CAP1_XSHARPNESS 0x09EC

/* misc multimedia registers */

#define RADEON_IDCT_RUNS 0x1F80
#define RADEON_IDCT_LEVELS 0x1F84
#define RADEON_IDCT_CONTROL 0x1FBC
#define RADEON_IDCT_AUTH_CONTROL 0x1F88
#define RADEON_IDCT_AUTH 0x1F8C

/* PLL blocks (P2PLL for CRTC2, PPLL for CRTC1), palette, PCI GART page,
 * pixel-clock gating (PIXCLKS_CNTL), PLL test, and PCI power-management
 * (PMI) capability registers. */
#define RADEON_P2PLL_CNTL 0x002a /* P2PLL */
# define RADEON_P2PLL_RESET (1 << 0)
# define RADEON_P2PLL_SLEEP (1 << 1)
# define RADEON_P2PLL_PVG_MASK (7 << 11)
# define RADEON_P2PLL_PVG_SHIFT 11
# define RADEON_P2PLL_ATOMIC_UPDATE_EN (1 << 16)
# define RADEON_P2PLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
# define RADEON_P2PLL_ATOMIC_UPDATE_VSYNC (1 << 18)
#define RADEON_P2PLL_DIV_0 0x002c
# define RADEON_P2PLL_FB0_DIV_MASK 0x07ff
# define RADEON_P2PLL_POST0_DIV_MASK 0x00070000
#define RADEON_P2PLL_REF_DIV 0x002B /* PLL */
# define RADEON_P2PLL_REF_DIV_MASK 0x03ff
# define RADEON_P2PLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
# define RADEON_P2PLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
# define R300_PPLL_REF_DIV_ACC_MASK (0x3ff << 18)
# define R300_PPLL_REF_DIV_ACC_SHIFT 18
#define RADEON_PALETTE_DATA 0x00b4
#define RADEON_PALETTE_30_DATA 0x00b8
#define RADEON_PALETTE_INDEX 0x00b0
#define RADEON_PCI_GART_PAGE 0x017c
#define RADEON_PIXCLKS_CNTL 0x002d
# define RADEON_PIX2CLK_SRC_SEL_MASK 0x03
# define RADEON_PIX2CLK_SRC_SEL_CPUCLK 0x00
# define RADEON_PIX2CLK_SRC_SEL_PSCANCLK 0x01
# define RADEON_PIX2CLK_SRC_SEL_BYTECLK 0x02
# define RADEON_PIX2CLK_SRC_SEL_P2PLLCLK 0x03
# define RADEON_PIX2CLK_ALWAYS_ONb (1<<6)
# define RADEON_PIX2CLK_DAC_ALWAYS_ONb (1<<7)
# define RADEON_PIXCLK_TV_SRC_SEL (1 << 8)
# define RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb (1 << 9)
# define R300_DVOCLK_ALWAYS_ONb (1 << 10)
# define RADEON_PIXCLK_BLEND_ALWAYS_ONb (1 << 11)
# define RADEON_PIXCLK_GV_ALWAYS_ONb (1 << 12)
# define RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb (1 << 13)
# define R300_PIXCLK_DVO_ALWAYS_ONb (1 << 13)
# define RADEON_PIXCLK_LVDS_ALWAYS_ONb (1 << 14)
# define RADEON_PIXCLK_TMDS_ALWAYS_ONb (1 << 15)
# define R300_PIXCLK_TRANS_ALWAYS_ONb (1 << 16)
# define R300_PIXCLK_TVO_ALWAYS_ONb (1 << 17)
# define R300_P2G2CLK_ALWAYS_ONb (1 << 18)
# define R300_P2G2CLK_DAC_ALWAYS_ONb (1 << 19)
# define R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF (1 << 23)
#define RADEON_PLANE_3D_MASK_C 0x1d44
#define RADEON_PLL_TEST_CNTL 0x0013 /* PLL */
# define RADEON_PLL_MASK_READ_B (1 << 9)
#define RADEON_PMI_CAP_ID 0x0f5c /* PCI */
#define RADEON_PMI_DATA 0x0f63 /* PCI */
#define RADEON_PMI_NXT_CAP_PTR 0x0f5d /* PCI */
#define RADEON_PMI_PMC_REG 0x0f5e /* PCI */
#define RADEON_PMI_PMCSR_REG 0x0f60 /* PCI */
#define RADEON_PMI_REGISTER 0x0f5c /* PCI */
#define RADEON_PPLL_CNTL 0x0002 /* PLL */
# define RADEON_PPLL_RESET (1 << 0)
# define RADEON_PPLL_SLEEP (1 << 1)
# define RADEON_PPLL_PVG_MASK (7 << 11)
# define RADEON_PPLL_PVG_SHIFT 11
# define RADEON_PPLL_ATOMIC_UPDATE_EN (1 << 16)
# define RADEON_PPLL_VGA_ATOMIC_UPDATE_EN (1 << 17)
# define RADEON_PPLL_ATOMIC_UPDATE_VSYNC (1 << 18)
#define RADEON_PPLL_DIV_0 0x0004 /* PLL */
#define RADEON_PPLL_DIV_1 0x0005 /* PLL */
#define RADEON_PPLL_DIV_2 0x0006 /* PLL */
#define RADEON_PPLL_DIV_3 0x0007 /* PLL */
# define RADEON_PPLL_FB3_DIV_MASK 0x07ff
# define RADEON_PPLL_POST3_DIV_MASK 0x00070000
#define RADEON_PPLL_REF_DIV 0x0003 /* PLL */
# define RADEON_PPLL_REF_DIV_MASK 0x03ff
# define RADEON_PPLL_ATOMIC_UPDATE_R (1 << 15) /* same as _W */
# define RADEON_PPLL_ATOMIC_UPDATE_W (1 << 15) /* same as _R */
#define RADEON_PWR_MNGMT_CNTL_STATUS 0x0f60 /* PCI */

/* RBBM (register backbone) control/soft-reset/status, 2D and 3D
 * destination/Z cache control, and PCI revision/class id registers. */
#define RADEON_RBBM_GUICNTL 0x172c
# define RADEON_HOST_DATA_SWAP_NONE (0 << 0)
# define RADEON_HOST_DATA_SWAP_16BIT (1 << 0)
# define RADEON_HOST_DATA_SWAP_32BIT (2 << 0)
# define RADEON_HOST_DATA_SWAP_HDW (3 << 0)
#define RADEON_RBBM_SOFT_RESET 0x00f0
# define RADEON_SOFT_RESET_CP (1 << 0)
# define RADEON_SOFT_RESET_HI (1 << 1)
# define RADEON_SOFT_RESET_SE (1 << 2)
# define RADEON_SOFT_RESET_RE (1 << 3)
# define RADEON_SOFT_RESET_PP (1 << 4)
# define RADEON_SOFT_RESET_E2 (1 << 5)
# define RADEON_SOFT_RESET_RB (1 << 6)
# define RADEON_SOFT_RESET_HDP (1 << 7)
#define RADEON_RBBM_STATUS 0x0e40
# define RADEON_RBBM_FIFOCNT_MASK 0x007f
# define RADEON_RBBM_ACTIVE (1 << 31)
#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c
# define RADEON_RB2D_DC_FLUSH (3 << 0)
# define RADEON_RB2D_DC_FREE (3 << 2)
# define RADEON_RB2D_DC_FLUSH_ALL 0xf
# define RADEON_RB2D_DC_BUSY (1 << 31)
#define RADEON_RB2D_DSTCACHE_MODE 0x3428
#define RADEON_DSTCACHE_CTLSTAT 0x1714

#define RADEON_RB3D_ZCACHE_MODE 0x3250
#define RADEON_RB3D_ZCACHE_CTLSTAT 0x3254
# define RADEON_RB3D_ZC_FLUSH_ALL 0x5
#define RADEON_RB3D_DSTCACHE_MODE 0x3258
# define RADEON_RB3D_DC_CACHE_ENABLE (0)
# define RADEON_RB3D_DC_2D_CACHE_DISABLE (1)
# define RADEON_RB3D_DC_3D_CACHE_DISABLE (2)
# define RADEON_RB3D_DC_CACHE_DISABLE (3)
# define RADEON_RB3D_DC_2D_CACHE_LINESIZE_128 (1 << 2)
# define RADEON_RB3D_DC_3D_CACHE_LINESIZE_128 (2 << 2)
# define RADEON_RB3D_DC_2D_CACHE_AUTOFLUSH (1 << 8)
# define RADEON_RB3D_DC_3D_CACHE_AUTOFLUSH (2 << 8)
# define R200_RB3D_DC_2D_CACHE_AUTOFREE (1 << 10)
# define R200_RB3D_DC_3D_CACHE_AUTOFREE (2 << 10)
# define RADEON_RB3D_DC_FORCE_RMW (1 << 16)
# define RADEON_RB3D_DC_DISABLE_RI_FILL (1 << 24)
# define RADEON_RB3D_DC_DISABLE_RI_READ (1 << 25)

#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325C
# define RADEON_RB3D_DC_FLUSH (3 << 0)
# define RADEON_RB3D_DC_FREE (3 << 2)
# define RADEON_RB3D_DC_FLUSH_ALL 0xf
# define RADEON_RB3D_DC_BUSY (1 << 31)

#define RADEON_REG_BASE 0x0f18 /* PCI */
#define RADEON_REGPROG_INF 0x0f09 /* PCI */
#define RADEON_REVISION_ID 0x0f08 /* PCI */

/* 2D scissor registers, SPLL/SCLK system-clock PLL and gating bits,
 * blit source regs, PCI status/class, and the tiled-surface aperture
 * registers (SURFACE0..7). */
#define RADEON_SC_BOTTOM 0x164c
#define RADEON_SC_BOTTOM_RIGHT 0x16f0
#define RADEON_SC_BOTTOM_RIGHT_C 0x1c8c
#define RADEON_SC_LEFT 0x1640
#define RADEON_SC_RIGHT 0x1644
#define RADEON_SC_TOP 0x1648
#define RADEON_SC_TOP_LEFT 0x16ec
#define RADEON_SC_TOP_LEFT_C 0x1c88
# define RADEON_SC_SIGN_MASK_LO 0x8000
# define RADEON_SC_SIGN_MASK_HI 0x80000000
#define RADEON_M_SPLL_REF_FB_DIV 0x000a /* PLL */
# define RADEON_M_SPLL_REF_DIV_SHIFT 0
# define RADEON_M_SPLL_REF_DIV_MASK 0xff
# define RADEON_MPLL_FB_DIV_SHIFT 8
# define RADEON_MPLL_FB_DIV_MASK 0xff
# define RADEON_SPLL_FB_DIV_SHIFT 16
# define RADEON_SPLL_FB_DIV_MASK 0xff
#define RADEON_SPLL_CNTL 0x000c /* PLL */
# define RADEON_SPLL_SLEEP (1 << 0)
# define RADEON_SPLL_RESET (1 << 1)
# define RADEON_SPLL_PCP_MASK 0x7
# define RADEON_SPLL_PCP_SHIFT 8
# define RADEON_SPLL_PVG_MASK 0x7
# define RADEON_SPLL_PVG_SHIFT 11
# define RADEON_SPLL_PDC_MASK 0x3
# define RADEON_SPLL_PDC_SHIFT 14
#define RADEON_SCLK_CNTL 0x000d /* PLL */
# define RADEON_SCLK_SRC_SEL_MASK 0x0007
# define RADEON_DYN_STOP_LAT_MASK 0x00007ff8
# define RADEON_CP_MAX_DYN_STOP_LAT 0x0008
# define RADEON_SCLK_FORCEON_MASK 0xffff8000
# define RADEON_SCLK_FORCE_DISP2 (1<<15)
# define RADEON_SCLK_FORCE_CP (1<<16)
# define RADEON_SCLK_FORCE_HDP (1<<17)
# define RADEON_SCLK_FORCE_DISP1 (1<<18)
# define RADEON_SCLK_FORCE_TOP (1<<19)
# define RADEON_SCLK_FORCE_E2 (1<<20)
# define RADEON_SCLK_FORCE_SE (1<<21)
# define RADEON_SCLK_FORCE_IDCT (1<<22)
# define RADEON_SCLK_FORCE_VIP (1<<23)
# define RADEON_SCLK_FORCE_RE (1<<24)
# define RADEON_SCLK_FORCE_PB (1<<25)
# define RADEON_SCLK_FORCE_TAM (1<<26)
# define RADEON_SCLK_FORCE_TDM (1<<27)
# define RADEON_SCLK_FORCE_RB (1<<28)
# define RADEON_SCLK_FORCE_TV_SCLK (1<<29)
# define RADEON_SCLK_FORCE_SUBPIC (1<<30)
# define RADEON_SCLK_FORCE_OV0 (1<<31)
# define R300_SCLK_FORCE_VAP (1<<21)
# define R300_SCLK_FORCE_SR (1<<25)
# define R300_SCLK_FORCE_PX (1<<26)
# define R300_SCLK_FORCE_TX (1<<27)
# define R300_SCLK_FORCE_US (1<<28)
# define R300_SCLK_FORCE_SU (1<<30)
#define R300_SCLK_CNTL2 0x1e /* PLL */
# define R300_SCLK_TCL_MAX_DYN_STOP_LAT (1<<10)
# define R300_SCLK_GA_MAX_DYN_STOP_LAT (1<<11)
# define R300_SCLK_CBA_MAX_DYN_STOP_LAT (1<<12)
# define R300_SCLK_FORCE_TCL (1<<13)
# define R300_SCLK_FORCE_CBA (1<<14)
# define R300_SCLK_FORCE_GA (1<<15)
#define RADEON_SCLK_MORE_CNTL 0x0035 /* PLL */
# define RADEON_SCLK_MORE_MAX_DYN_STOP_LAT 0x0007
# define RADEON_SCLK_MORE_FORCEON 0x0700
#define RADEON_SDRAM_MODE_REG 0x0158
#define RADEON_SEQ8_DATA 0x03c5 /* VGA */
#define RADEON_SEQ8_IDX 0x03c4 /* VGA */
#define RADEON_SNAPSHOT_F_COUNT 0x0244
#define RADEON_SNAPSHOT_VH_COUNTS 0x0240
#define RADEON_SNAPSHOT_VIF_COUNT 0x024c
#define RADEON_SRC_OFFSET 0x15ac
#define RADEON_SRC_PITCH 0x15b0
#define RADEON_SRC_PITCH_OFFSET 0x1428
#define RADEON_SRC_SC_BOTTOM 0x165c
#define RADEON_SRC_SC_BOTTOM_RIGHT 0x16f4
#define RADEON_SRC_SC_RIGHT 0x1654
#define RADEON_SRC_X 0x1414
#define RADEON_SRC_X_Y 0x1590
#define RADEON_SRC_Y 0x1418
#define RADEON_SRC_Y_X 0x1434
#define RADEON_STATUS 0x0f06 /* PCI */
#define RADEON_SUBPIC_CNTL 0x0540 /* ? */
#define RADEON_SUB_CLASS 0x0f0a /* PCI */
#define RADEON_SURFACE_CNTL 0x0b00
# define RADEON_SURF_TRANSLATION_DIS (1 << 8)
# define RADEON_NONSURF_AP0_SWP_16BPP (1 << 20)
# define RADEON_NONSURF_AP0_SWP_32BPP (1 << 21)
# define RADEON_NONSURF_AP1_SWP_16BPP (1 << 22)
# define RADEON_NONSURF_AP1_SWP_32BPP (1 << 23)
#define RADEON_SURFACE0_INFO 0x0b0c
# define RADEON_SURF_TILE_COLOR_MACRO (0 << 16)
# define RADEON_SURF_TILE_COLOR_BOTH (1 << 16)
# define RADEON_SURF_TILE_DEPTH_32BPP (2 << 16)
# define RADEON_SURF_TILE_DEPTH_16BPP (3 << 16)
# define R200_SURF_TILE_NONE (0 << 16)
# define R200_SURF_TILE_COLOR_MACRO (1 << 16)
# define R200_SURF_TILE_COLOR_MICRO (2 << 16)
# define R200_SURF_TILE_COLOR_BOTH (3 << 16)
# define R200_SURF_TILE_DEPTH_32BPP (4 << 16)
# define R200_SURF_TILE_DEPTH_16BPP (5 << 16)
# define R300_SURF_TILE_NONE (0 << 16)
# define R300_SURF_TILE_COLOR_MACRO (1 << 16)
# define R300_SURF_TILE_DEPTH_32BPP (2 << 16)
# define RADEON_SURF_AP0_SWP_16BPP (1 << 20)
# define RADEON_SURF_AP0_SWP_32BPP (1 << 21)
# define RADEON_SURF_AP1_SWP_16BPP (1 << 22)
# define RADEON_SURF_AP1_SWP_32BPP (1 << 23)
#define RADEON_SURFACE0_LOWER_BOUND 0x0b04
#define RADEON_SURFACE0_UPPER_BOUND 0x0b08
#define RADEON_SURFACE1_INFO 0x0b1c
#define RADEON_SURFACE1_LOWER_BOUND 0x0b14
#define RADEON_SURFACE1_UPPER_BOUND 0x0b18
#define RADEON_SURFACE2_INFO 0x0b2c
#define RADEON_SURFACE2_LOWER_BOUND 0x0b24
#define RADEON_SURFACE2_UPPER_BOUND 0x0b28
#define RADEON_SURFACE3_INFO 0x0b3c
#define RADEON_SURFACE3_LOWER_BOUND 0x0b34
#define RADEON_SURFACE3_UPPER_BOUND 0x0b38
#define RADEON_SURFACE4_INFO 0x0b4c
#define RADEON_SURFACE4_LOWER_BOUND 0x0b44
#define RADEON_SURFACE4_UPPER_BOUND 0x0b48
#define RADEON_SURFACE5_INFO 0x0b5c
#define RADEON_SURFACE5_LOWER_BOUND 0x0b54
#define RADEON_SURFACE5_UPPER_BOUND 0x0b58
#define RADEON_SURFACE6_INFO 0x0b6c
#define RADEON_SURFACE6_LOWER_BOUND 0x0b64
#define RADEON_SURFACE6_UPPER_BOUND 0x0b68
#define RADEON_SURFACE7_INFO 0x0b7c
#define RADEON_SURFACE7_LOWER_BOUND 0x0b74
#define RADEON_SURFACE7_UPPER_BOUND 0x0b78
#define RADEON_SW_SEMAPHORE 0x013c

1713#define RADEON_TEST_DEBUG_CNTL 0x0120
1714#define RADEON_TEST_DEBUG_CNTL__TEST_DEBUG_OUT_EN 0x00000001
1715
1716#define RADEON_TEST_DEBUG_MUX 0x0124
1717#define RADEON_TEST_DEBUG_OUT 0x012c
1718#define RADEON_TMDS_PLL_CNTL 0x02a8
1719#define RADEON_TMDS_TRANSMITTER_CNTL 0x02a4
1720# define RADEON_TMDS_TRANSMITTER_PLLEN 1
1721# define RADEON_TMDS_TRANSMITTER_PLLRST 2
1722#define RADEON_TRAIL_BRES_DEC 0x1614
1723#define RADEON_TRAIL_BRES_ERR 0x160c
1724#define RADEON_TRAIL_BRES_INC 0x1610
1725#define RADEON_TRAIL_X 0x1618
1726#define RADEON_TRAIL_X_SUB 0x1620
1727
/* Pixel clock (VCLK) source select / enable control. The "PLL" tag means
 * this is an index in PLL register space, not a plain MMIO offset -
 * NOTE(review): presumably accessed via the PLL index/data pair; the
 * access mechanism is outside this chunk, confirm against the driver. */
#define RADEON_VCLK_ECP_CNTL 0x0008 /* PLL */
# define RADEON_VCLK_SRC_SEL_MASK 0x03
# define RADEON_VCLK_SRC_SEL_CPUCLK 0x00
# define RADEON_VCLK_SRC_SEL_PSCANCLK 0x01
# define RADEON_VCLK_SRC_SEL_BYTECLK 0x02
# define RADEON_VCLK_SRC_SEL_PPLLCLK 0x03
/* NOTE(review): the trailing lowercase "b" presumably marks active-low
 * ("bar") bits - confirm against the register-level documentation. */
# define RADEON_PIXCLK_ALWAYS_ONb (1<<6)
# define RADEON_PIXCLK_DAC_ALWAYS_ONb (1<<7)
# define R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF (1<<23)
1737
1738#define RADEON_VENDOR_ID 0x0f00 /* PCI */
1739#define RADEON_VGA_DDA_CONFIG 0x02e8
1740#define RADEON_VGA_DDA_ON_OFF 0x02ec
1741#define RADEON_VID_BUFFER_CONTROL 0x0900
1742#define RADEON_VIDEOMUX_CNTL 0x0190
1743
1744/* VIP bus */
1745#define RADEON_VIPH_CH0_DATA 0x0c00
1746#define RADEON_VIPH_CH1_DATA 0x0c04
1747#define RADEON_VIPH_CH2_DATA 0x0c08
1748#define RADEON_VIPH_CH3_DATA 0x0c0c
1749#define RADEON_VIPH_CH0_ADDR 0x0c10
1750#define RADEON_VIPH_CH1_ADDR 0x0c14
1751#define RADEON_VIPH_CH2_ADDR 0x0c18
1752#define RADEON_VIPH_CH3_ADDR 0x0c1c
1753#define RADEON_VIPH_CH0_SBCNT 0x0c20
1754#define RADEON_VIPH_CH1_SBCNT 0x0c24
1755#define RADEON_VIPH_CH2_SBCNT 0x0c28
1756#define RADEON_VIPH_CH3_SBCNT 0x0c2c
1757#define RADEON_VIPH_CH0_ABCNT 0x0c30
1758#define RADEON_VIPH_CH1_ABCNT 0x0c34
1759#define RADEON_VIPH_CH2_ABCNT 0x0c38
1760#define RADEON_VIPH_CH3_ABCNT 0x0c3c
1761#define RADEON_VIPH_CONTROL 0x0c40
1762# define RADEON_VIP_BUSY 0
1763# define RADEON_VIP_IDLE 1
1764# define RADEON_VIP_RESET 2
1765# define RADEON_VIPH_EN (1 << 21)
1766#define RADEON_VIPH_DV_LAT 0x0c44
1767#define RADEON_VIPH_BM_CHUNK 0x0c48
1768#define RADEON_VIPH_DV_INT 0x0c4c
#define RADEON_VIPH_TIMEOUT_STAT 0x0c50
/* STAT and AK intentionally share bit 4: NOTE(review) - presumably the
 * timeout status is read at bit 4 and writing 1 back to the same bit
 * acknowledges (clears) it, i.e. write-1-to-clear semantics; confirm
 * against the VIP host interface documentation. */
#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_STAT 0x00000010
#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REG_AK 0x00000010
#define RADEON_VIPH_TIMEOUT_STAT__VIPH_REGR_DIS 0x01000000
1773
1774#define RADEON_VIPH_REG_DATA 0x0084
1775#define RADEON_VIPH_REG_ADDR 0x0080
1776
1777
/* Command-processor wait control: bits select which engine/CRTC
 * conditions must be satisfied before processing continues. */
#define RADEON_WAIT_UNTIL 0x1720
# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
# define RADEON_WAIT_RE_CRTC_VLINE (1 << 1)
# define RADEON_WAIT_FE_CRTC_VLINE (1 << 2)
# define RADEON_WAIT_CRTC_VLINE (1 << 3)
# define RADEON_WAIT_DMA_VID_IDLE (1 << 8)
# define RADEON_WAIT_DMA_GUI_IDLE (1 << 9)
# define RADEON_WAIT_CMDFIFO (1 << 10) /* wait for CMDFIFO_ENTRIES */
# define RADEON_WAIT_OV0_FLIP (1 << 11)
# define RADEON_WAIT_AGP_FLUSH (1 << 13)
# define RADEON_WAIT_2D_IDLE (1 << 14)
# define RADEON_WAIT_3D_IDLE (1 << 15)
# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
# define RADEON_CMDFIFO_ENTRIES_SHIFT 10
# define RADEON_CMDFIFO_ENTRIES_MASK 0x7f
# define RADEON_WAIT_VAP_IDLE (1 << 28)
# define RADEON_WAIT_BOTH_CRTC_PFLIP (1 << 30)
/* Unsigned literals for bit 31: (1 << 31) shifts into the sign bit of a
 * signed int, which is undefined behavior in C (C11 6.5.7). Bit pattern
 * is unchanged. */
# define RADEON_ENG_DISPLAY_SELECT_CRTC0 (0U << 31)
# define RADEON_ENG_DISPLAY_SELECT_CRTC1 (1U << 31)
1799
1800#define RADEON_X_MPLL_REF_FB_DIV 0x000a /* PLL */
1801#define RADEON_XCLK_CNTL 0x000d /* PLL */
1802#define RADEON_XDLL_CNTL 0x000c /* PLL */
1803#define RADEON_XPLL_CNTL 0x000b /* PLL */
1804
1805
1806
1807 /* Registers for 3D/TCL */
1808#define RADEON_PP_BORDER_COLOR_0 0x1d40
1809#define RADEON_PP_BORDER_COLOR_1 0x1d44
1810#define RADEON_PP_BORDER_COLOR_2 0x1d48
1811#define RADEON_PP_CNTL 0x1c38
1812# define RADEON_STIPPLE_ENABLE (1 << 0)
1813# define RADEON_SCISSOR_ENABLE (1 << 1)
1814# define RADEON_PATTERN_ENABLE (1 << 2)
1815# define RADEON_SHADOW_ENABLE (1 << 3)
1816# define RADEON_TEX_ENABLE_MASK (0xf << 4)
1817# define RADEON_TEX_0_ENABLE (1 << 4)
1818# define RADEON_TEX_1_ENABLE (1 << 5)
1819# define RADEON_TEX_2_ENABLE (1 << 6)
1820# define RADEON_TEX_3_ENABLE (1 << 7)
1821# define RADEON_TEX_BLEND_ENABLE_MASK (0xf << 12)
1822# define RADEON_TEX_BLEND_0_ENABLE (1 << 12)
1823# define RADEON_TEX_BLEND_1_ENABLE (1 << 13)
1824# define RADEON_TEX_BLEND_2_ENABLE (1 << 14)
1825# define RADEON_TEX_BLEND_3_ENABLE (1 << 15)
1826# define RADEON_PLANAR_YUV_ENABLE (1 << 20)
1827# define RADEON_SPECULAR_ENABLE (1 << 21)
1828# define RADEON_FOG_ENABLE (1 << 22)
1829# define RADEON_ALPHA_TEST_ENABLE (1 << 23)
1830# define RADEON_ANTI_ALIAS_NONE (0 << 24)
1831# define RADEON_ANTI_ALIAS_LINE (1 << 24)
1832# define RADEON_ANTI_ALIAS_POLY (2 << 24)
1833# define RADEON_ANTI_ALIAS_LINE_POLY (3 << 24)
1834# define RADEON_BUMP_MAP_ENABLE (1 << 26)
1835# define RADEON_BUMPED_MAP_T0 (0 << 27)
1836# define RADEON_BUMPED_MAP_T1 (1 << 27)
1837# define RADEON_BUMPED_MAP_T2 (2 << 27)
1838# define RADEON_TEX_3D_ENABLE_0 (1 << 29)
1839# define RADEON_TEX_3D_ENABLE_1 (1 << 30)
# define RADEON_MC_ENABLE (1U << 31) /* unsigned: shifting 1 into bit 31 of a signed int is UB (C11 6.5.7); same bit pattern */
1841#define RADEON_PP_FOG_COLOR 0x1c18
1842# define RADEON_FOG_COLOR_MASK 0x00ffffff
1843# define RADEON_FOG_VERTEX (0 << 24)
1844# define RADEON_FOG_TABLE (1 << 24)
1845# define RADEON_FOG_USE_DEPTH (0 << 25)
1846# define RADEON_FOG_USE_DIFFUSE_ALPHA (2 << 25)
1847# define RADEON_FOG_USE_SPEC_ALPHA (3 << 25)
1848#define RADEON_PP_LUM_MATRIX 0x1d00
1849#define RADEON_PP_MISC 0x1c14
1850# define RADEON_REF_ALPHA_MASK 0x000000ff
1851# define RADEON_ALPHA_TEST_FAIL (0 << 8)
1852# define RADEON_ALPHA_TEST_LESS (1 << 8)
1853# define RADEON_ALPHA_TEST_LEQUAL (2 << 8)
1854# define RADEON_ALPHA_TEST_EQUAL (3 << 8)
1855# define RADEON_ALPHA_TEST_GEQUAL (4 << 8)
1856# define RADEON_ALPHA_TEST_GREATER (5 << 8)
1857# define RADEON_ALPHA_TEST_NEQUAL (6 << 8)
1858# define RADEON_ALPHA_TEST_PASS (7 << 8)
1859# define RADEON_ALPHA_TEST_OP_MASK (7 << 8)
1860# define RADEON_CHROMA_FUNC_FAIL (0 << 16)
1861# define RADEON_CHROMA_FUNC_PASS (1 << 16)
1862# define RADEON_CHROMA_FUNC_NEQUAL (2 << 16)
1863# define RADEON_CHROMA_FUNC_EQUAL (3 << 16)
1864# define RADEON_CHROMA_KEY_NEAREST (0 << 18)
1865# define RADEON_CHROMA_KEY_ZERO (1 << 18)
1866# define RADEON_SHADOW_ID_AUTO_INC (1 << 20)
1867# define RADEON_SHADOW_FUNC_EQUAL (0 << 21)
1868# define RADEON_SHADOW_FUNC_NEQUAL (1 << 21)
1869# define RADEON_SHADOW_PASS_1 (0 << 22)
1870# define RADEON_SHADOW_PASS_2 (1 << 22)
1871# define RADEON_RIGHT_HAND_CUBE_D3D (0 << 24)
1872# define RADEON_RIGHT_HAND_CUBE_OGL (1 << 24)
1873#define RADEON_PP_ROT_MATRIX_0 0x1d58
1874#define RADEON_PP_ROT_MATRIX_1 0x1d5c
1875#define RADEON_PP_TXFILTER_0 0x1c54
1876#define RADEON_PP_TXFILTER_1 0x1c6c
1877#define RADEON_PP_TXFILTER_2 0x1c84
1878# define RADEON_MAG_FILTER_NEAREST (0 << 0)
1879# define RADEON_MAG_FILTER_LINEAR (1 << 0)
1880# define RADEON_MAG_FILTER_MASK (1 << 0)
1881# define RADEON_MIN_FILTER_NEAREST (0 << 1)
1882# define RADEON_MIN_FILTER_LINEAR (1 << 1)
1883# define RADEON_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1)
1884# define RADEON_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1)
1885# define RADEON_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1)
1886# define RADEON_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1)
1887# define RADEON_MIN_FILTER_ANISO_NEAREST (8 << 1)
1888# define RADEON_MIN_FILTER_ANISO_LINEAR (9 << 1)
1889# define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1)
1890# define RADEON_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1)
1891# define RADEON_MIN_FILTER_MASK (15 << 1)
1892# define RADEON_MAX_ANISO_1_TO_1 (0 << 5)
1893# define RADEON_MAX_ANISO_2_TO_1 (1 << 5)
1894# define RADEON_MAX_ANISO_4_TO_1 (2 << 5)
1895# define RADEON_MAX_ANISO_8_TO_1 (3 << 5)
1896# define RADEON_MAX_ANISO_16_TO_1 (4 << 5)
1897# define RADEON_MAX_ANISO_MASK (7 << 5)
1898# define RADEON_LOD_BIAS_MASK (0xff << 8)
1899# define RADEON_LOD_BIAS_SHIFT 8
1900# define RADEON_MAX_MIP_LEVEL_MASK (0x0f << 16)
1901# define RADEON_MAX_MIP_LEVEL_SHIFT 16
1902# define RADEON_YUV_TO_RGB (1 << 20)
1903# define RADEON_YUV_TEMPERATURE_COOL (0 << 21)
1904# define RADEON_YUV_TEMPERATURE_HOT (1 << 21)
1905# define RADEON_YUV_TEMPERATURE_MASK (1 << 21)
1906# define RADEON_WRAPEN_S (1 << 22)
1907# define RADEON_CLAMP_S_WRAP (0 << 23)
1908# define RADEON_CLAMP_S_MIRROR (1 << 23)
1909# define RADEON_CLAMP_S_CLAMP_LAST (2 << 23)
1910# define RADEON_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23)
1911# define RADEON_CLAMP_S_CLAMP_BORDER (4 << 23)
1912# define RADEON_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23)
1913# define RADEON_CLAMP_S_CLAMP_GL (6 << 23)
1914# define RADEON_CLAMP_S_MIRROR_CLAMP_GL (7 << 23)
1915# define RADEON_CLAMP_S_MASK (7 << 23)
1916# define RADEON_WRAPEN_T (1 << 26)
1917# define RADEON_CLAMP_T_WRAP (0 << 27)
1918# define RADEON_CLAMP_T_MIRROR (1 << 27)
1919# define RADEON_CLAMP_T_CLAMP_LAST (2 << 27)
1920# define RADEON_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27)
1921# define RADEON_CLAMP_T_CLAMP_BORDER (4 << 27)
1922# define RADEON_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27)
1923# define RADEON_CLAMP_T_CLAMP_GL (6 << 27)
1924# define RADEON_CLAMP_T_MIRROR_CLAMP_GL (7 << 27)
1925# define RADEON_CLAMP_T_MASK (7 << 27)
/* Texture border mode select, bit 31. Unsigned literals: shifting into
 * the sign bit of a signed int is undefined behavior (C11 6.5.7). */
# define RADEON_BORDER_MODE_OGL (0U << 31)
# define RADEON_BORDER_MODE_D3D (1U << 31)
1928#define RADEON_PP_TXFORMAT_0 0x1c58
1929#define RADEON_PP_TXFORMAT_1 0x1c70
1930#define RADEON_PP_TXFORMAT_2 0x1c88
1931# define RADEON_TXFORMAT_I8 (0 << 0)
1932# define RADEON_TXFORMAT_AI88 (1 << 0)
1933# define RADEON_TXFORMAT_RGB332 (2 << 0)
1934# define RADEON_TXFORMAT_ARGB1555 (3 << 0)
1935# define RADEON_TXFORMAT_RGB565 (4 << 0)
1936# define RADEON_TXFORMAT_ARGB4444 (5 << 0)
1937# define RADEON_TXFORMAT_ARGB8888 (6 << 0)
1938# define RADEON_TXFORMAT_RGBA8888 (7 << 0)
1939# define RADEON_TXFORMAT_Y8 (8 << 0)
1940# define RADEON_TXFORMAT_VYUY422 (10 << 0)
1941# define RADEON_TXFORMAT_YVYU422 (11 << 0)
1942# define RADEON_TXFORMAT_DXT1 (12 << 0)
1943# define RADEON_TXFORMAT_DXT23 (14 << 0)
1944# define RADEON_TXFORMAT_DXT45 (15 << 0)
1945# define RADEON_TXFORMAT_FORMAT_MASK (31 << 0)
1946# define RADEON_TXFORMAT_FORMAT_SHIFT 0
1947# define RADEON_TXFORMAT_APPLE_YUV_MODE (1 << 5)
1948# define RADEON_TXFORMAT_ALPHA_IN_MAP (1 << 6)
1949# define RADEON_TXFORMAT_NON_POWER2 (1 << 7)
1950# define RADEON_TXFORMAT_WIDTH_MASK (15 << 8)
1951# define RADEON_TXFORMAT_WIDTH_SHIFT 8
1952# define RADEON_TXFORMAT_HEIGHT_MASK (15 << 12)
1953# define RADEON_TXFORMAT_HEIGHT_SHIFT 12
1954# define RADEON_TXFORMAT_F5_WIDTH_MASK (15 << 16)
1955# define RADEON_TXFORMAT_F5_WIDTH_SHIFT 16
1956# define RADEON_TXFORMAT_F5_HEIGHT_MASK (15 << 20)
1957# define RADEON_TXFORMAT_F5_HEIGHT_SHIFT 20
1958# define RADEON_TXFORMAT_ST_ROUTE_STQ0 (0 << 24)
1959# define RADEON_TXFORMAT_ST_ROUTE_MASK (3 << 24)
1960# define RADEON_TXFORMAT_ST_ROUTE_STQ1 (1 << 24)
1961# define RADEON_TXFORMAT_ST_ROUTE_STQ2 (2 << 24)
1962# define RADEON_TXFORMAT_ENDIAN_NO_SWAP (0 << 26)
1963# define RADEON_TXFORMAT_ENDIAN_16BPP_SWAP (1 << 26)
1964# define RADEON_TXFORMAT_ENDIAN_32BPP_SWAP (2 << 26)
1965# define RADEON_TXFORMAT_ENDIAN_HALFDW_SWAP (3 << 26)
1966# define RADEON_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
1967# define RADEON_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
1968# define RADEON_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
# define RADEON_TXFORMAT_PERSPECTIVE_ENABLE (1U << 31) /* unsigned: bit-31 shift on signed int is UB (C11 6.5.7); same bit pattern */
1970#define RADEON_PP_CUBIC_FACES_0 0x1d24
1971#define RADEON_PP_CUBIC_FACES_1 0x1d28
1972#define RADEON_PP_CUBIC_FACES_2 0x1d2c
1973# define RADEON_FACE_WIDTH_1_SHIFT 0
1974# define RADEON_FACE_HEIGHT_1_SHIFT 4
1975# define RADEON_FACE_WIDTH_1_MASK (0xf << 0)
1976# define RADEON_FACE_HEIGHT_1_MASK (0xf << 4)
1977# define RADEON_FACE_WIDTH_2_SHIFT 8
1978# define RADEON_FACE_HEIGHT_2_SHIFT 12
1979# define RADEON_FACE_WIDTH_2_MASK (0xf << 8)
1980# define RADEON_FACE_HEIGHT_2_MASK (0xf << 12)
1981# define RADEON_FACE_WIDTH_3_SHIFT 16
1982# define RADEON_FACE_HEIGHT_3_SHIFT 20
1983# define RADEON_FACE_WIDTH_3_MASK (0xf << 16)
1984# define RADEON_FACE_HEIGHT_3_MASK (0xf << 20)
1985# define RADEON_FACE_WIDTH_4_SHIFT 24
1986# define RADEON_FACE_HEIGHT_4_SHIFT 28
1987# define RADEON_FACE_WIDTH_4_MASK (0xf << 24)
1988# define RADEON_FACE_HEIGHT_4_MASK (0xf << 28)
1989
1990#define RADEON_PP_TXOFFSET_0 0x1c5c
1991#define RADEON_PP_TXOFFSET_1 0x1c74
1992#define RADEON_PP_TXOFFSET_2 0x1c8c
1993# define RADEON_TXO_ENDIAN_NO_SWAP (0 << 0)
1994# define RADEON_TXO_ENDIAN_BYTE_SWAP (1 << 0)
1995# define RADEON_TXO_ENDIAN_WORD_SWAP (2 << 0)
1996# define RADEON_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
1997# define RADEON_TXO_MACRO_LINEAR (0 << 2)
1998# define RADEON_TXO_MACRO_TILE (1 << 2)
1999# define RADEON_TXO_MICRO_LINEAR (0 << 3)
2000# define RADEON_TXO_MICRO_TILE_X2 (1 << 3)
2001# define RADEON_TXO_MICRO_TILE_OPT (2 << 3)
2002# define RADEON_TXO_OFFSET_MASK 0xffffffe0
2003# define RADEON_TXO_OFFSET_SHIFT 5
2004
2005#define RADEON_PP_CUBIC_OFFSET_T0_0 0x1dd0 /* bits [31:5] */
2006#define RADEON_PP_CUBIC_OFFSET_T0_1 0x1dd4
2007#define RADEON_PP_CUBIC_OFFSET_T0_2 0x1dd8
2008#define RADEON_PP_CUBIC_OFFSET_T0_3 0x1ddc
2009#define RADEON_PP_CUBIC_OFFSET_T0_4 0x1de0
2010#define RADEON_PP_CUBIC_OFFSET_T1_0 0x1e00
2011#define RADEON_PP_CUBIC_OFFSET_T1_1 0x1e04
2012#define RADEON_PP_CUBIC_OFFSET_T1_2 0x1e08
2013#define RADEON_PP_CUBIC_OFFSET_T1_3 0x1e0c
2014#define RADEON_PP_CUBIC_OFFSET_T1_4 0x1e10
2015#define RADEON_PP_CUBIC_OFFSET_T2_0 0x1e14
2016#define RADEON_PP_CUBIC_OFFSET_T2_1 0x1e18
2017#define RADEON_PP_CUBIC_OFFSET_T2_2 0x1e1c
2018#define RADEON_PP_CUBIC_OFFSET_T2_3 0x1e20
2019#define RADEON_PP_CUBIC_OFFSET_T2_4 0x1e24
2020
2021#define RADEON_PP_TEX_SIZE_0 0x1d04 /* NPOT */
2022#define RADEON_PP_TEX_SIZE_1 0x1d0c
2023#define RADEON_PP_TEX_SIZE_2 0x1d14
2024# define RADEON_TEX_USIZE_MASK (0x7ff << 0)
2025# define RADEON_TEX_USIZE_SHIFT 0
2026# define RADEON_TEX_VSIZE_MASK (0x7ff << 16)
2027# define RADEON_TEX_VSIZE_SHIFT 16
2028# define RADEON_SIGNED_RGB_MASK (1 << 30)
2029# define RADEON_SIGNED_RGB_SHIFT 30
# define RADEON_SIGNED_ALPHA_MASK (1U << 31) /* unsigned: bit-31 shift on signed int is UB (C11 6.5.7); same bit pattern */
2031# define RADEON_SIGNED_ALPHA_SHIFT 31
2032#define RADEON_PP_TEX_PITCH_0 0x1d08 /* NPOT */
2033#define RADEON_PP_TEX_PITCH_1 0x1d10 /* NPOT */
2034#define RADEON_PP_TEX_PITCH_2 0x1d18 /* NPOT */
2035/* note: bits 13-5: 32 byte aligned stride of texture map */
2036
2037#define RADEON_PP_TXCBLEND_0 0x1c60
2038#define RADEON_PP_TXCBLEND_1 0x1c78
2039#define RADEON_PP_TXCBLEND_2 0x1c90
2040# define RADEON_COLOR_ARG_A_SHIFT 0
2041# define RADEON_COLOR_ARG_A_MASK (0x1f << 0)
2042# define RADEON_COLOR_ARG_A_ZERO (0 << 0)
2043# define RADEON_COLOR_ARG_A_CURRENT_COLOR (2 << 0)
2044# define RADEON_COLOR_ARG_A_CURRENT_ALPHA (3 << 0)
2045# define RADEON_COLOR_ARG_A_DIFFUSE_COLOR (4 << 0)
2046# define RADEON_COLOR_ARG_A_DIFFUSE_ALPHA (5 << 0)
2047# define RADEON_COLOR_ARG_A_SPECULAR_COLOR (6 << 0)
2048# define RADEON_COLOR_ARG_A_SPECULAR_ALPHA (7 << 0)
2049# define RADEON_COLOR_ARG_A_TFACTOR_COLOR (8 << 0)
2050# define RADEON_COLOR_ARG_A_TFACTOR_ALPHA (9 << 0)
2051# define RADEON_COLOR_ARG_A_T0_COLOR (10 << 0)
2052# define RADEON_COLOR_ARG_A_T0_ALPHA (11 << 0)
2053# define RADEON_COLOR_ARG_A_T1_COLOR (12 << 0)
2054# define RADEON_COLOR_ARG_A_T1_ALPHA (13 << 0)
2055# define RADEON_COLOR_ARG_A_T2_COLOR (14 << 0)
2056# define RADEON_COLOR_ARG_A_T2_ALPHA (15 << 0)
2057# define RADEON_COLOR_ARG_A_T3_COLOR (16 << 0)
2058# define RADEON_COLOR_ARG_A_T3_ALPHA (17 << 0)
2059# define RADEON_COLOR_ARG_B_SHIFT 5
2060# define RADEON_COLOR_ARG_B_MASK (0x1f << 5)
2061# define RADEON_COLOR_ARG_B_ZERO (0 << 5)
2062# define RADEON_COLOR_ARG_B_CURRENT_COLOR (2 << 5)
2063# define RADEON_COLOR_ARG_B_CURRENT_ALPHA (3 << 5)
2064# define RADEON_COLOR_ARG_B_DIFFUSE_COLOR (4 << 5)
2065# define RADEON_COLOR_ARG_B_DIFFUSE_ALPHA (5 << 5)
2066# define RADEON_COLOR_ARG_B_SPECULAR_COLOR (6 << 5)
2067# define RADEON_COLOR_ARG_B_SPECULAR_ALPHA (7 << 5)
2068# define RADEON_COLOR_ARG_B_TFACTOR_COLOR (8 << 5)
2069# define RADEON_COLOR_ARG_B_TFACTOR_ALPHA (9 << 5)
2070# define RADEON_COLOR_ARG_B_T0_COLOR (10 << 5)
2071# define RADEON_COLOR_ARG_B_T0_ALPHA (11 << 5)
2072# define RADEON_COLOR_ARG_B_T1_COLOR (12 << 5)
2073# define RADEON_COLOR_ARG_B_T1_ALPHA (13 << 5)
2074# define RADEON_COLOR_ARG_B_T2_COLOR (14 << 5)
2075# define RADEON_COLOR_ARG_B_T2_ALPHA (15 << 5)
2076# define RADEON_COLOR_ARG_B_T3_COLOR (16 << 5)
2077# define RADEON_COLOR_ARG_B_T3_ALPHA (17 << 5)
2078# define RADEON_COLOR_ARG_C_SHIFT 10
2079# define RADEON_COLOR_ARG_C_MASK (0x1f << 10)
2080# define RADEON_COLOR_ARG_C_ZERO (0 << 10)
2081# define RADEON_COLOR_ARG_C_CURRENT_COLOR (2 << 10)
2082# define RADEON_COLOR_ARG_C_CURRENT_ALPHA (3 << 10)
2083# define RADEON_COLOR_ARG_C_DIFFUSE_COLOR (4 << 10)
2084# define RADEON_COLOR_ARG_C_DIFFUSE_ALPHA (5 << 10)
2085# define RADEON_COLOR_ARG_C_SPECULAR_COLOR (6 << 10)
2086# define RADEON_COLOR_ARG_C_SPECULAR_ALPHA (7 << 10)
2087# define RADEON_COLOR_ARG_C_TFACTOR_COLOR (8 << 10)
2088# define RADEON_COLOR_ARG_C_TFACTOR_ALPHA (9 << 10)
2089# define RADEON_COLOR_ARG_C_T0_COLOR (10 << 10)
2090# define RADEON_COLOR_ARG_C_T0_ALPHA (11 << 10)
2091# define RADEON_COLOR_ARG_C_T1_COLOR (12 << 10)
2092# define RADEON_COLOR_ARG_C_T1_ALPHA (13 << 10)
2093# define RADEON_COLOR_ARG_C_T2_COLOR (14 << 10)
2094# define RADEON_COLOR_ARG_C_T2_ALPHA (15 << 10)
2095# define RADEON_COLOR_ARG_C_T3_COLOR (16 << 10)
2096# define RADEON_COLOR_ARG_C_T3_ALPHA (17 << 10)
2097# define RADEON_COMP_ARG_A (1 << 15)
2098# define RADEON_COMP_ARG_A_SHIFT 15
2099# define RADEON_COMP_ARG_B (1 << 16)
2100# define RADEON_COMP_ARG_B_SHIFT 16
2101# define RADEON_COMP_ARG_C (1 << 17)
2102# define RADEON_COMP_ARG_C_SHIFT 17
2103# define RADEON_BLEND_CTL_MASK (7 << 18)
2104# define RADEON_BLEND_CTL_ADD (0 << 18)
2105# define RADEON_BLEND_CTL_SUBTRACT (1 << 18)
2106# define RADEON_BLEND_CTL_ADDSIGNED (2 << 18)
2107# define RADEON_BLEND_CTL_BLEND (3 << 18)
2108# define RADEON_BLEND_CTL_DOT3 (4 << 18)
2109# define RADEON_SCALE_SHIFT 21
2110# define RADEON_SCALE_MASK (3 << 21)
2111# define RADEON_SCALE_1X (0 << 21)
2112# define RADEON_SCALE_2X (1 << 21)
2113# define RADEON_SCALE_4X (2 << 21)
2114# define RADEON_CLAMP_TX (1 << 23)
2115# define RADEON_T0_EQ_TCUR (1 << 24)
2116# define RADEON_T1_EQ_TCUR (1 << 25)
2117# define RADEON_T2_EQ_TCUR (1 << 26)
2118# define RADEON_T3_EQ_TCUR (1 << 27)
2119# define RADEON_COLOR_ARG_MASK 0x1f
2120# define RADEON_COMP_ARG_SHIFT 15
2121#define RADEON_PP_TXABLEND_0 0x1c64
2122#define RADEON_PP_TXABLEND_1 0x1c7c
2123#define RADEON_PP_TXABLEND_2 0x1c94
2124# define RADEON_ALPHA_ARG_A_SHIFT 0
2125# define RADEON_ALPHA_ARG_A_MASK (0xf << 0)
2126# define RADEON_ALPHA_ARG_A_ZERO (0 << 0)
2127# define RADEON_ALPHA_ARG_A_CURRENT_ALPHA (1 << 0)
2128# define RADEON_ALPHA_ARG_A_DIFFUSE_ALPHA (2 << 0)
2129# define RADEON_ALPHA_ARG_A_SPECULAR_ALPHA (3 << 0)
2130# define RADEON_ALPHA_ARG_A_TFACTOR_ALPHA (4 << 0)
2131# define RADEON_ALPHA_ARG_A_T0_ALPHA (5 << 0)
2132# define RADEON_ALPHA_ARG_A_T1_ALPHA (6 << 0)
2133# define RADEON_ALPHA_ARG_A_T2_ALPHA (7 << 0)
2134# define RADEON_ALPHA_ARG_A_T3_ALPHA (8 << 0)
2135# define RADEON_ALPHA_ARG_B_SHIFT 4
2136# define RADEON_ALPHA_ARG_B_MASK (0xf << 4)
2137# define RADEON_ALPHA_ARG_B_ZERO (0 << 4)
2138# define RADEON_ALPHA_ARG_B_CURRENT_ALPHA (1 << 4)
2139# define RADEON_ALPHA_ARG_B_DIFFUSE_ALPHA (2 << 4)
2140# define RADEON_ALPHA_ARG_B_SPECULAR_ALPHA (3 << 4)
2141# define RADEON_ALPHA_ARG_B_TFACTOR_ALPHA (4 << 4)
2142# define RADEON_ALPHA_ARG_B_T0_ALPHA (5 << 4)
2143# define RADEON_ALPHA_ARG_B_T1_ALPHA (6 << 4)
2144# define RADEON_ALPHA_ARG_B_T2_ALPHA (7 << 4)
2145# define RADEON_ALPHA_ARG_B_T3_ALPHA (8 << 4)
2146# define RADEON_ALPHA_ARG_C_SHIFT 8
2147# define RADEON_ALPHA_ARG_C_MASK (0xf << 8)
2148# define RADEON_ALPHA_ARG_C_ZERO (0 << 8)
2149# define RADEON_ALPHA_ARG_C_CURRENT_ALPHA (1 << 8)
2150# define RADEON_ALPHA_ARG_C_DIFFUSE_ALPHA (2 << 8)
2151# define RADEON_ALPHA_ARG_C_SPECULAR_ALPHA (3 << 8)
2152# define RADEON_ALPHA_ARG_C_TFACTOR_ALPHA (4 << 8)
2153# define RADEON_ALPHA_ARG_C_T0_ALPHA (5 << 8)
2154# define RADEON_ALPHA_ARG_C_T1_ALPHA (6 << 8)
2155# define RADEON_ALPHA_ARG_C_T2_ALPHA (7 << 8)
2156# define RADEON_ALPHA_ARG_C_T3_ALPHA (8 << 8)
2157# define RADEON_DOT_ALPHA_DONT_REPLICATE (1 << 9)
2158# define RADEON_ALPHA_ARG_MASK 0xf
2159
2160#define RADEON_PP_TFACTOR_0 0x1c68
2161#define RADEON_PP_TFACTOR_1 0x1c80
2162#define RADEON_PP_TFACTOR_2 0x1c98
2163
2164#define RADEON_RB3D_BLENDCNTL 0x1c20
2165# define RADEON_COMB_FCN_MASK (3 << 12)
2166# define RADEON_COMB_FCN_ADD_CLAMP (0 << 12)
2167# define RADEON_COMB_FCN_ADD_NOCLAMP (1 << 12)
2168# define RADEON_COMB_FCN_SUB_CLAMP (2 << 12)
2169# define RADEON_COMB_FCN_SUB_NOCLAMP (3 << 12)
2170# define RADEON_SRC_BLEND_GL_ZERO (32 << 16)
2171# define RADEON_SRC_BLEND_GL_ONE (33 << 16)
2172# define RADEON_SRC_BLEND_GL_SRC_COLOR (34 << 16)
2173# define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
2174# define RADEON_SRC_BLEND_GL_DST_COLOR (36 << 16)
2175# define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
2176# define RADEON_SRC_BLEND_GL_SRC_ALPHA (38 << 16)
2177# define RADEON_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
2178# define RADEON_SRC_BLEND_GL_DST_ALPHA (40 << 16)
2179# define RADEON_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
2180# define RADEON_SRC_BLEND_GL_SRC_ALPHA_SATURATE (42 << 16)
2181# define RADEON_SRC_BLEND_MASK (63 << 16)
2182# define RADEON_DST_BLEND_GL_ZERO (32 << 24)
2183# define RADEON_DST_BLEND_GL_ONE (33 << 24)
2184# define RADEON_DST_BLEND_GL_SRC_COLOR (34 << 24)
2185# define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
2186# define RADEON_DST_BLEND_GL_DST_COLOR (36 << 24)
2187# define RADEON_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
2188# define RADEON_DST_BLEND_GL_SRC_ALPHA (38 << 24)
2189# define RADEON_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
2190# define RADEON_DST_BLEND_GL_DST_ALPHA (40 << 24)
2191# define RADEON_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
2192# define RADEON_DST_BLEND_MASK (63 << 24)
2193#define RADEON_RB3D_CNTL 0x1c3c
2194# define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
2195# define RADEON_PLANE_MASK_ENABLE (1 << 1)
2196# define RADEON_DITHER_ENABLE (1 << 2)
2197# define RADEON_ROUND_ENABLE (1 << 3)
2198# define RADEON_SCALE_DITHER_ENABLE (1 << 4)
2199# define RADEON_DITHER_INIT (1 << 5)
2200# define RADEON_ROP_ENABLE (1 << 6)
2201# define RADEON_STENCIL_ENABLE (1 << 7)
2202# define RADEON_Z_ENABLE (1 << 8)
2203# define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9)
2204# define RADEON_RB3D_COLOR_FORMAT_SHIFT 10
2205
2206# define RADEON_COLOR_FORMAT_ARGB1555 3
2207# define RADEON_COLOR_FORMAT_RGB565 4
2208# define RADEON_COLOR_FORMAT_ARGB8888 6
2209# define RADEON_COLOR_FORMAT_RGB332 7
2210# define RADEON_COLOR_FORMAT_Y8 8
2211# define RADEON_COLOR_FORMAT_RGB8 9
2212# define RADEON_COLOR_FORMAT_YUV422_VYUY 11
2213# define RADEON_COLOR_FORMAT_YUV422_YVYU 12
2214# define RADEON_COLOR_FORMAT_aYUV444 14
2215# define RADEON_COLOR_FORMAT_ARGB4444 15
2216
2217# define RADEON_CLRCMP_FLIP_ENABLE (1 << 14)
2218#define RADEON_RB3D_COLOROFFSET 0x1c40
2219# define RADEON_COLOROFFSET_MASK 0xfffffff0
#define RADEON_RB3D_COLORPITCH 0x1c48
/* Pitch field mask, bits [12:3] (value 0x1ff8). The original literal had
 * a stray ninth hex digit (0x000001ff8) - numerically identical, but
 * normalized to 8 digits to match RADEON_DEPTHPITCH_MASK. */
# define RADEON_COLORPITCH_MASK 0x00001ff8
2222# define RADEON_COLOR_TILE_ENABLE (1 << 16)
2223# define RADEON_COLOR_MICROTILE_ENABLE (1 << 17)
2224# define RADEON_COLOR_ENDIAN_NO_SWAP (0 << 18)
2225# define RADEON_COLOR_ENDIAN_WORD_SWAP (1 << 18)
2226# define RADEON_COLOR_ENDIAN_DWORD_SWAP (2 << 18)
2227#define RADEON_RB3D_DEPTHOFFSET 0x1c24
2228#define RADEON_RB3D_DEPTHPITCH 0x1c28
2229# define RADEON_DEPTHPITCH_MASK 0x00001ff8
2230# define RADEON_DEPTH_ENDIAN_NO_SWAP (0 << 18)
2231# define RADEON_DEPTH_ENDIAN_WORD_SWAP (1 << 18)
2232# define RADEON_DEPTH_ENDIAN_DWORD_SWAP (2 << 18)
2233#define RADEON_RB3D_PLANEMASK 0x1d84
2234#define RADEON_RB3D_ROPCNTL 0x1d80
2235# define RADEON_ROP_MASK (15 << 8)
2236# define RADEON_ROP_CLEAR (0 << 8)
2237# define RADEON_ROP_NOR (1 << 8)
2238# define RADEON_ROP_AND_INVERTED (2 << 8)
2239# define RADEON_ROP_COPY_INVERTED (3 << 8)
2240# define RADEON_ROP_AND_REVERSE (4 << 8)
2241# define RADEON_ROP_INVERT (5 << 8)
2242# define RADEON_ROP_XOR (6 << 8)
2243# define RADEON_ROP_NAND (7 << 8)
2244# define RADEON_ROP_AND (8 << 8)
2245# define RADEON_ROP_EQUIV (9 << 8)
2246# define RADEON_ROP_NOOP (10 << 8)
2247# define RADEON_ROP_OR_INVERTED (11 << 8)
2248# define RADEON_ROP_COPY (12 << 8)
2249# define RADEON_ROP_OR_REVERSE (13 << 8)
2250# define RADEON_ROP_OR (14 << 8)
2251# define RADEON_ROP_SET (15 << 8)
2252#define RADEON_RB3D_STENCILREFMASK 0x1d7c
2253# define RADEON_STENCIL_REF_SHIFT 0
2254# define RADEON_STENCIL_REF_MASK (0xff << 0)
2255# define RADEON_STENCIL_MASK_SHIFT 16
2256# define RADEON_STENCIL_VALUE_MASK (0xff << 16)
2257# define RADEON_STENCIL_WRITEMASK_SHIFT 24
2258# define RADEON_STENCIL_WRITE_MASK (0xff << 24)
2259#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
2260# define RADEON_DEPTH_FORMAT_MASK (0xf << 0)
2261# define RADEON_DEPTH_FORMAT_16BIT_INT_Z (0 << 0)
2262# define RADEON_DEPTH_FORMAT_24BIT_INT_Z (2 << 0)
2263# define RADEON_DEPTH_FORMAT_24BIT_FLOAT_Z (3 << 0)
2264# define RADEON_DEPTH_FORMAT_32BIT_INT_Z (4 << 0)
2265# define RADEON_DEPTH_FORMAT_32BIT_FLOAT_Z (5 << 0)
2266# define RADEON_DEPTH_FORMAT_16BIT_FLOAT_W (7 << 0)
2267# define RADEON_DEPTH_FORMAT_24BIT_FLOAT_W (9 << 0)
2268# define RADEON_DEPTH_FORMAT_32BIT_FLOAT_W (11 << 0)
2269# define RADEON_Z_TEST_NEVER (0 << 4)
2270# define RADEON_Z_TEST_LESS (1 << 4)
2271# define RADEON_Z_TEST_LEQUAL (2 << 4)
2272# define RADEON_Z_TEST_EQUAL (3 << 4)
2273# define RADEON_Z_TEST_GEQUAL (4 << 4)
2274# define RADEON_Z_TEST_GREATER (5 << 4)
2275# define RADEON_Z_TEST_NEQUAL (6 << 4)
2276# define RADEON_Z_TEST_ALWAYS (7 << 4)
2277# define RADEON_Z_TEST_MASK (7 << 4)
2278# define RADEON_STENCIL_TEST_NEVER (0 << 12)
2279# define RADEON_STENCIL_TEST_LESS (1 << 12)
2280# define RADEON_STENCIL_TEST_LEQUAL (2 << 12)
2281# define RADEON_STENCIL_TEST_EQUAL (3 << 12)
2282# define RADEON_STENCIL_TEST_GEQUAL (4 << 12)
2283# define RADEON_STENCIL_TEST_GREATER (5 << 12)
2284# define RADEON_STENCIL_TEST_NEQUAL (6 << 12)
2285# define RADEON_STENCIL_TEST_ALWAYS (7 << 12)
2286# define RADEON_STENCIL_TEST_MASK (0x7 << 12)
2287# define RADEON_STENCIL_FAIL_KEEP (0 << 16)
2288# define RADEON_STENCIL_FAIL_ZERO (1 << 16)
2289# define RADEON_STENCIL_FAIL_REPLACE (2 << 16)
2290# define RADEON_STENCIL_FAIL_INC (3 << 16)
2291# define RADEON_STENCIL_FAIL_DEC (4 << 16)
2292# define RADEON_STENCIL_FAIL_INVERT (5 << 16)
2293# define RADEON_STENCIL_FAIL_MASK (0x7 << 16)
2294# define RADEON_STENCIL_ZPASS_KEEP (0 << 20)
2295# define RADEON_STENCIL_ZPASS_ZERO (1 << 20)
2296# define RADEON_STENCIL_ZPASS_REPLACE (2 << 20)
2297# define RADEON_STENCIL_ZPASS_INC (3 << 20)
2298# define RADEON_STENCIL_ZPASS_DEC (4 << 20)
2299# define RADEON_STENCIL_ZPASS_INVERT (5 << 20)
2300# define RADEON_STENCIL_ZPASS_MASK (0x7 << 20)
2301# define RADEON_STENCIL_ZFAIL_KEEP (0 << 24)
2302# define RADEON_STENCIL_ZFAIL_ZERO (1 << 24)
2303# define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24)
2304# define RADEON_STENCIL_ZFAIL_INC (3 << 24)
2305# define RADEON_STENCIL_ZFAIL_DEC (4 << 24)
2306# define RADEON_STENCIL_ZFAIL_INVERT (5 << 24)
2307# define RADEON_STENCIL_ZFAIL_MASK (0x7 << 24)
2308# define RADEON_Z_COMPRESSION_ENABLE (1 << 28)
2309# define RADEON_FORCE_Z_DIRTY (1 << 29)
2310# define RADEON_Z_WRITE_ENABLE (1 << 30)
2311#define RADEON_RE_LINE_PATTERN 0x1cd0
2312# define RADEON_LINE_PATTERN_MASK 0x0000ffff
2313# define RADEON_LINE_REPEAT_COUNT_SHIFT 16
2314# define RADEON_LINE_PATTERN_START_SHIFT 24
2315# define RADEON_LINE_PATTERN_LITTLE_BIT_ORDER (0 << 28)
2316# define RADEON_LINE_PATTERN_BIG_BIT_ORDER (1 << 28)
2317# define RADEON_LINE_PATTERN_AUTO_RESET (1 << 29)
2318#define RADEON_RE_LINE_STATE 0x1cd4
2319# define RADEON_LINE_CURRENT_PTR_SHIFT 0
2320# define RADEON_LINE_CURRENT_COUNT_SHIFT 8
2321#define RADEON_RE_MISC 0x26c4
2322# define RADEON_STIPPLE_COORD_MASK 0x1f
2323# define RADEON_STIPPLE_X_OFFSET_SHIFT 0
2324# define RADEON_STIPPLE_X_OFFSET_MASK (0x1f << 0)
2325# define RADEON_STIPPLE_Y_OFFSET_SHIFT 8
2326# define RADEON_STIPPLE_Y_OFFSET_MASK (0x1f << 8)
2327# define RADEON_STIPPLE_LITTLE_BIT_ORDER (0 << 16)
2328# define RADEON_STIPPLE_BIG_BIT_ORDER (1 << 16)
2329#define RADEON_RE_SOLID_COLOR 0x1c1c
2330#define RADEON_RE_TOP_LEFT 0x26c0
2331# define RADEON_RE_LEFT_SHIFT 0
2332# define RADEON_RE_TOP_SHIFT 16
2333#define RADEON_RE_WIDTH_HEIGHT 0x1c44
2334# define RADEON_RE_WIDTH_SHIFT 0
2335# define RADEON_RE_HEIGHT_SHIFT 16
2336
2337#define RADEON_SE_CNTL 0x1c4c
2338# define RADEON_FFACE_CULL_CW (0 << 0)
2339# define RADEON_FFACE_CULL_CCW (1 << 0)
2340# define RADEON_FFACE_CULL_DIR_MASK (1 << 0)
2341# define RADEON_BFACE_CULL (0 << 1)
2342# define RADEON_BFACE_SOLID (3 << 1)
2343# define RADEON_FFACE_CULL (0 << 3)
2344# define RADEON_FFACE_SOLID (3 << 3)
2345# define RADEON_FFACE_CULL_MASK (3 << 3)
2346# define RADEON_BADVTX_CULL_DISABLE (1 << 5)
2347# define RADEON_FLAT_SHADE_VTX_0 (0 << 6)
2348# define RADEON_FLAT_SHADE_VTX_1 (1 << 6)
2349# define RADEON_FLAT_SHADE_VTX_2 (2 << 6)
2350# define RADEON_FLAT_SHADE_VTX_LAST (3 << 6)
2351# define RADEON_DIFFUSE_SHADE_SOLID (0 << 8)
2352# define RADEON_DIFFUSE_SHADE_FLAT (1 << 8)
2353# define RADEON_DIFFUSE_SHADE_GOURAUD (2 << 8)
2354# define RADEON_DIFFUSE_SHADE_MASK (3 << 8)
2355# define RADEON_ALPHA_SHADE_SOLID (0 << 10)
2356# define RADEON_ALPHA_SHADE_FLAT (1 << 10)
2357# define RADEON_ALPHA_SHADE_GOURAUD (2 << 10)
2358# define RADEON_ALPHA_SHADE_MASK (3 << 10)
2359# define RADEON_SPECULAR_SHADE_SOLID (0 << 12)
2360# define RADEON_SPECULAR_SHADE_FLAT (1 << 12)
2361# define RADEON_SPECULAR_SHADE_GOURAUD (2 << 12)
2362# define RADEON_SPECULAR_SHADE_MASK (3 << 12)
2363# define RADEON_FOG_SHADE_SOLID (0 << 14)
2364# define RADEON_FOG_SHADE_FLAT (1 << 14)
2365# define RADEON_FOG_SHADE_GOURAUD (2 << 14)
2366# define RADEON_FOG_SHADE_MASK (3 << 14)
2367# define RADEON_ZBIAS_ENABLE_POINT (1 << 16)
2368# define RADEON_ZBIAS_ENABLE_LINE (1 << 17)
2369# define RADEON_ZBIAS_ENABLE_TRI (1 << 18)
2370# define RADEON_WIDELINE_ENABLE (1 << 20)
2371# define RADEON_VPORT_XY_XFORM_ENABLE (1 << 24)
2372# define RADEON_VPORT_Z_XFORM_ENABLE (1 << 25)
2373# define RADEON_VTX_PIX_CENTER_D3D (0 << 27)
2374# define RADEON_VTX_PIX_CENTER_OGL (1 << 27)
2375# define RADEON_ROUND_MODE_TRUNC (0 << 28)
2376# define RADEON_ROUND_MODE_ROUND (1 << 28)
2377# define RADEON_ROUND_MODE_ROUND_EVEN (2 << 28)
2378# define RADEON_ROUND_MODE_ROUND_ODD (3 << 28)
2379# define RADEON_ROUND_PREC_16TH_PIX (0 << 30)
2380# define RADEON_ROUND_PREC_8TH_PIX (1 << 30)
2381# define RADEON_ROUND_PREC_4TH_PIX (2 << 30)
2382# define RADEON_ROUND_PREC_HALF_PIX (3 << 30)
/* R200 rasterizer-engine control register and its flag bits. */
#define R200_RE_CNTL 0x1c50
# define R200_STIPPLE_ENABLE 0x1
# define R200_SCISSOR_ENABLE 0x2
# define R200_PATTERN_ENABLE 0x4
# define R200_PERSPECTIVE_ENABLE 0x8
# define R200_POINT_SMOOTH 0x20
# define R200_VTX_STQ0_D3D 0x00010000
# define R200_VTX_STQ1_D3D 0x00040000
# define R200_VTX_STQ2_D3D 0x00100000
# define R200_VTX_STQ3_D3D 0x00400000
# define R200_VTX_STQ4_D3D 0x01000000
# define R200_VTX_STQ5_D3D 0x04000000
/* Setup-engine status/control: vertex-cache endian swap mode and TCL bypass. */
#define RADEON_SE_CNTL_STATUS 0x2140
# define RADEON_VC_NO_SWAP (0 << 0)
# define RADEON_VC_16BIT_SWAP (1 << 0)
# define RADEON_VC_32BIT_SWAP (2 << 0)
# define RADEON_VC_HALF_DWORD_SWAP (3 << 0)
# define RADEON_TCL_BYPASS (1 << 8)
/* Vertex coordinate format.  NOTE(review): shares offset 0x1c50 with
 * R200_RE_CNTL above -- presumably repurposed between chip generations;
 * confirm against the hardware register docs. */
#define RADEON_SE_COORD_FMT 0x1c50
# define RADEON_VTX_XY_PRE_MULT_1_OVER_W0 (1 << 0)
# define RADEON_VTX_Z_PRE_MULT_1_OVER_W0 (1 << 1)
# define RADEON_VTX_ST0_NONPARAMETRIC (1 << 8)
# define RADEON_VTX_ST1_NONPARAMETRIC (1 << 9)
# define RADEON_VTX_ST2_NONPARAMETRIC (1 << 10)
# define RADEON_VTX_ST3_NONPARAMETRIC (1 << 11)
# define RADEON_VTX_W0_NORMALIZE (1 << 12)
# define RADEON_VTX_W0_IS_NOT_1_OVER_W0 (1 << 16)
# define RADEON_VTX_ST0_PRE_MULT_1_OVER_W0 (1 << 17)
# define RADEON_VTX_ST1_PRE_MULT_1_OVER_W0 (1 << 19)
# define RADEON_VTX_ST2_PRE_MULT_1_OVER_W0 (1 << 21)
# define RADEON_VTX_ST3_PRE_MULT_1_OVER_W0 (1 << 23)
# define RADEON_TEX1_W_ROUTING_USE_W0 (0 << 26)
# define RADEON_TEX1_W_ROUTING_USE_Q1 (1 << 26)
#define RADEON_SE_LINE_WIDTH 0x1db8
/* TCL lighting model control: global lighting enables plus 2-bit
 * per-component source-select fields at the *_SOURCE_SHIFT positions. */
#define RADEON_SE_TCL_LIGHT_MODEL_CTL 0x226c
# define RADEON_LIGHTING_ENABLE (1 << 0)
# define RADEON_LIGHT_IN_MODELSPACE (1 << 1)
# define RADEON_LOCAL_VIEWER (1 << 2)
# define RADEON_NORMALIZE_NORMALS (1 << 3)
# define RADEON_RESCALE_NORMALS (1 << 4)
# define RADEON_SPECULAR_LIGHTS (1 << 5)
# define RADEON_DIFFUSE_SPECULAR_COMBINE (1 << 6)
# define RADEON_LIGHT_ALPHA (1 << 7)
# define RADEON_LOCAL_LIGHT_VEC_GL (1 << 8)
# define RADEON_LIGHT_NO_NORMAL_AMBIENT_ONLY (1 << 9)
# define RADEON_LM_SOURCE_STATE_PREMULT 0
# define RADEON_LM_SOURCE_STATE_MULT 1
# define RADEON_LM_SOURCE_VERTEX_DIFFUSE 2
# define RADEON_LM_SOURCE_VERTEX_SPECULAR 3
# define RADEON_EMISSIVE_SOURCE_SHIFT 16
# define RADEON_AMBIENT_SOURCE_SHIFT 18
# define RADEON_DIFFUSE_SOURCE_SHIFT 20
# define RADEON_SPECULAR_SOURCE_SHIFT 22
/* TCL material colors, one float register per RGBA component.
 * (Spelling "EMMISSIVE" is historical; kept for source compatibility.) */
#define RADEON_SE_TCL_MATERIAL_AMBIENT_RED 0x2220
#define RADEON_SE_TCL_MATERIAL_AMBIENT_GREEN 0x2224
#define RADEON_SE_TCL_MATERIAL_AMBIENT_BLUE 0x2228
#define RADEON_SE_TCL_MATERIAL_AMBIENT_ALPHA 0x222c
#define RADEON_SE_TCL_MATERIAL_DIFFUSE_RED 0x2230
#define RADEON_SE_TCL_MATERIAL_DIFFUSE_GREEN 0x2234
#define RADEON_SE_TCL_MATERIAL_DIFFUSE_BLUE 0x2238
#define RADEON_SE_TCL_MATERIAL_DIFFUSE_ALPHA 0x223c
#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_GREEN 0x2214
#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_BLUE 0x2218
#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_ALPHA 0x221c
#define RADEON_SE_TCL_MATERIAL_SPECULAR_RED 0x2240
#define RADEON_SE_TCL_MATERIAL_SPECULAR_GREEN 0x2244
#define RADEON_SE_TCL_MATERIAL_SPECULAR_BLUE 0x2248
#define RADEON_SE_TCL_MATERIAL_SPECULAR_ALPHA 0x224c
/* TCL matrix selectors: 4-bit index fields packed at the given shifts. */
#define RADEON_SE_TCL_MATRIX_SELECT_0 0x225c
# define RADEON_MODELVIEW_0_SHIFT 0
# define RADEON_MODELVIEW_1_SHIFT 4
# define RADEON_MODELVIEW_2_SHIFT 8
# define RADEON_MODELVIEW_3_SHIFT 12
# define RADEON_IT_MODELVIEW_0_SHIFT 16
# define RADEON_IT_MODELVIEW_1_SHIFT 20
# define RADEON_IT_MODELVIEW_2_SHIFT 24
# define RADEON_IT_MODELVIEW_3_SHIFT 28
#define RADEON_SE_TCL_MATRIX_SELECT_1 0x2260
# define RADEON_MODELPROJECT_0_SHIFT 0
# define RADEON_MODELPROJECT_1_SHIFT 4
# define RADEON_MODELPROJECT_2_SHIFT 8
# define RADEON_MODELPROJECT_3_SHIFT 12
# define RADEON_TEXMAT_0_SHIFT 16
# define RADEON_TEXMAT_1_SHIFT 20
# define RADEON_TEXMAT_2_SHIFT 24
# define RADEON_TEXMAT_3_SHIFT 28
2470
2471
/* TCL output vertex format: which components the TCL stage emits. */
#define RADEON_SE_TCL_OUTPUT_VTX_FMT 0x2254
# define RADEON_TCL_VTX_W0 (1 << 0)
# define RADEON_TCL_VTX_FP_DIFFUSE (1 << 1)
# define RADEON_TCL_VTX_FP_ALPHA (1 << 2)
# define RADEON_TCL_VTX_PK_DIFFUSE (1 << 3)
# define RADEON_TCL_VTX_FP_SPEC (1 << 4)
# define RADEON_TCL_VTX_FP_FOG (1 << 5)
# define RADEON_TCL_VTX_PK_SPEC (1 << 6)
# define RADEON_TCL_VTX_ST0 (1 << 7)
# define RADEON_TCL_VTX_ST1 (1 << 8)
# define RADEON_TCL_VTX_Q1 (1 << 9)
# define RADEON_TCL_VTX_ST2 (1 << 10)
# define RADEON_TCL_VTX_Q2 (1 << 11)
# define RADEON_TCL_VTX_ST3 (1 << 12)
# define RADEON_TCL_VTX_Q3 (1 << 13)
# define RADEON_TCL_VTX_Q0 (1 << 14)
# define RADEON_TCL_VTX_WEIGHT_COUNT_SHIFT 15
# define RADEON_TCL_VTX_NORM0 (1 << 18)
# define RADEON_TCL_VTX_XY1 (1 << 27)
# define RADEON_TCL_VTX_Z1 (1 << 28)
# define RADEON_TCL_VTX_W1 (1 << 29)
# define RADEON_TCL_VTX_NORM1 (1 << 30)
/* 1U: shifting a signed 1 into bit 31 is undefined behavior in ISO C. */
# define RADEON_TCL_VTX_Z0 (1U << 31)
2495
/* TCL output component select: compute enables plus 4-bit texture routing
 * fields (values 0-3 = input texcoord, 8-11 = computed texcoord). */
#define RADEON_SE_TCL_OUTPUT_VTX_SEL 0x2258
# define RADEON_TCL_COMPUTE_XYZW (1 << 0)
# define RADEON_TCL_COMPUTE_DIFFUSE (1 << 1)
# define RADEON_TCL_COMPUTE_SPECULAR (1 << 2)
# define RADEON_TCL_FORCE_NAN_IF_COLOR_NAN (1 << 3)
# define RADEON_TCL_FORCE_INORDER_PROC (1 << 4)
# define RADEON_TCL_TEX_INPUT_TEX_0 0
# define RADEON_TCL_TEX_INPUT_TEX_1 1
# define RADEON_TCL_TEX_INPUT_TEX_2 2
# define RADEON_TCL_TEX_INPUT_TEX_3 3
# define RADEON_TCL_TEX_COMPUTED_TEX_0 8
# define RADEON_TCL_TEX_COMPUTED_TEX_1 9
# define RADEON_TCL_TEX_COMPUTED_TEX_2 10
# define RADEON_TCL_TEX_COMPUTED_TEX_3 11
# define RADEON_TCL_TEX_0_OUTPUT_SHIFT 16
# define RADEON_TCL_TEX_1_OUTPUT_SHIFT 20
# define RADEON_TCL_TEX_2_OUTPUT_SHIFT 24
# define RADEON_TCL_TEX_3_OUTPUT_SHIFT 28
/* Per-light control: lights 0/1 share this register (light N's flags are
 * light 0's flags shifted by RADEON_LIGHT_N_SHIFT); lights 2-7 follow in
 * the _1/_2/_3 registers with the same 16-bit layout. */
#define RADEON_SE_TCL_PER_LIGHT_CTL_0 0x2270
# define RADEON_LIGHT_0_ENABLE (1 << 0)
# define RADEON_LIGHT_0_ENABLE_AMBIENT (1 << 1)
# define RADEON_LIGHT_0_ENABLE_SPECULAR (1 << 2)
# define RADEON_LIGHT_0_IS_LOCAL (1 << 3)
# define RADEON_LIGHT_0_IS_SPOT (1 << 4)
# define RADEON_LIGHT_0_DUAL_CONE (1 << 5)
# define RADEON_LIGHT_0_ENABLE_RANGE_ATTEN (1 << 6)
# define RADEON_LIGHT_0_CONSTANT_RANGE_ATTEN (1 << 7)
# define RADEON_LIGHT_0_SHIFT 0
# define RADEON_LIGHT_1_ENABLE (1 << 16)
# define RADEON_LIGHT_1_ENABLE_AMBIENT (1 << 17)
# define RADEON_LIGHT_1_ENABLE_SPECULAR (1 << 18)
# define RADEON_LIGHT_1_IS_LOCAL (1 << 19)
# define RADEON_LIGHT_1_IS_SPOT (1 << 20)
# define RADEON_LIGHT_1_DUAL_CONE (1 << 21)
# define RADEON_LIGHT_1_ENABLE_RANGE_ATTEN (1 << 22)
# define RADEON_LIGHT_1_CONSTANT_RANGE_ATTEN (1 << 23)
# define RADEON_LIGHT_1_SHIFT 16
#define RADEON_SE_TCL_PER_LIGHT_CTL_1 0x2274
# define RADEON_LIGHT_2_SHIFT 0
# define RADEON_LIGHT_3_SHIFT 16
#define RADEON_SE_TCL_PER_LIGHT_CTL_2 0x2278
# define RADEON_LIGHT_4_SHIFT 0
# define RADEON_LIGHT_5_SHIFT 16
#define RADEON_SE_TCL_PER_LIGHT_CTL_3 0x227c
# define RADEON_LIGHT_6_SHIFT 0
# define RADEON_LIGHT_7_SHIFT 16

#define RADEON_SE_TCL_SHININESS 0x2250

/* Texture coordinate generation control: per-unit texgen/texmat enables
 * and 4-bit texgen input selectors at the *_INPUT_SHIFT positions. */
#define RADEON_SE_TCL_TEXTURE_PROC_CTL 0x2268
# define RADEON_TEXGEN_TEXMAT_0_ENABLE (1 << 0)
# define RADEON_TEXGEN_TEXMAT_1_ENABLE (1 << 1)
# define RADEON_TEXGEN_TEXMAT_2_ENABLE (1 << 2)
# define RADEON_TEXGEN_TEXMAT_3_ENABLE (1 << 3)
# define RADEON_TEXMAT_0_ENABLE (1 << 4)
# define RADEON_TEXMAT_1_ENABLE (1 << 5)
# define RADEON_TEXMAT_2_ENABLE (1 << 6)
# define RADEON_TEXMAT_3_ENABLE (1 << 7)
# define RADEON_TEXGEN_INPUT_MASK 0xf
# define RADEON_TEXGEN_INPUT_TEXCOORD_0 0
# define RADEON_TEXGEN_INPUT_TEXCOORD_1 1
# define RADEON_TEXGEN_INPUT_TEXCOORD_2 2
# define RADEON_TEXGEN_INPUT_TEXCOORD_3 3
# define RADEON_TEXGEN_INPUT_OBJ 4
# define RADEON_TEXGEN_INPUT_EYE 5
# define RADEON_TEXGEN_INPUT_EYE_NORMAL 6
# define RADEON_TEXGEN_INPUT_EYE_REFLECT 7
# define RADEON_TEXGEN_INPUT_EYE_NORMALIZED 8
# define RADEON_TEXGEN_0_INPUT_SHIFT 16
# define RADEON_TEXGEN_1_INPUT_SHIFT 20
# define RADEON_TEXGEN_2_INPUT_SHIFT 24
# define RADEON_TEXGEN_3_INPUT_SHIFT 28
2569
/* User clip planes, fog mode, vertex blending and culling control.
 * NOTE(review): each VERTEX_BLEND_SRC_n_PRIMARY/_SECONDARY pair uses the
 * same bit, so the define apparently names both states of one select bit --
 * confirm against the hardware docs before relying on the distinction. */
#define RADEON_SE_TCL_UCP_VERT_BLEND_CTL 0x2264
# define RADEON_UCP_IN_CLIP_SPACE (1 << 0)
# define RADEON_UCP_IN_MODEL_SPACE (1 << 1)
# define RADEON_UCP_ENABLE_0 (1 << 2)
# define RADEON_UCP_ENABLE_1 (1 << 3)
# define RADEON_UCP_ENABLE_2 (1 << 4)
# define RADEON_UCP_ENABLE_3 (1 << 5)
# define RADEON_UCP_ENABLE_4 (1 << 6)
# define RADEON_UCP_ENABLE_5 (1 << 7)
# define RADEON_TCL_FOG_MASK (3 << 8)
# define RADEON_TCL_FOG_DISABLE (0 << 8)
# define RADEON_TCL_FOG_EXP (1 << 8)
# define RADEON_TCL_FOG_EXP2 (2 << 8)
# define RADEON_TCL_FOG_LINEAR (3 << 8)
# define RADEON_RNG_BASED_FOG (1 << 10)
# define RADEON_LIGHT_TWOSIDE (1 << 11)
# define RADEON_BLEND_OP_COUNT_MASK (7 << 12)
# define RADEON_BLEND_OP_COUNT_SHIFT 12
# define RADEON_POSITION_BLEND_OP_ENABLE (1 << 16)
# define RADEON_NORMAL_BLEND_OP_ENABLE (1 << 17)
# define RADEON_VERTEX_BLEND_SRC_0_PRIMARY (1 << 18)
# define RADEON_VERTEX_BLEND_SRC_0_SECONDARY (1 << 18)
# define RADEON_VERTEX_BLEND_SRC_1_PRIMARY (1 << 19)
# define RADEON_VERTEX_BLEND_SRC_1_SECONDARY (1 << 19)
# define RADEON_VERTEX_BLEND_SRC_2_PRIMARY (1 << 20)
# define RADEON_VERTEX_BLEND_SRC_2_SECONDARY (1 << 20)
# define RADEON_VERTEX_BLEND_SRC_3_PRIMARY (1 << 21)
# define RADEON_VERTEX_BLEND_SRC_3_SECONDARY (1 << 21)
# define RADEON_VERTEX_BLEND_WGT_MINUS_ONE (1 << 22)
# define RADEON_CULL_FRONT_IS_CW (0 << 28)
# define RADEON_CULL_FRONT_IS_CCW (1 << 28)
# define RADEON_CULL_FRONT (1 << 29)
# define RADEON_CULL_BACK (1 << 30)
/* 1U: shifting a signed 1 into bit 31 is undefined behavior in ISO C. */
# define RADEON_FORCE_W_TO_ONE (1U << 31)
2604
/* Viewport transform scale/offset and z-bias float registers. */
#define RADEON_SE_VPORT_XSCALE 0x1d98
#define RADEON_SE_VPORT_XOFFSET 0x1d9c
#define RADEON_SE_VPORT_YSCALE 0x1da0
#define RADEON_SE_VPORT_YOFFSET 0x1da4
#define RADEON_SE_VPORT_ZSCALE 0x1da8
#define RADEON_SE_VPORT_ZOFFSET 0x1dac
#define RADEON_SE_ZBIAS_FACTOR 0x1db0
#define RADEON_SE_ZBIAS_CONSTANT 0x1db4

/* Setup-engine vertex format: which components are present in the
 * vertex stream (XY is implicit / value 0). */
#define RADEON_SE_VTX_FMT 0x2080
# define RADEON_SE_VTX_FMT_XY 0x00000000
# define RADEON_SE_VTX_FMT_W0 0x00000001
# define RADEON_SE_VTX_FMT_FPCOLOR 0x00000002
# define RADEON_SE_VTX_FMT_FPALPHA 0x00000004
# define RADEON_SE_VTX_FMT_PKCOLOR 0x00000008
# define RADEON_SE_VTX_FMT_FPSPEC 0x00000010
# define RADEON_SE_VTX_FMT_FPFOG 0x00000020
# define RADEON_SE_VTX_FMT_PKSPEC 0x00000040
# define RADEON_SE_VTX_FMT_ST0 0x00000080
# define RADEON_SE_VTX_FMT_ST1 0x00000100
# define RADEON_SE_VTX_FMT_Q1 0x00000200
# define RADEON_SE_VTX_FMT_ST2 0x00000400
# define RADEON_SE_VTX_FMT_Q2 0x00000800
# define RADEON_SE_VTX_FMT_ST3 0x00001000
# define RADEON_SE_VTX_FMT_Q3 0x00002000
# define RADEON_SE_VTX_FMT_Q0 0x00004000
# define RADEON_SE_VTX_FMT_BLND_WEIGHT_CNT_MASK 0x00038000
# define RADEON_SE_VTX_FMT_N0 0x00040000
# define RADEON_SE_VTX_FMT_XY1 0x08000000
# define RADEON_SE_VTX_FMT_Z1 0x10000000
# define RADEON_SE_VTX_FMT_W1 0x20000000
# define RADEON_SE_VTX_FMT_N1 0x40000000
# define RADEON_SE_VTX_FMT_Z 0x80000000
2638
/* Vertex fetcher control: primitive type (bits 0-3), walk mode (bits 4-5),
 * color order, and vertex count in the upper half. */
#define RADEON_SE_VF_CNTL 0x2084
# define RADEON_VF_PRIM_TYPE_POINT_LIST 1
# define RADEON_VF_PRIM_TYPE_LINE_LIST 2
# define RADEON_VF_PRIM_TYPE_LINE_STRIP 3
# define RADEON_VF_PRIM_TYPE_TRIANGLE_LIST 4
# define RADEON_VF_PRIM_TYPE_TRIANGLE_FAN 5
# define RADEON_VF_PRIM_TYPE_TRIANGLE_STRIP 6
# define RADEON_VF_PRIM_TYPE_TRIANGLE_FLAG 7
# define RADEON_VF_PRIM_TYPE_RECTANGLE_LIST 8
# define RADEON_VF_PRIM_TYPE_POINT_LIST_3 9
# define RADEON_VF_PRIM_TYPE_LINE_LIST_3 10
# define RADEON_VF_PRIM_TYPE_SPIRIT_LIST 11
# define RADEON_VF_PRIM_TYPE_LINE_LOOP 12
# define RADEON_VF_PRIM_TYPE_QUAD_LIST 13
# define RADEON_VF_PRIM_TYPE_QUAD_STRIP 14
# define RADEON_VF_PRIM_TYPE_POLYGON 15
# define RADEON_VF_PRIM_WALK_STATE (0<<4)
# define RADEON_VF_PRIM_WALK_INDEX (1<<4)
# define RADEON_VF_PRIM_WALK_LIST (2<<4)
# define RADEON_VF_PRIM_WALK_DATA (3<<4)
# define RADEON_VF_COLOR_ORDER_RGBA (1<<6)
# define RADEON_VF_RADEON_MODE (1<<8)
# define RADEON_VF_TCL_OUTPUT_CTL_ENA (1<<9)
# define RADEON_VF_PROG_STREAM_ENA (1<<10)
# define RADEON_VF_INDEX_SIZE_SHIFT 11
# define RADEON_VF_NUM_VERTICES_SHIFT 16

#define RADEON_SE_PORT_DATA0 0x2000

/* R200 vertex-array processor control. */
#define R200_SE_VAP_CNTL 0x2080
# define R200_VAP_TCL_ENABLE 0x00000001
# define R200_VAP_SINGLE_BUF_STATE_ENABLE 0x00000010
# define R200_VAP_FORCE_W_TO_ONE 0x00010000
# define R200_VAP_D3D_TEX_DEFAULT 0x00020000
# define R200_VAP_VF_MAX_VTX_NUM__SHIFT 18
# define R200_VAP_VF_MAX_VTX_NUM (9 << 18)
# define R200_VAP_DX_CLIP_SPACE_DEF 0x00400000
#define R200_VF_MAX_VTX_INDX 0x210c
#define R200_VF_MIN_VTX_INDX 0x2110
/* R200 vertex transform engine: per-axis viewport enable and vertex
 * component format flags. */
#define R200_SE_VTE_CNTL 0x20b0
# define R200_VPORT_X_SCALE_ENA 0x00000001
# define R200_VPORT_X_OFFSET_ENA 0x00000002
# define R200_VPORT_Y_SCALE_ENA 0x00000004
# define R200_VPORT_Y_OFFSET_ENA 0x00000008
# define R200_VPORT_Z_SCALE_ENA 0x00000010
# define R200_VPORT_Z_OFFSET_ENA 0x00000020
# define R200_VTX_XY_FMT 0x00000100
# define R200_VTX_Z_FMT 0x00000200
# define R200_VTX_W0_FMT 0x00000400
# define R200_VTX_W0_NORMALIZE 0x00000800
# define R200_VTX_ST_DENORMALIZED 0x00001000
/* R200 VAP status: vertex-cache endian swap mode. */
#define R200_SE_VAP_CNTL_STATUS 0x2140
# define R200_VC_NO_SWAP (0 << 0)
# define R200_VC_16BIT_SWAP (1 << 0)
# define R200_VC_32BIT_SWAP (2 << 0)
/* R200 per-unit texture filter registers (one per texture unit, stride
 * 0x20) and the shared flag/field layout: mag/min filters, anisotropy,
 * mip level, YUV handling and S/T clamp modes. */
#define R200_PP_TXFILTER_0 0x2c00
#define R200_PP_TXFILTER_1 0x2c20
#define R200_PP_TXFILTER_2 0x2c40
#define R200_PP_TXFILTER_3 0x2c60
#define R200_PP_TXFILTER_4 0x2c80
#define R200_PP_TXFILTER_5 0x2ca0
# define R200_MAG_FILTER_NEAREST (0 << 0)
# define R200_MAG_FILTER_LINEAR (1 << 0)
# define R200_MAG_FILTER_MASK (1 << 0)
# define R200_MIN_FILTER_NEAREST (0 << 1)
# define R200_MIN_FILTER_LINEAR (1 << 1)
# define R200_MIN_FILTER_NEAREST_MIP_NEAREST (2 << 1)
# define R200_MIN_FILTER_NEAREST_MIP_LINEAR (3 << 1)
# define R200_MIN_FILTER_LINEAR_MIP_NEAREST (6 << 1)
# define R200_MIN_FILTER_LINEAR_MIP_LINEAR (7 << 1)
# define R200_MIN_FILTER_ANISO_NEAREST (8 << 1)
# define R200_MIN_FILTER_ANISO_LINEAR (9 << 1)
# define R200_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (10 << 1)
# define R200_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR (11 << 1)
# define R200_MIN_FILTER_MASK (15 << 1)
# define R200_MAX_ANISO_1_TO_1 (0 << 5)
# define R200_MAX_ANISO_2_TO_1 (1 << 5)
# define R200_MAX_ANISO_4_TO_1 (2 << 5)
# define R200_MAX_ANISO_8_TO_1 (3 << 5)
# define R200_MAX_ANISO_16_TO_1 (4 << 5)
# define R200_MAX_ANISO_MASK (7 << 5)
# define R200_MAX_MIP_LEVEL_MASK (0x0f << 16)
# define R200_MAX_MIP_LEVEL_SHIFT 16
# define R200_YUV_TO_RGB (1 << 20)
# define R200_YUV_TEMPERATURE_COOL (0 << 21)
# define R200_YUV_TEMPERATURE_HOT (1 << 21)
# define R200_YUV_TEMPERATURE_MASK (1 << 21)
# define R200_WRAPEN_S (1 << 22)
# define R200_CLAMP_S_WRAP (0 << 23)
# define R200_CLAMP_S_MIRROR (1 << 23)
# define R200_CLAMP_S_CLAMP_LAST (2 << 23)
# define R200_CLAMP_S_MIRROR_CLAMP_LAST (3 << 23)
# define R200_CLAMP_S_CLAMP_BORDER (4 << 23)
# define R200_CLAMP_S_MIRROR_CLAMP_BORDER (5 << 23)
# define R200_CLAMP_S_CLAMP_GL (6 << 23)
# define R200_CLAMP_S_MIRROR_CLAMP_GL (7 << 23)
# define R200_CLAMP_S_MASK (7 << 23)
# define R200_WRAPEN_T (1 << 26)
# define R200_CLAMP_T_WRAP (0 << 27)
# define R200_CLAMP_T_MIRROR (1 << 27)
# define R200_CLAMP_T_CLAMP_LAST (2 << 27)
# define R200_CLAMP_T_MIRROR_CLAMP_LAST (3 << 27)
# define R200_CLAMP_T_CLAMP_BORDER (4 << 27)
# define R200_CLAMP_T_MIRROR_CLAMP_BORDER (5 << 27)
# define R200_CLAMP_T_CLAMP_GL (6 << 27)
# define R200_CLAMP_T_MIRROR_CLAMP_GL (7 << 27)
# define R200_CLAMP_T_MASK (7 << 27)
# define R200_KILL_LT_ZERO (1 << 30)
/* Unsigned operands for bit 31: a signed shift into the sign bit is
 * undefined behavior in ISO C.  Register values are unchanged. */
# define R200_BORDER_MODE_OGL (0U << 31)
# define R200_BORDER_MODE_D3D (1U << 31)
/* R200 per-unit texture format registers (stride 0x20) and the shared
 * field layout: pixel format, dimensions, texcoord routing. */
#define R200_PP_TXFORMAT_0 0x2c04
#define R200_PP_TXFORMAT_1 0x2c24
#define R200_PP_TXFORMAT_2 0x2c44
#define R200_PP_TXFORMAT_3 0x2c64
#define R200_PP_TXFORMAT_4 0x2c84
#define R200_PP_TXFORMAT_5 0x2ca4
# define R200_TXFORMAT_I8 (0 << 0)
# define R200_TXFORMAT_AI88 (1 << 0)
# define R200_TXFORMAT_RGB332 (2 << 0)
# define R200_TXFORMAT_ARGB1555 (3 << 0)
# define R200_TXFORMAT_RGB565 (4 << 0)
# define R200_TXFORMAT_ARGB4444 (5 << 0)
# define R200_TXFORMAT_ARGB8888 (6 << 0)
# define R200_TXFORMAT_RGBA8888 (7 << 0)
# define R200_TXFORMAT_Y8 (8 << 0)
# define R200_TXFORMAT_AVYU4444 (9 << 0)
# define R200_TXFORMAT_VYUY422 (10 << 0)
# define R200_TXFORMAT_YVYU422 (11 << 0)
# define R200_TXFORMAT_DXT1 (12 << 0)
# define R200_TXFORMAT_DXT23 (14 << 0)
# define R200_TXFORMAT_DXT45 (15 << 0)
# define R200_TXFORMAT_ABGR8888 (22 << 0)
# define R200_TXFORMAT_FORMAT_MASK (31 << 0)
# define R200_TXFORMAT_FORMAT_SHIFT 0
# define R200_TXFORMAT_ALPHA_IN_MAP (1 << 6)
# define R200_TXFORMAT_NON_POWER2 (1 << 7)
# define R200_TXFORMAT_WIDTH_MASK (15 << 8)
# define R200_TXFORMAT_WIDTH_SHIFT 8
# define R200_TXFORMAT_HEIGHT_MASK (15 << 12)
# define R200_TXFORMAT_HEIGHT_SHIFT 12
# define R200_TXFORMAT_F5_WIDTH_MASK (15 << 16) /* cube face 5 */
# define R200_TXFORMAT_F5_WIDTH_SHIFT 16
# define R200_TXFORMAT_F5_HEIGHT_MASK (15 << 20)
# define R200_TXFORMAT_F5_HEIGHT_SHIFT 20
# define R200_TXFORMAT_ST_ROUTE_STQ0 (0 << 24)
# define R200_TXFORMAT_ST_ROUTE_STQ1 (1 << 24)
# define R200_TXFORMAT_ST_ROUTE_STQ2 (2 << 24)
# define R200_TXFORMAT_ST_ROUTE_STQ3 (3 << 24)
# define R200_TXFORMAT_ST_ROUTE_STQ4 (4 << 24)
# define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24)
# define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24)
# define R200_TXFORMAT_ST_ROUTE_SHIFT 24
# define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28)
# define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29)
# define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30)
/* Extended texture format registers, one per unit. */
#define R200_PP_TXFORMAT_X_0 0x2c08
#define R200_PP_TXFORMAT_X_1 0x2c28
#define R200_PP_TXFORMAT_X_2 0x2c48
#define R200_PP_TXFORMAT_X_3 0x2c68
#define R200_PP_TXFORMAT_X_4 0x2c88
#define R200_PP_TXFORMAT_X_5 0x2ca8

/* Explicit texture size, used for non-power-of-two textures only. */
#define R200_PP_TXSIZE_0 0x2c0c /* NPOT only */
#define R200_PP_TXSIZE_1 0x2c2c /* NPOT only */
#define R200_PP_TXSIZE_2 0x2c4c /* NPOT only */
#define R200_PP_TXSIZE_3 0x2c6c /* NPOT only */
#define R200_PP_TXSIZE_4 0x2c8c /* NPOT only */
#define R200_PP_TXSIZE_5 0x2cac /* NPOT only */

/* Explicit texture pitch, used for non-power-of-two textures only. */
#define R200_PP_TXPITCH_0 0x2c10 /* NPOT only */
#define R200_PP_TXPITCH_1 0x2c30 /* NPOT only */
#define R200_PP_TXPITCH_2 0x2c50 /* NPOT only */
#define R200_PP_TXPITCH_3 0x2c70 /* NPOT only */
#define R200_PP_TXPITCH_4 0x2c90 /* NPOT only */
#define R200_PP_TXPITCH_5 0x2cb0 /* NPOT only */
2814
/* Texture base-address registers (stride 0x18); low bits carry endian
 * swap and tiling flags, the address itself is 32-byte aligned. */
#define R200_PP_TXOFFSET_0 0x2d00
# define R200_TXO_ENDIAN_NO_SWAP (0 << 0)
# define R200_TXO_ENDIAN_BYTE_SWAP (1 << 0)
# define R200_TXO_ENDIAN_WORD_SWAP (2 << 0)
# define R200_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
# define R200_TXO_MACRO_LINEAR (0 << 2)
# define R200_TXO_MACRO_TILE (1 << 2)
# define R200_TXO_MICRO_LINEAR (0 << 3)
# define R200_TXO_MICRO_TILE (1 << 3)
# define R200_TXO_OFFSET_MASK 0xffffffe0
# define R200_TXO_OFFSET_SHIFT 5
#define R200_PP_TXOFFSET_1 0x2d18
#define R200_PP_TXOFFSET_2 0x2d30
#define R200_PP_TXOFFSET_3 0x2d48
#define R200_PP_TXOFFSET_4 0x2d60
#define R200_PP_TXOFFSET_5 0x2d78

/* Per-unit texture factor (constant color) registers. */
#define R200_PP_TFACTOR_0 0x2ee0
#define R200_PP_TFACTOR_1 0x2ee4
#define R200_PP_TFACTOR_2 0x2ee8
#define R200_PP_TFACTOR_3 0x2eec
#define R200_PP_TFACTOR_4 0x2ef0
#define R200_PP_TFACTOR_5 0x2ef4
2838
/* R200 color combiner: three 5-bit argument selectors (A at bit 0, B at
 * bit 5, C at bit 10 -- same value encoding for each), per-argument
 * complement/bias/scale/negate modifiers, and the combine op in bits
 * 28-30.  Result is op(A, B, C), e.g. MADD = A*B + C. */
#define R200_PP_TXCBLEND_0 0x2f00
# define R200_TXC_ARG_A_ZERO (0)
# define R200_TXC_ARG_A_CURRENT_COLOR (2)
# define R200_TXC_ARG_A_CURRENT_ALPHA (3)
# define R200_TXC_ARG_A_DIFFUSE_COLOR (4)
# define R200_TXC_ARG_A_DIFFUSE_ALPHA (5)
# define R200_TXC_ARG_A_SPECULAR_COLOR (6)
# define R200_TXC_ARG_A_SPECULAR_ALPHA (7)
# define R200_TXC_ARG_A_TFACTOR_COLOR (8)
# define R200_TXC_ARG_A_TFACTOR_ALPHA (9)
# define R200_TXC_ARG_A_R0_COLOR (10)
# define R200_TXC_ARG_A_R0_ALPHA (11)
# define R200_TXC_ARG_A_R1_COLOR (12)
# define R200_TXC_ARG_A_R1_ALPHA (13)
# define R200_TXC_ARG_A_R2_COLOR (14)
# define R200_TXC_ARG_A_R2_ALPHA (15)
# define R200_TXC_ARG_A_R3_COLOR (16)
# define R200_TXC_ARG_A_R3_ALPHA (17)
# define R200_TXC_ARG_A_R4_COLOR (18)
# define R200_TXC_ARG_A_R4_ALPHA (19)
# define R200_TXC_ARG_A_R5_COLOR (20)
# define R200_TXC_ARG_A_R5_ALPHA (21)
# define R200_TXC_ARG_A_TFACTOR1_COLOR (26)
# define R200_TXC_ARG_A_TFACTOR1_ALPHA (27)
# define R200_TXC_ARG_A_MASK (31 << 0)
# define R200_TXC_ARG_A_SHIFT 0
# define R200_TXC_ARG_B_ZERO (0 << 5)
# define R200_TXC_ARG_B_CURRENT_COLOR (2 << 5)
# define R200_TXC_ARG_B_CURRENT_ALPHA (3 << 5)
# define R200_TXC_ARG_B_DIFFUSE_COLOR (4 << 5)
# define R200_TXC_ARG_B_DIFFUSE_ALPHA (5 << 5)
# define R200_TXC_ARG_B_SPECULAR_COLOR (6 << 5)
# define R200_TXC_ARG_B_SPECULAR_ALPHA (7 << 5)
# define R200_TXC_ARG_B_TFACTOR_COLOR (8 << 5)
# define R200_TXC_ARG_B_TFACTOR_ALPHA (9 << 5)
# define R200_TXC_ARG_B_R0_COLOR (10 << 5)
# define R200_TXC_ARG_B_R0_ALPHA (11 << 5)
# define R200_TXC_ARG_B_R1_COLOR (12 << 5)
# define R200_TXC_ARG_B_R1_ALPHA (13 << 5)
# define R200_TXC_ARG_B_R2_COLOR (14 << 5)
# define R200_TXC_ARG_B_R2_ALPHA (15 << 5)
# define R200_TXC_ARG_B_R3_COLOR (16 << 5)
# define R200_TXC_ARG_B_R3_ALPHA (17 << 5)
# define R200_TXC_ARG_B_R4_COLOR (18 << 5)
# define R200_TXC_ARG_B_R4_ALPHA (19 << 5)
# define R200_TXC_ARG_B_R5_COLOR (20 << 5)
# define R200_TXC_ARG_B_R5_ALPHA (21 << 5)
# define R200_TXC_ARG_B_TFACTOR1_COLOR (26 << 5)
# define R200_TXC_ARG_B_TFACTOR1_ALPHA (27 << 5)
# define R200_TXC_ARG_B_MASK (31 << 5)
# define R200_TXC_ARG_B_SHIFT 5
# define R200_TXC_ARG_C_ZERO (0 << 10)
# define R200_TXC_ARG_C_CURRENT_COLOR (2 << 10)
# define R200_TXC_ARG_C_CURRENT_ALPHA (3 << 10)
# define R200_TXC_ARG_C_DIFFUSE_COLOR (4 << 10)
# define R200_TXC_ARG_C_DIFFUSE_ALPHA (5 << 10)
# define R200_TXC_ARG_C_SPECULAR_COLOR (6 << 10)
# define R200_TXC_ARG_C_SPECULAR_ALPHA (7 << 10)
# define R200_TXC_ARG_C_TFACTOR_COLOR (8 << 10)
# define R200_TXC_ARG_C_TFACTOR_ALPHA (9 << 10)
# define R200_TXC_ARG_C_R0_COLOR (10 << 10)
# define R200_TXC_ARG_C_R0_ALPHA (11 << 10)
# define R200_TXC_ARG_C_R1_COLOR (12 << 10)
# define R200_TXC_ARG_C_R1_ALPHA (13 << 10)
# define R200_TXC_ARG_C_R2_COLOR (14 << 10)
# define R200_TXC_ARG_C_R2_ALPHA (15 << 10)
# define R200_TXC_ARG_C_R3_COLOR (16 << 10)
# define R200_TXC_ARG_C_R3_ALPHA (17 << 10)
# define R200_TXC_ARG_C_R4_COLOR (18 << 10)
# define R200_TXC_ARG_C_R4_ALPHA (19 << 10)
# define R200_TXC_ARG_C_R5_COLOR (20 << 10)
# define R200_TXC_ARG_C_R5_ALPHA (21 << 10)
# define R200_TXC_ARG_C_TFACTOR1_COLOR (26 << 10)
# define R200_TXC_ARG_C_TFACTOR1_ALPHA (27 << 10)
# define R200_TXC_ARG_C_MASK (31 << 10)
# define R200_TXC_ARG_C_SHIFT 10
# define R200_TXC_COMP_ARG_A (1 << 16)
# define R200_TXC_COMP_ARG_A_SHIFT (16)
# define R200_TXC_BIAS_ARG_A (1 << 17)
# define R200_TXC_SCALE_ARG_A (1 << 18)
# define R200_TXC_NEG_ARG_A (1 << 19)
# define R200_TXC_COMP_ARG_B (1 << 20)
# define R200_TXC_COMP_ARG_B_SHIFT (20)
# define R200_TXC_BIAS_ARG_B (1 << 21)
# define R200_TXC_SCALE_ARG_B (1 << 22)
# define R200_TXC_NEG_ARG_B (1 << 23)
# define R200_TXC_COMP_ARG_C (1 << 24)
# define R200_TXC_COMP_ARG_C_SHIFT (24)
# define R200_TXC_BIAS_ARG_C (1 << 25)
# define R200_TXC_SCALE_ARG_C (1 << 26)
# define R200_TXC_NEG_ARG_C (1 << 27)
# define R200_TXC_OP_MADD (0 << 28)
# define R200_TXC_OP_CND0 (2 << 28)
# define R200_TXC_OP_LERP (3 << 28)
# define R200_TXC_OP_DOT3 (4 << 28)
# define R200_TXC_OP_DOT4 (5 << 28)
# define R200_TXC_OP_CONDITIONAL (6 << 28)
# define R200_TXC_OP_DOT2_ADD (7 << 28)
# define R200_TXC_OP_MASK (7 << 28)
/* Second half of the R200 color combiner state: TFACTOR selects, result
 * scale and clamp modes, output register and write mask, and per-argument
 * component-replicate selectors. */
#define R200_PP_TXCBLEND2_0 0x2f04
# define R200_TXC_TFACTOR_SEL_SHIFT 0
# define R200_TXC_TFACTOR_SEL_MASK 0x7
# define R200_TXC_TFACTOR1_SEL_SHIFT 4
# define R200_TXC_TFACTOR1_SEL_MASK (0x7 << 4)
# define R200_TXC_SCALE_SHIFT 8
# define R200_TXC_SCALE_MASK (7 << 8)
# define R200_TXC_SCALE_1X (0 << 8)
# define R200_TXC_SCALE_2X (1 << 8)
# define R200_TXC_SCALE_4X (2 << 8)
# define R200_TXC_SCALE_8X (3 << 8)
# define R200_TXC_SCALE_INV2 (5 << 8)
# define R200_TXC_SCALE_INV4 (6 << 8)
# define R200_TXC_SCALE_INV8 (7 << 8)
# define R200_TXC_CLAMP_SHIFT 12
# define R200_TXC_CLAMP_MASK (3 << 12)
# define R200_TXC_CLAMP_WRAP (0 << 12)
# define R200_TXC_CLAMP_0_1 (1 << 12)
# define R200_TXC_CLAMP_8_8 (2 << 12)
# define R200_TXC_OUTPUT_REG_MASK (7 << 16)
# define R200_TXC_OUTPUT_REG_NONE (0 << 16)
# define R200_TXC_OUTPUT_REG_R0 (1 << 16)
# define R200_TXC_OUTPUT_REG_R1 (2 << 16)
# define R200_TXC_OUTPUT_REG_R2 (3 << 16)
# define R200_TXC_OUTPUT_REG_R3 (4 << 16)
# define R200_TXC_OUTPUT_REG_R4 (5 << 16)
# define R200_TXC_OUTPUT_REG_R5 (6 << 16)
# define R200_TXC_OUTPUT_MASK_MASK (7 << 20)
# define R200_TXC_OUTPUT_MASK_RGB (0 << 20)
# define R200_TXC_OUTPUT_MASK_RG (1 << 20)
# define R200_TXC_OUTPUT_MASK_RB (2 << 20)
# define R200_TXC_OUTPUT_MASK_R (3 << 20)
# define R200_TXC_OUTPUT_MASK_GB (4 << 20)
# define R200_TXC_OUTPUT_MASK_G (5 << 20)
# define R200_TXC_OUTPUT_MASK_B (6 << 20)
# define R200_TXC_OUTPUT_MASK_NONE (7 << 20)
# define R200_TXC_REPL_NORMAL 0
# define R200_TXC_REPL_RED 1
# define R200_TXC_REPL_GREEN 2
# define R200_TXC_REPL_BLUE 3
# define R200_TXC_REPL_ARG_A_SHIFT 26
# define R200_TXC_REPL_ARG_A_MASK (3 << 26)
# define R200_TXC_REPL_ARG_B_SHIFT 28
# define R200_TXC_REPL_ARG_B_MASK (3 << 28)
# define R200_TXC_REPL_ARG_C_SHIFT 30
/* 3U: a signed (3 << 30) overflows into the sign bit, which is undefined
 * behavior in ISO C.  The mask value is unchanged. */
# define R200_TXC_REPL_ARG_C_MASK (3U << 30)
/* R200 alpha combiner: mirrors the color combiner layout (argument
 * selectors A/B/C at bits 0/5/10, modifiers at 16-27, op at 28-30) but
 * selects alpha/blue channels.  "guess" comments are inherited from the
 * original reverse engineering. */
#define R200_PP_TXABLEND_0 0x2f08
# define R200_TXA_ARG_A_ZERO (0)
# define R200_TXA_ARG_A_CURRENT_ALPHA (2) /* guess */
# define R200_TXA_ARG_A_CURRENT_BLUE (3) /* guess */
# define R200_TXA_ARG_A_DIFFUSE_ALPHA (4)
# define R200_TXA_ARG_A_DIFFUSE_BLUE (5)
# define R200_TXA_ARG_A_SPECULAR_ALPHA (6)
# define R200_TXA_ARG_A_SPECULAR_BLUE (7)
# define R200_TXA_ARG_A_TFACTOR_ALPHA (8)
# define R200_TXA_ARG_A_TFACTOR_BLUE (9)
# define R200_TXA_ARG_A_R0_ALPHA (10)
# define R200_TXA_ARG_A_R0_BLUE (11)
# define R200_TXA_ARG_A_R1_ALPHA (12)
# define R200_TXA_ARG_A_R1_BLUE (13)
# define R200_TXA_ARG_A_R2_ALPHA (14)
# define R200_TXA_ARG_A_R2_BLUE (15)
# define R200_TXA_ARG_A_R3_ALPHA (16)
# define R200_TXA_ARG_A_R3_BLUE (17)
# define R200_TXA_ARG_A_R4_ALPHA (18)
# define R200_TXA_ARG_A_R4_BLUE (19)
# define R200_TXA_ARG_A_R5_ALPHA (20)
# define R200_TXA_ARG_A_R5_BLUE (21)
# define R200_TXA_ARG_A_TFACTOR1_ALPHA (26)
# define R200_TXA_ARG_A_TFACTOR1_BLUE (27)
# define R200_TXA_ARG_A_MASK (31 << 0)
# define R200_TXA_ARG_A_SHIFT 0
# define R200_TXA_ARG_B_ZERO (0 << 5)
# define R200_TXA_ARG_B_CURRENT_ALPHA (2 << 5) /* guess */
# define R200_TXA_ARG_B_CURRENT_BLUE (3 << 5) /* guess */
# define R200_TXA_ARG_B_DIFFUSE_ALPHA (4 << 5)
# define R200_TXA_ARG_B_DIFFUSE_BLUE (5 << 5)
# define R200_TXA_ARG_B_SPECULAR_ALPHA (6 << 5)
# define R200_TXA_ARG_B_SPECULAR_BLUE (7 << 5)
# define R200_TXA_ARG_B_TFACTOR_ALPHA (8 << 5)
# define R200_TXA_ARG_B_TFACTOR_BLUE (9 << 5)
# define R200_TXA_ARG_B_R0_ALPHA (10 << 5)
# define R200_TXA_ARG_B_R0_BLUE (11 << 5)
# define R200_TXA_ARG_B_R1_ALPHA (12 << 5)
# define R200_TXA_ARG_B_R1_BLUE (13 << 5)
# define R200_TXA_ARG_B_R2_ALPHA (14 << 5)
# define R200_TXA_ARG_B_R2_BLUE (15 << 5)
# define R200_TXA_ARG_B_R3_ALPHA (16 << 5)
# define R200_TXA_ARG_B_R3_BLUE (17 << 5)
# define R200_TXA_ARG_B_R4_ALPHA (18 << 5)
# define R200_TXA_ARG_B_R4_BLUE (19 << 5)
# define R200_TXA_ARG_B_R5_ALPHA (20 << 5)
# define R200_TXA_ARG_B_R5_BLUE (21 << 5)
# define R200_TXA_ARG_B_TFACTOR1_ALPHA (26 << 5)
# define R200_TXA_ARG_B_TFACTOR1_BLUE (27 << 5)
# define R200_TXA_ARG_B_MASK (31 << 5)
# define R200_TXA_ARG_B_SHIFT 5
# define R200_TXA_ARG_C_ZERO (0 << 10)
# define R200_TXA_ARG_C_CURRENT_ALPHA (2 << 10) /* guess */
# define R200_TXA_ARG_C_CURRENT_BLUE (3 << 10) /* guess */
# define R200_TXA_ARG_C_DIFFUSE_ALPHA (4 << 10)
# define R200_TXA_ARG_C_DIFFUSE_BLUE (5 << 10)
# define R200_TXA_ARG_C_SPECULAR_ALPHA (6 << 10)
# define R200_TXA_ARG_C_SPECULAR_BLUE (7 << 10)
# define R200_TXA_ARG_C_TFACTOR_ALPHA (8 << 10)
# define R200_TXA_ARG_C_TFACTOR_BLUE (9 << 10)
# define R200_TXA_ARG_C_R0_ALPHA (10 << 10)
# define R200_TXA_ARG_C_R0_BLUE (11 << 10)
# define R200_TXA_ARG_C_R1_ALPHA (12 << 10)
# define R200_TXA_ARG_C_R1_BLUE (13 << 10)
# define R200_TXA_ARG_C_R2_ALPHA (14 << 10)
# define R200_TXA_ARG_C_R2_BLUE (15 << 10)
# define R200_TXA_ARG_C_R3_ALPHA (16 << 10)
# define R200_TXA_ARG_C_R3_BLUE (17 << 10)
# define R200_TXA_ARG_C_R4_ALPHA (18 << 10)
# define R200_TXA_ARG_C_R4_BLUE (19 << 10)
# define R200_TXA_ARG_C_R5_ALPHA (20 << 10)
# define R200_TXA_ARG_C_R5_BLUE (21 << 10)
# define R200_TXA_ARG_C_TFACTOR1_ALPHA (26 << 10)
# define R200_TXA_ARG_C_TFACTOR1_BLUE (27 << 10)
# define R200_TXA_ARG_C_MASK (31 << 10)
# define R200_TXA_ARG_C_SHIFT 10
# define R200_TXA_COMP_ARG_A (1 << 16)
# define R200_TXA_COMP_ARG_A_SHIFT (16)
# define R200_TXA_BIAS_ARG_A (1 << 17)
# define R200_TXA_SCALE_ARG_A (1 << 18)
# define R200_TXA_NEG_ARG_A (1 << 19)
# define R200_TXA_COMP_ARG_B (1 << 20)
# define R200_TXA_COMP_ARG_B_SHIFT (20)
# define R200_TXA_BIAS_ARG_B (1 << 21)
# define R200_TXA_SCALE_ARG_B (1 << 22)
# define R200_TXA_NEG_ARG_B (1 << 23)
# define R200_TXA_COMP_ARG_C (1 << 24)
# define R200_TXA_COMP_ARG_C_SHIFT (24)
# define R200_TXA_BIAS_ARG_C (1 << 25)
# define R200_TXA_SCALE_ARG_C (1 << 26)
# define R200_TXA_NEG_ARG_C (1 << 27)
# define R200_TXA_OP_MADD (0 << 28)
# define R200_TXA_OP_CND0 (2 << 28)
# define R200_TXA_OP_LERP (3 << 28)
# define R200_TXA_OP_CONDITIONAL (6 << 28)
# define R200_TXA_OP_MASK (7 << 28)
/* Second half of the R200 alpha combiner state: TFACTOR selects, result
 * scale/clamp, output register, dot-alpha flag and component-replicate
 * selectors (mirrors R200_PP_TXCBLEND2_0). */
#define R200_PP_TXABLEND2_0 0x2f0c
# define R200_TXA_TFACTOR_SEL_SHIFT 0
# define R200_TXA_TFACTOR_SEL_MASK 0x7
# define R200_TXA_TFACTOR1_SEL_SHIFT 4
# define R200_TXA_TFACTOR1_SEL_MASK (0x7 << 4)
# define R200_TXA_SCALE_SHIFT 8
# define R200_TXA_SCALE_MASK (7 << 8)
# define R200_TXA_SCALE_1X (0 << 8)
# define R200_TXA_SCALE_2X (1 << 8)
# define R200_TXA_SCALE_4X (2 << 8)
# define R200_TXA_SCALE_8X (3 << 8)
# define R200_TXA_SCALE_INV2 (5 << 8)
# define R200_TXA_SCALE_INV4 (6 << 8)
# define R200_TXA_SCALE_INV8 (7 << 8)
# define R200_TXA_CLAMP_SHIFT 12
# define R200_TXA_CLAMP_MASK (3 << 12)
# define R200_TXA_CLAMP_WRAP (0 << 12)
# define R200_TXA_CLAMP_0_1 (1 << 12)
# define R200_TXA_CLAMP_8_8 (2 << 12)
# define R200_TXA_OUTPUT_REG_MASK (7 << 16)
# define R200_TXA_OUTPUT_REG_NONE (0 << 16)
# define R200_TXA_OUTPUT_REG_R0 (1 << 16)
# define R200_TXA_OUTPUT_REG_R1 (2 << 16)
# define R200_TXA_OUTPUT_REG_R2 (3 << 16)
# define R200_TXA_OUTPUT_REG_R3 (4 << 16)
# define R200_TXA_OUTPUT_REG_R4 (5 << 16)
# define R200_TXA_OUTPUT_REG_R5 (6 << 16)
# define R200_TXA_DOT_ALPHA (1 << 20)
# define R200_TXA_REPL_NORMAL 0
# define R200_TXA_REPL_RED 1
# define R200_TXA_REPL_GREEN 2
# define R200_TXA_REPL_ARG_A_SHIFT 26
# define R200_TXA_REPL_ARG_A_MASK (3 << 26)
# define R200_TXA_REPL_ARG_B_SHIFT 28
# define R200_TXA_REPL_ARG_B_MASK (3 << 28)
# define R200_TXA_REPL_ARG_C_SHIFT 30
/* 3U: a signed (3 << 30) overflows into the sign bit, which is undefined
 * behavior in ISO C.  The mask value is unchanged. */
# define R200_TXA_REPL_ARG_C_MASK (3U << 30)
3117
3118#define R200_SE_VTX_FMT_0 0x2088
3119# define R200_VTX_XY 0 /* always have xy */
3120# define R200_VTX_Z0 (1<<0)
3121# define R200_VTX_W0 (1<<1)
3122# define R200_VTX_WEIGHT_COUNT_SHIFT (2)
3123# define R200_VTX_PV_MATRIX_SEL (1<<5)
3124# define R200_VTX_N0 (1<<6)
3125# define R200_VTX_POINT_SIZE (1<<7)
3126# define R200_VTX_DISCRETE_FOG (1<<8)
3127# define R200_VTX_SHININESS_0 (1<<9)
3128# define R200_VTX_SHININESS_1 (1<<10)
3129# define R200_VTX_COLOR_NOT_PRESENT 0
3130# define R200_VTX_PK_RGBA 1
3131# define R200_VTX_FP_RGB 2
3132# define R200_VTX_FP_RGBA 3
3133# define R200_VTX_COLOR_MASK 3
3134# define R200_VTX_COLOR_0_SHIFT 11
3135# define R200_VTX_COLOR_1_SHIFT 13
3136# define R200_VTX_COLOR_2_SHIFT 15
3137# define R200_VTX_COLOR_3_SHIFT 17
3138# define R200_VTX_COLOR_4_SHIFT 19
3139# define R200_VTX_COLOR_5_SHIFT 21
3140# define R200_VTX_COLOR_6_SHIFT 23
3141# define R200_VTX_COLOR_7_SHIFT 25
3142# define R200_VTX_XY1 (1<<28)
3143# define R200_VTX_Z1 (1<<29)
3144# define R200_VTX_W1 (1<<30)
3145# define R200_VTX_N1 (1<<31)
3146#define R200_SE_VTX_FMT_1 0x208c
3147# define R200_VTX_TEX0_COMP_CNT_SHIFT 0
3148# define R200_VTX_TEX1_COMP_CNT_SHIFT 3
3149# define R200_VTX_TEX2_COMP_CNT_SHIFT 6
3150# define R200_VTX_TEX3_COMP_CNT_SHIFT 9
3151# define R200_VTX_TEX4_COMP_CNT_SHIFT 12
3152# define R200_VTX_TEX5_COMP_CNT_SHIFT 15
3153
3154#define R200_SE_TCL_OUTPUT_VTX_FMT_0 0x2090
3155#define R200_SE_TCL_OUTPUT_VTX_FMT_1 0x2094
3156#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL 0x2250
3157# define R200_OUTPUT_XYZW (1<<0)
3158# define R200_OUTPUT_COLOR_0 (1<<8)
3159# define R200_OUTPUT_COLOR_1 (1<<9)
3160# define R200_OUTPUT_TEX_0 (1<<16)
3161# define R200_OUTPUT_TEX_1 (1<<17)
3162# define R200_OUTPUT_TEX_2 (1<<18)
3163# define R200_OUTPUT_TEX_3 (1<<19)
3164# define R200_OUTPUT_TEX_4 (1<<20)
3165# define R200_OUTPUT_TEX_5 (1<<21)
3166# define R200_OUTPUT_TEX_MASK (0x3f<<16)
3167# define R200_OUTPUT_DISCRETE_FOG (1<<24)
3168# define R200_OUTPUT_PT_SIZE (1<<25)
3169# define R200_FORCE_INORDER_PROC (1<<31)
3170#define R200_PP_CNTL_X 0x2cc4
3171#define R200_PP_TXMULTI_CTL_0 0x2c1c
3172#define R200_SE_VTX_STATE_CNTL 0x2180
3173# define R200_UPDATE_USER_COLOR_0_ENA_MASK (1<<16)
3174
3175 /* Registers for CP and Microcode Engine */
3176#define RADEON_CP_ME_RAM_ADDR 0x07d4
3177#define RADEON_CP_ME_RAM_RADDR 0x07d8
3178#define RADEON_CP_ME_RAM_DATAH 0x07dc
3179#define RADEON_CP_ME_RAM_DATAL 0x07e0
3180
3181#define RADEON_CP_RB_BASE 0x0700
3182#define RADEON_CP_RB_CNTL 0x0704
3183# define RADEON_RB_BUFSZ_SHIFT 0
3184# define RADEON_RB_BUFSZ_MASK (0x3f << 0)
3185# define RADEON_RB_BLKSZ_SHIFT 8
3186# define RADEON_RB_BLKSZ_MASK (0x3f << 8)
3187# define RADEON_MAX_FETCH_SHIFT 18
3188# define RADEON_MAX_FETCH_MASK (0x3 << 18)
3189# define RADEON_RB_NO_UPDATE (1 << 27)
3190# define RADEON_RB_RPTR_WR_ENA (1 << 31)
3191#define RADEON_CP_RB_RPTR_ADDR 0x070c
3192#define RADEON_CP_RB_RPTR 0x0710
3193#define RADEON_CP_RB_WPTR 0x0714
3194#define RADEON_CP_RB_RPTR_WR 0x071c
3195
3196#define RADEON_CP_IB_BASE 0x0738
3197#define RADEON_CP_IB_BUFSZ 0x073c
3198
3199#define RADEON_CP_CSQ_CNTL 0x0740
3200# define RADEON_CSQ_CNT_PRIMARY_MASK (0xff << 0)
3201# define RADEON_CSQ_PRIDIS_INDDIS (0 << 28)
3202# define RADEON_CSQ_PRIPIO_INDDIS (1 << 28)
3203# define RADEON_CSQ_PRIBM_INDDIS (2 << 28)
3204# define RADEON_CSQ_PRIPIO_INDBM (3 << 28)
3205# define RADEON_CSQ_PRIBM_INDBM (4 << 28)
3206# define RADEON_CSQ_PRIPIO_INDPIO (15 << 28)
3207
3208#define R300_CP_RESYNC_ADDR 0x778
3209#define R300_CP_RESYNC_DATA 0x77c
3210
3211#define RADEON_CP_CSQ_STAT 0x07f8
3212# define RADEON_CSQ_RPTR_PRIMARY_MASK (0xff << 0)
3213# define RADEON_CSQ_WPTR_PRIMARY_MASK (0xff << 8)
3214# define RADEON_CSQ_RPTR_INDIRECT_MASK (0xff << 16)
3215# define RADEON_CSQ_WPTR_INDIRECT_MASK (0xff << 24)
3216#define RADEON_CP_CSQ2_STAT 0x07fc
3217#define RADEON_CP_CSQ_ADDR 0x07f0
3218#define RADEON_CP_CSQ_DATA 0x07f4
3219#define RADEON_CP_CSQ_APER_PRIMARY 0x1000
3220#define RADEON_CP_CSQ_APER_INDIRECT 0x1300
3221
3222#define RADEON_CP_RB_WPTR_DELAY 0x0718
3223# define RADEON_PRE_WRITE_TIMER_SHIFT 0
3224# define RADEON_PRE_WRITE_LIMIT_SHIFT 23
3225#define RADEON_CP_CSQ_MODE 0x0744
3226# define RADEON_INDIRECT2_START_SHIFT 0
3227# define RADEON_INDIRECT2_START_MASK (0x7f << 0)
3228# define RADEON_INDIRECT1_START_SHIFT 8
3229# define RADEON_INDIRECT1_START_MASK (0x7f << 8)
3230
3231#define RADEON_AIC_CNTL 0x01d0
3232# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
3233# define RADEON_DIS_OUT_OF_PCI_GART_ACCESS (1 << 1)
3234#define RADEON_AIC_LO_ADDR 0x01dc
3235#define RADEON_AIC_PT_BASE 0x01d8
3236#define RADEON_AIC_HI_ADDR 0x01e0
3237
3238
3239
3240 /* Constants */
3241/* #define RADEON_LAST_FRAME_REG RADEON_GUI_SCRATCH_REG0 */
/* #define RADEON_LAST_CLEAR_REG RADEON_GUI_SCRATCH_REG2 */
3243
3244
3245
3246 /* CP packet types */
3247#define RADEON_CP_PACKET0 0x00000000
3248#define RADEON_CP_PACKET1 0x40000000
3249#define RADEON_CP_PACKET2 0x80000000
3250#define RADEON_CP_PACKET3 0xC0000000
3251# define RADEON_CP_PACKET_MASK 0xC0000000
3252# define RADEON_CP_PACKET_COUNT_MASK 0x3fff0000
3253# define RADEON_CP_PACKET_MAX_DWORDS (1 << 12)
3254# define RADEON_CP_PACKET0_REG_MASK 0x000007ff
3255# define R300_CP_PACKET0_REG_MASK 0x00001fff
3256# define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
3257# define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
3258
3259#define RADEON_CP_PACKET0_ONE_REG_WR 0x00008000
3260
3261#define RADEON_CP_PACKET3_NOP 0xC0001000
3262#define RADEON_CP_PACKET3_NEXT_CHAR 0xC0001900
3263#define RADEON_CP_PACKET3_PLY_NEXTSCAN 0xC0001D00
3264#define RADEON_CP_PACKET3_SET_SCISSORS 0xC0001E00
3265#define RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM 0xC0002300
3266#define RADEON_CP_PACKET3_LOAD_MICROCODE 0xC0002400
3267#define RADEON_CP_PACKET3_WAIT_FOR_IDLE 0xC0002600
3268#define RADEON_CP_PACKET3_3D_DRAW_VBUF 0xC0002800
3269#define RADEON_CP_PACKET3_3D_DRAW_IMMD 0xC0002900
3270#define RADEON_CP_PACKET3_3D_DRAW_INDX 0xC0002A00
3271#define RADEON_CP_PACKET3_LOAD_PALETTE 0xC0002C00
3272#define R200_CP_PACKET3_3D_DRAW_IMMD_2 0xc0003500
3273#define RADEON_CP_PACKET3_3D_LOAD_VBPNTR 0xC0002F00
3274#define RADEON_CP_PACKET3_CNTL_PAINT 0xC0009100
3275#define RADEON_CP_PACKET3_CNTL_BITBLT 0xC0009200
3276#define RADEON_CP_PACKET3_CNTL_SMALLTEXT 0xC0009300
3277#define RADEON_CP_PACKET3_CNTL_HOSTDATA_BLT 0xC0009400
3278#define RADEON_CP_PACKET3_CNTL_POLYLINE 0xC0009500
3279#define RADEON_CP_PACKET3_CNTL_POLYSCANLINES 0xC0009800
3280#define RADEON_CP_PACKET3_CNTL_PAINT_MULTI 0xC0009A00
3281#define RADEON_CP_PACKET3_CNTL_BITBLT_MULTI 0xC0009B00
3282#define RADEON_CP_PACKET3_CNTL_TRANS_BITBLT 0xC0009C00
3283
3284
3285#define RADEON_CP_VC_FRMT_XY 0x00000000
3286#define RADEON_CP_VC_FRMT_W0 0x00000001
3287#define RADEON_CP_VC_FRMT_FPCOLOR 0x00000002
3288#define RADEON_CP_VC_FRMT_FPALPHA 0x00000004
3289#define RADEON_CP_VC_FRMT_PKCOLOR 0x00000008
3290#define RADEON_CP_VC_FRMT_FPSPEC 0x00000010
3291#define RADEON_CP_VC_FRMT_FPFOG 0x00000020
3292#define RADEON_CP_VC_FRMT_PKSPEC 0x00000040
3293#define RADEON_CP_VC_FRMT_ST0 0x00000080
3294#define RADEON_CP_VC_FRMT_ST1 0x00000100
3295#define RADEON_CP_VC_FRMT_Q1 0x00000200
3296#define RADEON_CP_VC_FRMT_ST2 0x00000400
3297#define RADEON_CP_VC_FRMT_Q2 0x00000800
3298#define RADEON_CP_VC_FRMT_ST3 0x00001000
3299#define RADEON_CP_VC_FRMT_Q3 0x00002000
3300#define RADEON_CP_VC_FRMT_Q0 0x00004000
3301#define RADEON_CP_VC_FRMT_BLND_WEIGHT_CNT_MASK 0x00038000
3302#define RADEON_CP_VC_FRMT_N0 0x00040000
3303#define RADEON_CP_VC_FRMT_XY1 0x08000000
3304#define RADEON_CP_VC_FRMT_Z1 0x10000000
3305#define RADEON_CP_VC_FRMT_W1 0x20000000
3306#define RADEON_CP_VC_FRMT_N1 0x40000000
3307#define RADEON_CP_VC_FRMT_Z 0x80000000
3308
3309#define RADEON_CP_VC_CNTL_PRIM_TYPE_NONE 0x00000000
3310#define RADEON_CP_VC_CNTL_PRIM_TYPE_POINT 0x00000001
3311#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE 0x00000002
3312#define RADEON_CP_VC_CNTL_PRIM_TYPE_LINE_STRIP 0x00000003
3313#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004
3314#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005
3315#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006
3316#define RADEON_CP_VC_CNTL_PRIM_TYPE_TRI_TYPE_2 0x00000007
3317#define RADEON_CP_VC_CNTL_PRIM_TYPE_RECT_LIST 0x00000008
3318#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_POINT_LIST 0x00000009
3319#define RADEON_CP_VC_CNTL_PRIM_TYPE_3VRT_LINE_LIST 0x0000000a
3320#define RADEON_CP_VC_CNTL_PRIM_WALK_IND 0x00000010
3321#define RADEON_CP_VC_CNTL_PRIM_WALK_LIST 0x00000020
3322#define RADEON_CP_VC_CNTL_PRIM_WALK_RING 0x00000030
3323#define RADEON_CP_VC_CNTL_COLOR_ORDER_BGRA 0x00000000
3324#define RADEON_CP_VC_CNTL_COLOR_ORDER_RGBA 0x00000040
3325#define RADEON_CP_VC_CNTL_MAOS_ENABLE 0x00000080
3326#define RADEON_CP_VC_CNTL_VTX_FMT_NON_RADEON_MODE 0x00000000
3327#define RADEON_CP_VC_CNTL_VTX_FMT_RADEON_MODE 0x00000100
3328#define RADEON_CP_VC_CNTL_TCL_DISABLE 0x00000000
3329#define RADEON_CP_VC_CNTL_TCL_ENABLE 0x00000200
3330#define RADEON_CP_VC_CNTL_NUM_SHIFT 16
3331
3332#define RADEON_VS_MATRIX_0_ADDR 0
3333#define RADEON_VS_MATRIX_1_ADDR 4
3334#define RADEON_VS_MATRIX_2_ADDR 8
3335#define RADEON_VS_MATRIX_3_ADDR 12
3336#define RADEON_VS_MATRIX_4_ADDR 16
3337#define RADEON_VS_MATRIX_5_ADDR 20
3338#define RADEON_VS_MATRIX_6_ADDR 24
3339#define RADEON_VS_MATRIX_7_ADDR 28
3340#define RADEON_VS_MATRIX_8_ADDR 32
3341#define RADEON_VS_MATRIX_9_ADDR 36
3342#define RADEON_VS_MATRIX_10_ADDR 40
3343#define RADEON_VS_MATRIX_11_ADDR 44
3344#define RADEON_VS_MATRIX_12_ADDR 48
3345#define RADEON_VS_MATRIX_13_ADDR 52
3346#define RADEON_VS_MATRIX_14_ADDR 56
3347#define RADEON_VS_MATRIX_15_ADDR 60
3348#define RADEON_VS_LIGHT_AMBIENT_ADDR 64
3349#define RADEON_VS_LIGHT_DIFFUSE_ADDR 72
3350#define RADEON_VS_LIGHT_SPECULAR_ADDR 80
3351#define RADEON_VS_LIGHT_DIRPOS_ADDR 88
3352#define RADEON_VS_LIGHT_HWVSPOT_ADDR 96
3353#define RADEON_VS_LIGHT_ATTENUATION_ADDR 104
3354#define RADEON_VS_MATRIX_EYE2CLIP_ADDR 112
3355#define RADEON_VS_UCP_ADDR 116
3356#define RADEON_VS_GLOBAL_AMBIENT_ADDR 122
3357#define RADEON_VS_FOG_PARAM_ADDR 123
3358#define RADEON_VS_EYE_VECTOR_ADDR 124
3359
3360#define RADEON_SS_LIGHT_DCD_ADDR 0
3361#define RADEON_SS_LIGHT_SPOT_EXPONENT_ADDR 8
3362#define RADEON_SS_LIGHT_SPOT_CUTOFF_ADDR 16
3363#define RADEON_SS_LIGHT_SPECULAR_THRESH_ADDR 24
3364#define RADEON_SS_LIGHT_RANGE_CUTOFF_ADDR 32
3365#define RADEON_SS_VERT_GUARD_CLIP_ADJ_ADDR 48
3366#define RADEON_SS_VERT_GUARD_DISCARD_ADJ_ADDR 49
3367#define RADEON_SS_HORZ_GUARD_CLIP_ADJ_ADDR 50
3368#define RADEON_SS_HORZ_GUARD_DISCARD_ADJ_ADDR 51
3369#define RADEON_SS_SHININESS 60
3370
3371#define RADEON_TV_MASTER_CNTL 0x0800
3372# define RADEON_TV_ASYNC_RST (1 << 0)
3373# define RADEON_CRT_ASYNC_RST (1 << 1)
3374# define RADEON_RESTART_PHASE_FIX (1 << 3)
3375# define RADEON_TV_FIFO_ASYNC_RST (1 << 4)
3376# define RADEON_VIN_ASYNC_RST (1 << 5)
3377# define RADEON_AUD_ASYNC_RST (1 << 6)
3378# define RADEON_DVS_ASYNC_RST (1 << 7)
3379# define RADEON_CRT_FIFO_CE_EN (1 << 9)
3380# define RADEON_TV_FIFO_CE_EN (1 << 10)
3381# define RADEON_RE_SYNC_NOW_SEL_MASK (3 << 14)
3382# define RADEON_TVCLK_ALWAYS_ONb (1 << 30)
3383# define RADEON_TV_ON (1 << 31)
3384#define RADEON_TV_PRE_DAC_MUX_CNTL 0x0888
3385# define RADEON_Y_RED_EN (1 << 0)
3386# define RADEON_C_GRN_EN (1 << 1)
3387# define RADEON_CMP_BLU_EN (1 << 2)
3388# define RADEON_DAC_DITHER_EN (1 << 3)
3389# define RADEON_RED_MX_FORCE_DAC_DATA (6 << 4)
3390# define RADEON_GRN_MX_FORCE_DAC_DATA (6 << 8)
3391# define RADEON_BLU_MX_FORCE_DAC_DATA (6 << 12)
3392# define RADEON_TV_FORCE_DAC_DATA_SHIFT 16
3393#define RADEON_TV_RGB_CNTL 0x0804
3394# define RADEON_SWITCH_TO_BLUE (1 << 4)
3395# define RADEON_RGB_DITHER_EN (1 << 5)
3396# define RADEON_RGB_SRC_SEL_MASK (3 << 8)
3397# define RADEON_RGB_SRC_SEL_CRTC1 (0 << 8)
3398# define RADEON_RGB_SRC_SEL_RMX (1 << 8)
3399# define RADEON_RGB_SRC_SEL_CRTC2 (2 << 8)
3400# define RADEON_RGB_CONVERT_BY_PASS (1 << 10)
3401# define RADEON_UVRAM_READ_MARGIN_SHIFT 16
3402# define RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT 20
3403# define RADEON_TVOUT_SCALE_EN (1 << 26)
3404#define RADEON_TV_SYNC_CNTL 0x0808
3405# define RADEON_SYNC_OE (1 << 0)
3406# define RADEON_SYNC_OUT (1 << 1)
3407# define RADEON_SYNC_IN (1 << 2)
3408# define RADEON_SYNC_PUB (1 << 3)
3409# define RADEON_SYNC_PD (1 << 4)
3410# define RADEON_TV_SYNC_IO_DRIVE (1 << 5)
3411#define RADEON_TV_HTOTAL 0x080c
3412#define RADEON_TV_HDISP 0x0810
3413#define RADEON_TV_HSTART 0x0818
3414#define RADEON_TV_HCOUNT 0x081C
3415#define RADEON_TV_VTOTAL 0x0820
3416#define RADEON_TV_VDISP 0x0824
3417#define RADEON_TV_VCOUNT 0x0828
3418#define RADEON_TV_FTOTAL 0x082c
3419#define RADEON_TV_FCOUNT 0x0830
3420#define RADEON_TV_FRESTART 0x0834
3421#define RADEON_TV_HRESTART 0x0838
3422#define RADEON_TV_VRESTART 0x083c
3423#define RADEON_TV_HOST_READ_DATA 0x0840
3424#define RADEON_TV_HOST_WRITE_DATA 0x0844
3425#define RADEON_TV_HOST_RD_WT_CNTL 0x0848
3426# define RADEON_HOST_FIFO_RD (1 << 12)
3427# define RADEON_HOST_FIFO_RD_ACK (1 << 13)
3428# define RADEON_HOST_FIFO_WT (1 << 14)
3429# define RADEON_HOST_FIFO_WT_ACK (1 << 15)
3430#define RADEON_TV_VSCALER_CNTL1 0x084c
3431# define RADEON_UV_INC_MASK 0xffff
3432# define RADEON_UV_INC_SHIFT 0
3433# define RADEON_Y_W_EN (1 << 24)
3434# define RADEON_RESTART_FIELD (1 << 29) /* restart on field 0 */
3435# define RADEON_Y_DEL_W_SIG_SHIFT 26
3436#define RADEON_TV_TIMING_CNTL 0x0850
3437# define RADEON_H_INC_MASK 0xfff
3438# define RADEON_H_INC_SHIFT 0
3439# define RADEON_REQ_Y_FIRST (1 << 19)
3440# define RADEON_FORCE_BURST_ALWAYS (1 << 21)
3441# define RADEON_UV_POST_SCALE_BYPASS (1 << 23)
3442# define RADEON_UV_OUTPUT_POST_SCALE_SHIFT 24
3443#define RADEON_TV_VSCALER_CNTL2 0x0854
3444# define RADEON_DITHER_MODE (1 << 0)
3445# define RADEON_Y_OUTPUT_DITHER_EN (1 << 1)
3446# define RADEON_UV_OUTPUT_DITHER_EN (1 << 2)
3447# define RADEON_UV_TO_BUF_DITHER_EN (1 << 3)
3448#define RADEON_TV_Y_FALL_CNTL 0x0858
3449# define RADEON_Y_FALL_PING_PONG (1 << 16)
3450# define RADEON_Y_COEF_EN (1 << 17)
3451#define RADEON_TV_Y_RISE_CNTL 0x085c
3452# define RADEON_Y_RISE_PING_PONG (1 << 16)
3453#define RADEON_TV_Y_SAW_TOOTH_CNTL 0x0860
3454#define RADEON_TV_UPSAMP_AND_GAIN_CNTL 0x0864
3455# define RADEON_YUPSAMP_EN (1 << 0)
3456# define RADEON_UVUPSAMP_EN (1 << 2)
3457#define RADEON_TV_GAIN_LIMIT_SETTINGS 0x0868
3458# define RADEON_Y_GAIN_LIMIT_SHIFT 0
3459# define RADEON_UV_GAIN_LIMIT_SHIFT 16
3460#define RADEON_TV_LINEAR_GAIN_SETTINGS 0x086c
3461# define RADEON_Y_GAIN_SHIFT 0
3462# define RADEON_UV_GAIN_SHIFT 16
3463#define RADEON_TV_MODULATOR_CNTL1 0x0870
3464# define RADEON_YFLT_EN (1 << 2)
3465# define RADEON_UVFLT_EN (1 << 3)
3466# define RADEON_ALT_PHASE_EN (1 << 6)
3467# define RADEON_SYNC_TIP_LEVEL (1 << 7)
3468# define RADEON_BLANK_LEVEL_SHIFT 8
3469# define RADEON_SET_UP_LEVEL_SHIFT 16
3470# define RADEON_SLEW_RATE_LIMIT (1 << 23)
3471# define RADEON_CY_FILT_BLEND_SHIFT 28
3472#define RADEON_TV_MODULATOR_CNTL2 0x0874
3473# define RADEON_TV_U_BURST_LEVEL_MASK 0x1ff
3474# define RADEON_TV_V_BURST_LEVEL_MASK 0x1ff
3475# define RADEON_TV_V_BURST_LEVEL_SHIFT 16
3476#define RADEON_TV_CRC_CNTL 0x0890
3477#define RADEON_TV_UV_ADR 0x08ac
3478# define RADEON_MAX_UV_ADR_MASK 0x000000ff
3479# define RADEON_MAX_UV_ADR_SHIFT 0
3480# define RADEON_TABLE1_BOT_ADR_MASK 0x0000ff00
3481# define RADEON_TABLE1_BOT_ADR_SHIFT 8
3482# define RADEON_TABLE3_TOP_ADR_MASK 0x00ff0000
3483# define RADEON_TABLE3_TOP_ADR_SHIFT 16
3484# define RADEON_HCODE_TABLE_SEL_MASK 0x06000000
3485# define RADEON_HCODE_TABLE_SEL_SHIFT 25
3486# define RADEON_VCODE_TABLE_SEL_MASK 0x18000000
3487# define RADEON_VCODE_TABLE_SEL_SHIFT 27
3488# define RADEON_TV_MAX_FIFO_ADDR 0x1a7
3489# define RADEON_TV_MAX_FIFO_ADDR_INTERNAL 0x1ff
3490#define RADEON_TV_PLL_FINE_CNTL 0x0020 /* PLL */
3491#define RADEON_TV_PLL_CNTL 0x0021 /* PLL */
3492# define RADEON_TV_M0LO_MASK 0xff
3493# define RADEON_TV_M0HI_MASK 0x7
3494# define RADEON_TV_M0HI_SHIFT 18
3495# define RADEON_TV_N0LO_MASK 0x1ff
3496# define RADEON_TV_N0LO_SHIFT 8
3497# define RADEON_TV_N0HI_MASK 0x3
3498# define RADEON_TV_N0HI_SHIFT 21
3499# define RADEON_TV_P_MASK 0xf
3500# define RADEON_TV_P_SHIFT 24
3501# define RADEON_TV_SLIP_EN (1 << 23)
3502# define RADEON_TV_DTO_EN (1 << 28)
3503#define RADEON_TV_PLL_CNTL1 0x0022 /* PLL */
3504# define RADEON_TVPLL_RESET (1 << 1)
3505# define RADEON_TVPLL_SLEEP (1 << 3)
3506# define RADEON_TVPLL_REFCLK_SEL (1 << 4)
3507# define RADEON_TVPCP_SHIFT 8
3508# define RADEON_TVPCP_MASK (7 << 8)
3509# define RADEON_TVPVG_SHIFT 11
3510# define RADEON_TVPVG_MASK (7 << 11)
3511# define RADEON_TVPDC_SHIFT 14
3512# define RADEON_TVPDC_MASK (3 << 14)
3513# define RADEON_TVPLL_TEST_DIS (1 << 31)
3514# define RADEON_TVCLK_SRC_SEL_TVPLL (1 << 30)
3515
3516#define RS400_DISP2_REQ_CNTL1 0xe30
3517# define RS400_DISP2_START_REQ_LEVEL_SHIFT 0
3518# define RS400_DISP2_START_REQ_LEVEL_MASK 0x3ff
3519# define RS400_DISP2_STOP_REQ_LEVEL_SHIFT 12
3520# define RS400_DISP2_STOP_REQ_LEVEL_MASK 0x3ff
3521# define RS400_DISP2_ALLOW_FID_LEVEL_SHIFT 22
3522# define RS400_DISP2_ALLOW_FID_LEVEL_MASK 0x3ff
3523#define RS400_DISP2_REQ_CNTL2 0xe34
3524# define RS400_DISP2_CRITICAL_POINT_START_SHIFT 12
3525# define RS400_DISP2_CRITICAL_POINT_START_MASK 0x3ff
3526# define RS400_DISP2_CRITICAL_POINT_STOP_SHIFT 22
3527# define RS400_DISP2_CRITICAL_POINT_STOP_MASK 0x3ff
3528#define RS400_DMIF_MEM_CNTL1 0xe38
3529# define RS400_DISP2_START_ADR_SHIFT 0
3530# define RS400_DISP2_START_ADR_MASK 0x3ff
3531# define RS400_DISP1_CRITICAL_POINT_START_SHIFT 12
3532# define RS400_DISP1_CRITICAL_POINT_START_MASK 0x3ff
3533# define RS400_DISP1_CRITICAL_POINT_STOP_SHIFT 22
3534# define RS400_DISP1_CRITICAL_POINT_STOP_MASK 0x3ff
3535#define RS400_DISP1_REQ_CNTL1 0xe3c
3536# define RS400_DISP1_START_REQ_LEVEL_SHIFT 0
3537# define RS400_DISP1_START_REQ_LEVEL_MASK 0x3ff
3538# define RS400_DISP1_STOP_REQ_LEVEL_SHIFT 12
3539# define RS400_DISP1_STOP_REQ_LEVEL_MASK 0x3ff
3540# define RS400_DISP1_ALLOW_FID_LEVEL_SHIFT 22
3541# define RS400_DISP1_ALLOW_FID_LEVEL_MASK 0x3ff
3542
3543#define RADEON_PCIE_INDEX 0x0030
3544#define RADEON_PCIE_DATA 0x0034
3545#define RADEON_PCIE_TX_GART_CNTL 0x10
3546# define RADEON_PCIE_TX_GART_EN (1 << 0)
3547# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
3548# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1)
3549# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1)
3550# define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3)
3551# define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3)
3552# define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5)
3553# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8)
3554#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
3555#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
3556#define RADEON_PCIE_TX_GART_BASE 0x13
3557#define RADEON_PCIE_TX_GART_START_LO 0x14
3558#define RADEON_PCIE_TX_GART_START_HI 0x15
3559#define RADEON_PCIE_TX_GART_END_LO 0x16
3560#define RADEON_PCIE_TX_GART_END_HI 0x17
3561#define RADEON_PCIE_TX_GART_ERROR 0x18
3562
3563#define RADEON_SCRATCH_REG0 0x15e0
3564#define RADEON_SCRATCH_REG1 0x15e4
3565#define RADEON_SCRATCH_REG2 0x15e8
3566#define RADEON_SCRATCH_REG3 0x15ec
3567#define RADEON_SCRATCH_REG4 0x15f0
3568#define RADEON_SCRATCH_REG5 0x15f4
3569
3570#endif
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
new file mode 100644
index 000000000000..a853261d1881
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -0,0 +1,485 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include "drmP.h"
30#include "radeon_drm.h"
31#include "radeon_reg.h"
32#include "radeon.h"
33#include "atom.h"
34
35int radeon_debugfs_ib_init(struct radeon_device *rdev);
36
37/*
38 * IB.
39 */
/*
 * radeon_ib_get() - acquire an indirect buffer (IB) from the pool.
 * @rdev: radeon device
 * @ib:   out parameter; set to the acquired IB on success, NULL otherwise
 *
 * Creates a fence for the new IB, then takes either a free pool slot or,
 * if the pool is exhausted, reclaims the oldest scheduled IB after waiting
 * for its fence to signal.  Returns 0 on success or a negative error code.
 */
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	unsigned long i;
	int r = 0;

	*ib = NULL;
	/* Create the fence first so a failure leaves the pool untouched. */
	r = radeon_fence_create(rdev, &fence);
	if (r) {
		DRM_ERROR("failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	/* Fast path: grab the first unallocated slot in the pool bitmap. */
	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
	if (i < RADEON_IB_POOL_SIZE) {
		set_bit(i, rdev->ib_pool.alloc_bm);
		rdev->ib_pool.ibs[i].length_dw = 0;
		*ib = &rdev->ib_pool.ibs[i];
		goto out;
	}
	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
		/* Pool is full and nothing is in flight to reclaim. */
		DRM_ERROR("all IB allocated none scheduled.\n");
		r = -EINVAL;
		goto out;
	}
	/* Reclaim the oldest scheduled IB (head of the list). */
	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
			 struct radeon_ib, list);
	if (nib->fence == NULL) {
		/* A scheduled IB must carry a fence; bail out otherwise. */
		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
		r = -EINVAL;
		goto out;
	}
	/* Wait (with the pool mutex held) until the GPU is done with it. */
	r = radeon_fence_wait(nib->fence, false);
	if (r) {
		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
			  (unsigned long)nib->gpu_addr, nib->length_dw);
		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
		goto out;
	}
	radeon_fence_unref(&nib->fence);
	nib->length_dw = 0;
	list_del(&nib->list);
	INIT_LIST_HEAD(&nib->list);
	*ib = nib;
out:
	mutex_unlock(&rdev->ib_pool.mutex);
	if (r) {
		/* On failure drop the fence we created up front. */
		radeon_fence_unref(&fence);
	} else {
		(*ib)->fence = fence;
	}
	return r;
}
97
/*
 * radeon_ib_free() - return an IB to the pool and NULL the caller's pointer.
 * @rdev: radeon device
 * @ib:   IB to release; *ib is set to NULL unconditionally
 *
 * An IB that is still on the scheduled list with an unsignaled fence is
 * left alone (the GPU may still be fetching it); it will be reclaimed
 * later by radeon_ib_get().
 */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
		/* IB is scheduled and not yet signaled: do not touch it. */
		mutex_unlock(&rdev->ib_pool.mutex);
		return;
	}
	list_del(&tmp->list);
	INIT_LIST_HEAD(&tmp->list);
	if (tmp->fence) {
		radeon_fence_unref(&tmp->fence);
	}
	tmp->length_dw = 0;
	/* Mark the pool slot free again. */
	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
	mutex_unlock(&rdev->ib_pool.mutex);
}
121
122static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib)
123{
124 while ((ib->length_dw & rdev->cp.align_mask)) {
125 ib->ptr[ib->length_dw++] = PACKET2(0);
126 }
127}
128
129static void radeon_ib_cpu_flush(struct radeon_device *rdev,
130 struct radeon_ib *ib)
131{
132 unsigned long tmp;
133 unsigned i;
134
135 /* To force CPU cache flush ugly but seems reliable */
136 for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) {
137 tmp = readl(&ib->ptr[i]);
138 }
139}
140
141int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
142{
143 int r = 0;
144
145 mutex_lock(&rdev->ib_pool.mutex);
146 radeon_ib_align(rdev, ib);
147 radeon_ib_cpu_flush(rdev, ib);
148 if (!ib->length_dw || !rdev->cp.ready) {
149 /* TODO: Nothings in the ib we should report. */
150 mutex_unlock(&rdev->ib_pool.mutex);
151 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
152 return -EINVAL;
153 }
154 /* 64 dwords should be enought for fence too */
155 r = radeon_ring_lock(rdev, 64);
156 if (r) {
157 DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
158 mutex_unlock(&rdev->ib_pool.mutex);
159 return r;
160 }
161 radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
162 radeon_ring_write(rdev, ib->gpu_addr);
163 radeon_ring_write(rdev, ib->length_dw);
164 radeon_fence_emit(rdev, ib->fence);
165 radeon_ring_unlock_commit(rdev);
166 list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
167 mutex_unlock(&rdev->ib_pool.mutex);
168 return 0;
169}
170
171int radeon_ib_pool_init(struct radeon_device *rdev)
172{
173 void *ptr;
174 uint64_t gpu_addr;
175 int i;
176 int r = 0;
177
178 /* Allocate 1M object buffer */
179 INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
180 r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
181 true, RADEON_GEM_DOMAIN_GTT,
182 false, &rdev->ib_pool.robj);
183 if (r) {
184 DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
185 return r;
186 }
187 r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
188 if (r) {
189 DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
190 return r;
191 }
192 r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
193 if (r) {
194 DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
195 return r;
196 }
197 for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
198 unsigned offset;
199
200 offset = i * 64 * 1024;
201 rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
202 rdev->ib_pool.ibs[i].ptr = ptr + offset;
203 rdev->ib_pool.ibs[i].idx = i;
204 rdev->ib_pool.ibs[i].length_dw = 0;
205 INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
206 }
207 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
208 rdev->ib_pool.ready = true;
209 DRM_INFO("radeon: ib pool ready.\n");
210 if (radeon_debugfs_ib_init(rdev)) {
211 DRM_ERROR("Failed to register debugfs file for IB !\n");
212 }
213 return r;
214}
215
/*
 * radeon_ib_pool_fini() - tear down the IB pool.
 * @rdev: radeon device
 *
 * No-op if the pool was never successfully initialized.
 */
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	/* Forget all outstanding allocations, then drop the backing object. */
	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
	if (rdev->ib_pool.robj) {
		radeon_object_kunmap(rdev->ib_pool.robj);
		radeon_object_unref(&rdev->ib_pool.robj);
		rdev->ib_pool.robj = NULL;
	}
	mutex_unlock(&rdev->ib_pool.mutex);
}
230
231int radeon_ib_test(struct radeon_device *rdev)
232{
233 struct radeon_ib *ib;
234 uint32_t scratch;
235 uint32_t tmp = 0;
236 unsigned i;
237 int r;
238
239 r = radeon_scratch_get(rdev, &scratch);
240 if (r) {
241 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
242 return r;
243 }
244 WREG32(scratch, 0xCAFEDEAD);
245 r = radeon_ib_get(rdev, &ib);
246 if (r) {
247 return r;
248 }
249 ib->ptr[0] = PACKET0(scratch, 0);
250 ib->ptr[1] = 0xDEADBEEF;
251 ib->ptr[2] = PACKET2(0);
252 ib->ptr[3] = PACKET2(0);
253 ib->ptr[4] = PACKET2(0);
254 ib->ptr[5] = PACKET2(0);
255 ib->ptr[6] = PACKET2(0);
256 ib->ptr[7] = PACKET2(0);
257 ib->length_dw = 8;
258 r = radeon_ib_schedule(rdev, ib);
259 if (r) {
260 radeon_scratch_free(rdev, scratch);
261 radeon_ib_free(rdev, &ib);
262 return r;
263 }
264 r = radeon_fence_wait(ib->fence, false);
265 if (r) {
266 return r;
267 }
268 for (i = 0; i < rdev->usec_timeout; i++) {
269 tmp = RREG32(scratch);
270 if (tmp == 0xDEADBEEF) {
271 break;
272 }
273 DRM_UDELAY(1);
274 }
275 if (i < rdev->usec_timeout) {
276 DRM_INFO("ib test succeeded in %u usecs\n", i);
277 } else {
278 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
279 scratch, tmp);
280 r = -EINVAL;
281 }
282 radeon_scratch_free(rdev, scratch);
283 radeon_ib_free(rdev, &ib);
284 return r;
285}
286
287
288/*
289 * Ring.
290 */
291void radeon_ring_free_size(struct radeon_device *rdev)
292{
293 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
294 /* This works because ring_size is a power of 2 */
295 rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
296 rdev->cp.ring_free_dw -= rdev->cp.wptr;
297 rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
298 if (!rdev->cp.ring_free_dw) {
299 rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
300 }
301}
302
/*
 * radeon_ring_lock() - take the CP ring mutex and reserve @ndw dwords.
 * @rdev: radeon device
 * @ndw:  number of dwords the caller intends to write
 *
 * Blocks (waiting on fences) until enough free space is available.  On
 * success the ring mutex is held; pair with radeon_ring_unlock_commit()
 * or radeon_ring_unlock_undo().  On error the mutex is released and a
 * negative error code is returned.
 */
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
	mutex_lock(&rdev->cp.mutex);
	while (ndw > (rdev->cp.ring_free_dw - 1)) {
		/* Refresh the cached free-space count from the hardware
		 * read pointer before deciding whether to wait. */
		radeon_ring_free_size(rdev);
		if (ndw < rdev->cp.ring_free_dw) {
			break;
		}
		/* Still no room: wait for the next fence so the GPU
		 * consumes some of the ring. */
		r = radeon_fence_wait_next(rdev);
		if (r) {
			mutex_unlock(&rdev->cp.mutex);
			return r;
		}
	}
	/* Remember the reservation and the rollback point for undo. */
	rdev->cp.count_dw = ndw;
	rdev->cp.wptr_old = rdev->cp.wptr;
	return 0;
}
326
/*
 * radeon_ring_unlock_commit() - pad the ring, publish the new write
 * pointer to the hardware, and release the mutex taken by
 * radeon_ring_lock().
 */
void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size.  NOTE(review): an already-aligned
	 * wptr still receives a full (align_mask + 1) dwords of padding —
	 * presumably intentional so the CP always sees a whole fetch
	 * unit; confirm. */
	count_dw_pad = (rdev->cp.align_mask + 1) -
		       (rdev->cp.wptr & rdev->cp.align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(rdev, PACKET2(0));
	}
	/* Ensure all ring writes are visible before the GPU sees the new
	 * write pointer. */
	DRM_MEMORYBARRIER();
	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
	/* Read back to flush the posted register write. */
	(void)RREG32(RADEON_CP_RB_WPTR);
	mutex_unlock(&rdev->cp.mutex);
}
343
/*
 * radeon_ring_unlock_undo() - discard everything written since
 * radeon_ring_lock() by restoring the saved write pointer, then release
 * the ring mutex.
 */
void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
	rdev->cp.wptr = rdev->cp.wptr_old;
	mutex_unlock(&rdev->cp.mutex);
}
349
350int radeon_ring_test(struct radeon_device *rdev)
351{
352 uint32_t scratch;
353 uint32_t tmp = 0;
354 unsigned i;
355 int r;
356
357 r = radeon_scratch_get(rdev, &scratch);
358 if (r) {
359 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
360 return r;
361 }
362 WREG32(scratch, 0xCAFEDEAD);
363 r = radeon_ring_lock(rdev, 2);
364 if (r) {
365 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
366 radeon_scratch_free(rdev, scratch);
367 return r;
368 }
369 radeon_ring_write(rdev, PACKET0(scratch, 0));
370 radeon_ring_write(rdev, 0xDEADBEEF);
371 radeon_ring_unlock_commit(rdev);
372 for (i = 0; i < rdev->usec_timeout; i++) {
373 tmp = RREG32(scratch);
374 if (tmp == 0xDEADBEEF) {
375 break;
376 }
377 DRM_UDELAY(1);
378 }
379 if (i < rdev->usec_timeout) {
380 DRM_INFO("ring test succeeded in %d usecs\n", i);
381 } else {
382 DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
383 scratch, tmp);
384 r = -EINVAL;
385 }
386 radeon_scratch_free(rdev, scratch);
387 return r;
388}
389
390int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
391{
392 int r;
393
394 rdev->cp.ring_size = ring_size;
395 /* Allocate ring buffer */
396 if (rdev->cp.ring_obj == NULL) {
397 r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
398 true,
399 RADEON_GEM_DOMAIN_GTT,
400 false,
401 &rdev->cp.ring_obj);
402 if (r) {
403 DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
404 mutex_unlock(&rdev->cp.mutex);
405 return r;
406 }
407 r = radeon_object_pin(rdev->cp.ring_obj,
408 RADEON_GEM_DOMAIN_GTT,
409 &rdev->cp.gpu_addr);
410 if (r) {
411 DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
412 mutex_unlock(&rdev->cp.mutex);
413 return r;
414 }
415 r = radeon_object_kmap(rdev->cp.ring_obj,
416 (void **)&rdev->cp.ring);
417 if (r) {
418 DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
419 mutex_unlock(&rdev->cp.mutex);
420 return r;
421 }
422 }
423 rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
424 rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
425 return 0;
426}
427
428void radeon_ring_fini(struct radeon_device *rdev)
429{
430 mutex_lock(&rdev->cp.mutex);
431 if (rdev->cp.ring_obj) {
432 radeon_object_kunmap(rdev->cp.ring_obj);
433 radeon_object_unpin(rdev->cp.ring_obj);
434 radeon_object_unref(&rdev->cp.ring_obj);
435 rdev->cp.ring = NULL;
436 rdev->cp.ring_obj = NULL;
437 }
438 mutex_unlock(&rdev->cp.mutex);
439}
440
441
442/*
443 * Debugfs info
444 */
445#if defined(CONFIG_DEBUG_FS)
446static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
447{
448 struct drm_info_node *node = (struct drm_info_node *) m->private;
449 struct radeon_ib *ib = node->info_ent->data;
450 unsigned i;
451
452 if (ib == NULL) {
453 return 0;
454 }
455 seq_printf(m, "IB %04lu\n", ib->idx);
456 seq_printf(m, "IB fence %p\n", ib->fence);
457 seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
458 for (i = 0; i < ib->length_dw; i++) {
459 seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
460 }
461 return 0;
462}
463
464static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
465static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
466#endif
467
/*
 * radeon_debugfs_ib_init() - register one debugfs file per pool IB.
 * @rdev: radeon device
 *
 * Returns 0 on success (or when debugfs is compiled out), negative
 * error code otherwise.
 */
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		/* Bounded write into the fixed 32-byte name buffer
		 * (was an unbounded sprintf). */
		snprintf(radeon_debugfs_ib_names[i],
			 sizeof(radeon_debugfs_ib_names[i]),
			 "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
new file mode 100644
index 000000000000..4c087c1510d7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -0,0 +1,653 @@
1/*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32#include <ttm/ttm_bo_api.h>
33#include <ttm/ttm_bo_driver.h>
34#include <ttm/ttm_placement.h>
35#include <ttm/ttm_module.h>
36#include <drm/drmP.h>
37#include <drm/radeon_drm.h>
38#include "radeon_reg.h"
39#include "radeon.h"
40
41#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
42
43static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
44{
45 struct radeon_mman *mman;
46 struct radeon_device *rdev;
47
48 mman = container_of(bdev, struct radeon_mman, bdev);
49 rdev = container_of(mman, struct radeon_device, mman);
50 return rdev;
51}
52
53
54/*
55 * Global memory.
56 */
/*
 * Init/release adapters for the global TTM memory accounting object.
 * TTM's global-reference machinery calls these with ref->object pointing
 * at the shared struct ttm_mem_global instance.
 */
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
66
67static int radeon_ttm_global_init(struct radeon_device *rdev)
68{
69 struct ttm_global_reference *global_ref;
70 int r;
71
72 rdev->mman.mem_global_referenced = false;
73 global_ref = &rdev->mman.mem_global_ref;
74 global_ref->global_type = TTM_GLOBAL_TTM_MEM;
75 global_ref->size = sizeof(struct ttm_mem_global);
76 global_ref->init = &radeon_ttm_mem_global_init;
77 global_ref->release = &radeon_ttm_mem_global_release;
78 r = ttm_global_item_ref(global_ref);
79 if (r != 0) {
80 DRM_ERROR("Failed referencing a global TTM memory object.\n");
81 return r;
82 }
83 rdev->mman.mem_global_referenced = true;
84 return 0;
85}
86
87static void radeon_ttm_global_fini(struct radeon_device *rdev)
88{
89 if (rdev->mman.mem_global_referenced) {
90 ttm_global_item_unref(&rdev->mman.mem_global_ref);
91 rdev->mman.mem_global_referenced = false;
92 }
93}
94
95struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
96
/*
 * radeon_create_ttm_backend_entry - TTM hook creating a GTT backend.
 *
 * On AGP-capable builds, AGP boards get TTM's generic AGP backend bound
 * to the bridge; everything else (and non-AGP builds) gets the driver's
 * own GART-based backend from radeon_ttm_backend_create().
 * Returns NULL on allocation failure (both callees may return NULL).
 */
static struct ttm_backend*
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
	} else
#endif
	/* NB: the brace block below is the else-arm when AGP is compiled
	 * in, and the unconditional path otherwise. */
	{
		return radeon_ttm_backend_create(rdev);
	}
}
112
/* TTM invalidate_caches hook: nothing to do for radeon; always succeeds. */
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
117
/*
 * radeon_init_mem_type - describe one TTM memory type to the core.
 *
 * Fills the ttm_mem_type_manager for SYSTEM (plain cached system pages),
 * TT (GART/AGP aperture) and VRAM (the PCI framebuffer aperture).
 * For TT on AGP boards the AGP aperture is exposed directly as an
 * ioremappable, write-combined region; otherwise TT is CPU-accessible
 * CMA-style memory with no I/O window. Returns -EINVAL for unknown types
 * or when AGP is requested but not enabled.
 */
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->gpu_offset = 0;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			/* AGP aperture: uncached/WC I/O window at agp_base. */
			man->io_offset = rdev->mc.agp_base;
			man->io_size = rdev->mc.gtt_size;
			man->io_addr = NULL;
			man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
				     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else
#endif
		{
			/* Driver GART: backed by system pages, no I/O window. */
			man->io_offset = 0;
			man->io_size = 0;
			man->io_addr = NULL;
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
		}
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->io_addr = NULL;
		/* CPU access to VRAM goes through the PCI aperture. */
		man->io_offset = rdev->mc.aper_base;
		man->io_size = rdev->mc.aper_size;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
179
180static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
181{
182 uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
183
184 switch (bo->mem.mem_type) {
185 default:
186 return (cur_placement & ~TTM_PL_MASK_CACHING) |
187 TTM_PL_FLAG_SYSTEM |
188 TTM_PL_FLAG_CACHED;
189 }
190}
191
/* TTM verify_access hook: radeon performs no per-file access checks here,
 * so every mmap of a buffer object is allowed. */
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
196
/*
 * radeon_move_null - "move" a BO by adopting the new placement in place.
 *
 * Used when no data copy is needed (no backing store yet, or a pure
 * bind/unbind between SYSTEM and TT). Ownership of new_mem->mm_node is
 * transferred into bo->mem, so new_mem is cleared to avoid a double free.
 */
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	/* The old placement must not still own an allocator node. */
	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}
206
207static int radeon_move_blit(struct ttm_buffer_object *bo,
208 bool evict, int no_wait,
209 struct ttm_mem_reg *new_mem,
210 struct ttm_mem_reg *old_mem)
211{
212 struct radeon_device *rdev;
213 uint64_t old_start, new_start;
214 struct radeon_fence *fence;
215 int r;
216
217 rdev = radeon_get_rdev(bo->bdev);
218 r = radeon_fence_create(rdev, &fence);
219 if (unlikely(r)) {
220 return r;
221 }
222 old_start = old_mem->mm_node->start << PAGE_SHIFT;
223 new_start = new_mem->mm_node->start << PAGE_SHIFT;
224
225 switch (old_mem->mem_type) {
226 case TTM_PL_VRAM:
227 old_start += rdev->mc.vram_location;
228 break;
229 case TTM_PL_TT:
230 old_start += rdev->mc.gtt_location;
231 break;
232 default:
233 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
234 return -EINVAL;
235 }
236 switch (new_mem->mem_type) {
237 case TTM_PL_VRAM:
238 new_start += rdev->mc.vram_location;
239 break;
240 case TTM_PL_TT:
241 new_start += rdev->mc.gtt_location;
242 break;
243 default:
244 DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
245 return -EINVAL;
246 }
247 if (!rdev->cp.ready) {
248 DRM_ERROR("Trying to move memory with CP turned off.\n");
249 return -EINVAL;
250 }
251 r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
252 /* FIXME: handle copy error */
253 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
254 evict, no_wait, new_mem);
255 radeon_fence_unref(&fence);
256 return r;
257}
258
259static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
260 bool evict, bool interruptible, bool no_wait,
261 struct ttm_mem_reg *new_mem)
262{
263 struct radeon_device *rdev;
264 struct ttm_mem_reg *old_mem = &bo->mem;
265 struct ttm_mem_reg tmp_mem;
266 uint32_t proposed_placement;
267 int r;
268
269 rdev = radeon_get_rdev(bo->bdev);
270 tmp_mem = *new_mem;
271 tmp_mem.mm_node = NULL;
272 proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
273 r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
274 interruptible, no_wait);
275 if (unlikely(r)) {
276 return r;
277 }
278 r = ttm_tt_bind(bo->ttm, &tmp_mem);
279 if (unlikely(r)) {
280 goto out_cleanup;
281 }
282 r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
283 if (unlikely(r)) {
284 goto out_cleanup;
285 }
286 r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
287out_cleanup:
288 if (tmp_mem.mm_node) {
289 spin_lock(&rdev->mman.bdev.lru_lock);
290 drm_mm_put_block(tmp_mem.mm_node);
291 spin_unlock(&rdev->mman.bdev.lru_lock);
292 return r;
293 }
294 return r;
295}
296
297static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
298 bool evict, bool interruptible, bool no_wait,
299 struct ttm_mem_reg *new_mem)
300{
301 struct radeon_device *rdev;
302 struct ttm_mem_reg *old_mem = &bo->mem;
303 struct ttm_mem_reg tmp_mem;
304 uint32_t proposed_flags;
305 int r;
306
307 rdev = radeon_get_rdev(bo->bdev);
308 tmp_mem = *new_mem;
309 tmp_mem.mm_node = NULL;
310 proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
311 r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
312 interruptible, no_wait);
313 if (unlikely(r)) {
314 return r;
315 }
316 r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
317 if (unlikely(r)) {
318 goto out_cleanup;
319 }
320 r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
321 if (unlikely(r)) {
322 goto out_cleanup;
323 }
324out_cleanup:
325 if (tmp_mem.mm_node) {
326 spin_lock(&rdev->mman.bdev.lru_lock);
327 drm_mm_put_block(tmp_mem.mm_node);
328 spin_unlock(&rdev->mman.bdev.lru_lock);
329 return r;
330 }
331 return r;
332}
333
/*
 * radeon_bo_move - TTM move hook dispatching on source/destination type.
 *
 * Fast paths: a BO with no backing ttm, or a SYSTEM<->TT transition, is a
 * pure (un)bind and needs no data copy. If the CP is down everything
 * falls back to a CPU memcpy. Otherwise VRAM<->SYSTEM moves are staged
 * through GTT and everything else is blitted directly.
 */
static int radeon_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible, bool no_wait,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->cp.ready) {
		/* use memcpy */
		DRM_ERROR("CP is not ready use memcpy.\n");
		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		return radeon_move_vram_ram(bo, evict, interruptible,
					    no_wait, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		return radeon_move_ram_vram(bo, evict, interruptible,
					    no_wait, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
		if (unlikely(r)) {
			return r;
		}
	}
	/* only reached via the blit branch with r == 0 */
	return r;
}
377
/* Preferred placement order when allocating space for a BO. */
const uint32_t radeon_mem_prios[] = {
	TTM_PL_VRAM,
	TTM_PL_TT,
	TTM_PL_SYSTEM,
};

/* Placement order to try when the preferred domains are contended;
 * GTT is favoured over VRAM here to relieve VRAM pressure. */
const uint32_t radeon_busy_prios[] = {
	TTM_PL_TT,
	TTM_PL_VRAM,
	TTM_PL_SYSTEM,
};
389
/*
 * TTM sync-object hooks. TTM treats sync objects as opaque void pointers;
 * for radeon they are struct radeon_fence pointers, so each hook is a
 * thin cast-and-forward to the corresponding fence operation. sync_arg
 * and the lazy flag are unused by this driver.
 */
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

/* Nothing to flush: fences signal via the GPU/IRQ path on their own. */
static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
415
/* TTM driver vtable wiring all the callbacks above into the core. */
static struct ttm_bo_driver radeon_bo_driver = {
	.mem_type_prio = radeon_mem_prios,
	.mem_busy_prio = radeon_busy_prios,
	.num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
	.num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
};
433
434int radeon_ttm_init(struct radeon_device *rdev)
435{
436 int r;
437
438 r = radeon_ttm_global_init(rdev);
439 if (r) {
440 return r;
441 }
442 /* No others user of address space so set it to 0 */
443 r = ttm_bo_device_init(&rdev->mman.bdev,
444 rdev->mman.mem_global_ref.object,
445 &radeon_bo_driver, DRM_FILE_PAGE_OFFSET);
446 if (r) {
447 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
448 return r;
449 }
450 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
451 ((rdev->mc.aper_size) >> PAGE_SHIFT));
452 if (r) {
453 DRM_ERROR("Failed initializing VRAM heap.\n");
454 return r;
455 }
456 r = radeon_object_create(rdev, NULL, 256 * 1024, true,
457 RADEON_GEM_DOMAIN_VRAM, false,
458 &rdev->stollen_vga_memory);
459 if (r) {
460 return r;
461 }
462 r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
463 if (r) {
464 radeon_object_unref(&rdev->stollen_vga_memory);
465 return r;
466 }
467 DRM_INFO("radeon: %uM of VRAM memory ready\n",
468 rdev->mc.vram_size / (1024 * 1024));
469 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
470 ((rdev->mc.gtt_size) >> PAGE_SHIFT));
471 if (r) {
472 DRM_ERROR("Failed initializing GTT heap.\n");
473 return r;
474 }
475 DRM_INFO("radeon: %uM of GTT memory ready.\n",
476 rdev->mc.gtt_size / (1024 * 1024));
477 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
478 rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
479 }
480 return 0;
481}
482
/*
 * radeon_ttm_fini - tear down TTM memory management.
 *
 * Releases the pinned stolen-VGA object (if it was created), drains and
 * destroys the VRAM and GTT heaps, releases the bo device, the GART and
 * finally the global TTM reference — the reverse of radeon_ttm_init().
 */
void radeon_ttm_fini(struct radeon_device *rdev)
{
	if (rdev->stollen_vga_memory) {
		/* unpin before dropping the last reference */
		radeon_object_unpin(rdev->stollen_vga_memory);
		radeon_object_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	DRM_INFO("radeon: ttm finalized\n");
}
496
497static struct vm_operations_struct radeon_ttm_vm_ops;
498static struct vm_operations_struct *ttm_vm_ops = NULL;
499
500static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
501{
502 struct ttm_buffer_object *bo;
503 int r;
504
505 bo = (struct ttm_buffer_object *)vma->vm_private_data;
506 if (bo == NULL) {
507 return VM_FAULT_NOPAGE;
508 }
509 r = ttm_vm_ops->fault(vma, vmf);
510 return r;
511}
512
/*
 * radeon_mmap - file mmap entry point for the radeon DRM device.
 *
 * Offsets below DRM_FILE_PAGE_OFFSET are legacy DRM maps and are handed
 * to drm_mmap(); everything above is a TTM buffer object. After a
 * successful ttm_bo_mmap() the VMA's vm_ops are swapped for a copy whose
 * fault handler is wrapped by radeon_ttm_fault().
 */
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	/* NOTE(review): this first-mmap initialization of the shared
	 * ttm_vm_ops copy is not obviously safe against two tasks mmap'ing
	 * concurrently — confirm what lock (if any) serializes mmap here. */
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}
540
541
542/*
543 * TTM backend functions.
544 */
/* Driver GART backend: binds system pages into the radeon GART. */
struct radeon_ttm_backend {
	struct ttm_backend backend;	/* must be first: container_of is used */
	struct radeon_device *rdev;	/* owning device */
	unsigned long num_pages;	/* pages handed over by populate() */
	struct page **pages;		/* page array (owned by TTM, not us) */
	struct page *dummy_read_page;	/* TTM's placeholder read page */
	bool populated;			/* populate() has run */
	bool bound;			/* pages currently bound in the GART */
	unsigned offset;		/* GART offset of the bound range, bytes */
};
555
556static int radeon_ttm_backend_populate(struct ttm_backend *backend,
557 unsigned long num_pages,
558 struct page **pages,
559 struct page *dummy_read_page)
560{
561 struct radeon_ttm_backend *gtt;
562
563 gtt = container_of(backend, struct radeon_ttm_backend, backend);
564 gtt->pages = pages;
565 gtt->num_pages = num_pages;
566 gtt->dummy_read_page = dummy_read_page;
567 gtt->populated = true;
568 return 0;
569}
570
571static void radeon_ttm_backend_clear(struct ttm_backend *backend)
572{
573 struct radeon_ttm_backend *gtt;
574
575 gtt = container_of(backend, struct radeon_ttm_backend, backend);
576 gtt->pages = NULL;
577 gtt->num_pages = 0;
578 gtt->dummy_read_page = NULL;
579 gtt->populated = false;
580 gtt->bound = false;
581}
582
583
/*
 * radeon_ttm_backend_bind - map the populated pages into the GART.
 *
 * Computes the byte offset from the placement's drm_mm node and binds the
 * previously-populated pages there via radeon_gart_bind(). Returns 0 on
 * success or the radeon_gart_bind() error. Binding zero pages is treated
 * as a driver bug (WARN) but still attempted.
 */
static int radeon_ttm_backend_bind(struct ttm_backend *backend,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_backend *gtt;
	int r;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
	if (!gtt->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     gtt->num_pages, gtt->pages);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  gtt->num_pages, gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}
605
/*
 * radeon_ttm_backend_unbind - remove this backend's pages from the GART.
 * Always succeeds (radeon_gart_unbind reports no error here).
 */
static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
	gtt->bound = false;
	return 0;
}
615
616static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
617{
618 struct radeon_ttm_backend *gtt;
619
620 gtt = container_of(backend, struct radeon_ttm_backend, backend);
621 if (gtt->bound) {
622 radeon_ttm_backend_unbind(backend);
623 }
624 kfree(gtt);
625}
626
/* Function table handed to TTM for the driver GART backend. */
static struct ttm_backend_func radeon_backend_func = {
	.populate = &radeon_ttm_backend_populate,
	.clear = &radeon_ttm_backend_clear,
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};
634
635struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
636{
637 struct radeon_ttm_backend *gtt;
638
639 gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
640 if (gtt == NULL) {
641 return NULL;
642 }
643 gtt->backend.bdev = &rdev->mman.bdev;
644 gtt->backend.flags = 0;
645 gtt->backend.func = &radeon_backend_func;
646 gtt->rdev = rdev;
647 gtt->pages = NULL;
648 gtt->num_pages = 0;
649 gtt->dummy_read_page = NULL;
650 gtt->populated = false;
651 gtt->bound = false;
652 return &gtt->backend;
653}
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
new file mode 100644
index 000000000000..cc074b5a8f74
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -0,0 +1,411 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include <drm/drmP.h>
30#include "radeon_reg.h"
31#include "radeon.h"
32
33/* rs400,rs480 depends on : */
34void r100_hdp_reset(struct radeon_device *rdev);
35void r100_mc_disable_clients(struct radeon_device *rdev);
36int r300_mc_wait_for_idle(struct radeon_device *rdev);
37void r420_pipes_init(struct radeon_device *rdev);
38
39/* This files gather functions specifics to :
40 * rs400,rs480
41 *
42 * Some of these functions might be used by newer ASICs.
43 */
44void rs400_gpu_init(struct radeon_device *rdev);
45int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
46
47
48/*
49 * GART functions.
50 */
51void rs400_gart_adjust_size(struct radeon_device *rdev)
52{
53 /* Check gart size */
54 switch (rdev->mc.gtt_size/(1024*1024)) {
55 case 32:
56 case 64:
57 case 128:
58 case 256:
59 case 512:
60 case 1024:
61 case 2048:
62 break;
63 default:
64 DRM_ERROR("Unable to use IGP GART size %uM\n",
65 rdev->mc.gtt_size >> 20);
66 DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
67 DRM_ERROR("Forcing to 32M GART size\n");
68 rdev->mc.gtt_size = 32 * 1024 * 1024;
69 return;
70 }
71 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
72 /* FIXME: RS400 & RS480 seems to have issue with GART size
73 * if 4G of system memory (needs more testing) */
74 rdev->mc.gtt_size = 32 * 1024 * 1024;
75 DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n");
76 }
77}
78
/*
 * rs400_gart_tlb_flush - invalidate the GART TLB and wait for completion.
 *
 * Writes the invalidate bit, then polls (1us per iteration, up to
 * rdev->usec_timeout iterations) until the hardware clears it, and
 * finally clears the control register.
 * NOTE(review): if usec_timeout were 0 the unsigned decrement would wrap
 * and the loop would spin for a very long time — confirm usec_timeout is
 * always non-zero at device init.
 */
void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	unsigned int timeout = rdev->usec_timeout;

	WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
	do {
		tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
		if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
			break;
		DRM_UDELAY(1);
		timeout--;
	} while (timeout > 0);
	WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}
94
/*
 * rs400_gart_enable - program and enable the IGP GART.
 *
 * Initializes the common GART structure, allocates the page table in
 * system RAM if not already present, programs the AGP/GART aperture
 * location and the table base address, disables snooping and AGP mode,
 * then enables the GART and flushes its TLB.
 * Returns 0 on success or a negative error code (-EINVAL for a GART size
 * that rs400_gart_adjust_size() should already have rejected).
 */
int rs400_gart_enable(struct radeon_device *rdev)
{
	uint32_t size_reg;
	uint32_t tmp;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	if (rs400_debugfs_pcie_gart_info_init(rdev)) {
		/* debugfs is diagnostics only; failure is non-fatal */
		DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
	}

	/* Swallow out-of-GART accesses instead of forwarding them to PCI. */
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
	/* Check gart size */
	switch(rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
		size_reg = RS480_VA_SIZE_32MB;
		break;
	case 64:
		size_reg = RS480_VA_SIZE_64MB;
		break;
	case 128:
		size_reg = RS480_VA_SIZE_128MB;
		break;
	case 256:
		size_reg = RS480_VA_SIZE_256MB;
		break;
	case 512:
		size_reg = RS480_VA_SIZE_512MB;
		break;
	case 1024:
		size_reg = RS480_VA_SIZE_1GB;
		break;
	case 2048:
		size_reg = RS480_VA_SIZE_2GB;
		break;
	default:
		return -EINVAL;
	}
	if (rdev->gart.table.ram.ptr == NULL) {
		/* one 32-bit entry per GPU page */
		rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
		r = radeon_gart_table_ram_alloc(rdev);
		if (r) {
			return r;
		}
	}
	/* It should be fine to program it to max value */
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
		WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
	} else {
		WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
		WREG32(RS480_AGP_BASE_2, 0);
	}
	/* Program the GART aperture window [gtt_location, +gtt_size). */
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16);
	tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	} else {
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	}
	/* Table should be in 32bits address space so ignore bits above. */
	tmp = rdev->gart.table_addr & 0xfffff000;
	WREG32_MC(RS480_GART_BASE, tmp);
	/* TODO: more tweaking here */
	WREG32_MC(RS480_GART_FEATURE_ID,
		  (RS480_TLB_ENABLE |
		   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
	/* Disable snooping */
	WREG32_MC(RS480_AGP_MODE_CNTL,
		  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
	/* Disable AGP mode */
	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS480_MC_MISC_CNTL,
			  (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
	} else {
		WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
	}
	/* Enable gart */
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
	rs400_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
191
192void rs400_gart_disable(struct radeon_device *rdev)
193{
194 uint32_t tmp;
195
196 tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
197 tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
198 WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
199 WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
200}
201
202int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
203{
204 if (i < 0 || i > rdev->gart.num_gpu_pages) {
205 return -EINVAL;
206 }
207 rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC);
208 return 0;
209}
210
211
212/*
213 * MC functions.
214 */
/*
 * rs400_mc_init - program the memory controller's address map.
 *
 * Disables the GART, places the GTT aperture just above VRAM (aligned to
 * its own size), lets radeon_mc_setup() pick the VRAM location, then with
 * MC clients idled programs the framebuffer window and flushes the HDP
 * read cache via a soft reset. Returns 0 or the radeon_mc_setup() error.
 */
int rs400_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		/* non-fatal: debugfs is diagnostics only */
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	rs400_gpu_init(rdev);
	rs400_gart_disable(rdev);
	/* Put GTT right after VRAM, aligned up to a multiple of gtt_size
	 * (gtt_size is a power of two, see rs400_gart_adjust_size()). */
	rdev->mc.gtt_location = rdev->mc.vram_size;
	rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
	rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
	/* Sentinel: let radeon_mc_setup() choose the VRAM location. */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	r100_mc_disable_clients(rdev);
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	/* Program the framebuffer window [vram_location, +vram_size). */
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32(RADEON_MC_FB_LOCATION, tmp);
	/* Pulse HDP soft reset + read buffer invalidate, then restore;
	 * the dummy reads flush the posted writes. */
	tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS;
	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	WREG32(RADEON_HOST_PATH_CNTL, tmp);
	(void)RREG32(RADEON_HOST_PATH_CNTL);
	return 0;
}
252
/*
 * rs400_mc_fini - tear down the MC/GART state set up by rs400_mc_init()
 * and rs400_gart_enable(): disable the GART, free its RAM page table and
 * release the common GART structure.
 */
void rs400_mc_fini(struct radeon_device *rdev)
{
	rs400_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
	radeon_gart_fini(rdev);
}
259
260
261/*
262 * Global GPU functions
263 */
/* rs400_errata - no PLL errata workarounds are needed on rs400/rs480. */
void rs400_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}
268
/*
 * rs400_gpu_init - basic GPU block init for rs400/rs480.
 *
 * Resets the HDP and configures the render pipes using the r420 helper,
 * then waits for the memory controller to go idle.
 */
void rs400_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: HDP same place on rs400 ? */
	r100_hdp_reset(rdev);
	/* FIXME: is this correct ? */
	r420_pipes_init(rdev);
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}
280
281
282/*
283 * VRAM info.
284 */
/*
 * rs400_vram_info - determine the amount of stolen system RAM used as VRAM.
 *
 * IGP parts carve their "VRAM" out of system memory; the NB_TOM register
 * encodes the carve-out's top and bottom in 64KB units, from which the
 * size is derived and mirrored into CONFIG_MEMSIZE. Also records the PCI
 * aperture base/size for CPU access.
 */
void rs400_vram_info(struct radeon_device *rdev)
{
	uint32_t tom;

	rs400_gart_adjust_size(rdev);
	/* DDR for all card after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;

	/* read NB_TOM to get the amount of ram stolen for the GPU */
	tom = RREG32(RADEON_NB_TOM);
	/* top and bottom are 16-bit fields in 64KB granularity */
	rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);

	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}
303
304
305/*
306 * Indirect registers accessor
307 */
308uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
309{
310 uint32_t r;
311
312 WREG32(RS480_NB_MC_INDEX, reg & 0xff);
313 r = RREG32(RS480_NB_MC_DATA);
314 WREG32(RS480_NB_MC_INDEX, 0xff);
315 return r;
316}
317
318void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
319{
320 WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
321 WREG32(RS480_NB_MC_DATA, (v));
322 WREG32(RS480_NB_MC_INDEX, 0xff);
323}
324
325
326/*
327 * Debugfs info
328 */
329#if defined(CONFIG_DEBUG_FS)
/*
 * rs400_debugfs_gart_info - debugfs dump of the GART-related registers.
 *
 * Prints the host-path/bus control registers, the AGP aperture setup
 * (via the RS690 MCCFG registers on RS690/RS740, the classic registers
 * otherwise) and the GART control/error registers. The bare hex MC
 * indices (0x100, 0x5F, 0x3B.., 0x30..) have no named #defines; their
 * meanings are taken from the printed labels — confirm against the
 * RS480/RS690 register documentation.
 */
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_BUS_CNTL);
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
		seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
		tmp = RREG32_MC(0x100);
		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
		tmp = RREG32(0x134);
		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
	} else {
		tmp = RREG32(RADEON_AGP_BASE);
		seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
		tmp = RREG32(RS480_AGP_BASE_2);
		seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
		tmp = RREG32(RADEON_MC_AGP_LOCATION);
		seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
	}
	tmp = RREG32_MC(RS480_GART_BASE);
	seq_printf(m, "GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_GART_FEATURE_ID);
	seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
	seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_MC_MISC_CNTL);
	seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(0x5F);
	seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
	seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
	seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
	tmp = RREG32_MC(0x3B);
	seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
	tmp = RREG32_MC(0x3C);
	seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
	tmp = RREG32_MC(0x30);
	seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
	tmp = RREG32_MC(0x31);
	seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
	tmp = RREG32_MC(0x32);
	seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
	tmp = RREG32_MC(0x33);
	seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
	tmp = RREG32_MC(0x34);
	seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
	tmp = RREG32_MC(0x35);
	seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
	tmp = RREG32_MC(0x36);
	seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
	tmp = RREG32_MC(0x37);
	seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
	return 0;
}
398
/* Single debugfs entry exposing the GART register dump above. */
static struct drm_info_list rs400_gart_info_list[] = {
	{"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
402#endif
403
/*
 * rs400_debugfs_pcie_gart_info_init - register the GART debugfs file.
 * No-op (returns 0) when CONFIG_DEBUG_FS is disabled.
 */
int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
	return 0;
#endif
}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
new file mode 100644
index 000000000000..ab0c967553e6
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -0,0 +1,324 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32/* rs600 depends on : */
33void r100_hdp_reset(struct radeon_device *rdev);
34int r100_gui_wait_for_idle(struct radeon_device *rdev);
35int r300_mc_wait_for_idle(struct radeon_device *rdev);
36void r420_pipes_init(struct radeon_device *rdev);
37
38/* This files gather functions specifics to :
39 * rs600
40 *
41 * Some of these functions might be used by newer ASICs.
42 */
43void rs600_gpu_init(struct radeon_device *rdev);
44int rs600_mc_wait_for_idle(struct radeon_device *rdev);
45void rs600_disable_vga(struct radeon_device *rdev);
46
47
48/*
49 * GART.
50 */
51void rs600_gart_tlb_flush(struct radeon_device *rdev)
52{
53 uint32_t tmp;
54
55 tmp = RREG32_MC(RS600_MC_PT0_CNTL);
56 tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
57 WREG32_MC(RS600_MC_PT0_CNTL, tmp);
58
59 tmp = RREG32_MC(RS600_MC_PT0_CNTL);
60 tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
61 WREG32_MC(RS600_MC_PT0_CNTL, tmp);
62
63 tmp = RREG32_MC(RS600_MC_PT0_CNTL);
64 tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
65 WREG32_MC(RS600_MC_PT0_CNTL, tmp);
66 tmp = RREG32_MC(RS600_MC_PT0_CNTL);
67}
68
69int rs600_gart_enable(struct radeon_device *rdev)
70{
71 uint32_t tmp;
72 int i;
73 int r;
74
75 /* Initialize common gart structure */
76 r = radeon_gart_init(rdev);
77 if (r) {
78 return r;
79 }
80 rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
81 r = radeon_gart_table_vram_alloc(rdev);
82 if (r) {
83 return r;
84 }
85 /* FIXME: setup default page */
86 WREG32_MC(RS600_MC_PT0_CNTL,
87 (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
88 RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
89 for (i = 0; i < 19; i++) {
90 WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i,
91 (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
92 RS600_SYSTEM_ACCESS_MODE_IN_SYS |
93 RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE |
94 RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
95 RS600_ENABLE_FRAGMENT_PROCESSING |
96 RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
97 }
98
99 /* System context map to GART space */
100 WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location);
101 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
102 WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);
103
104 /* enable first context */
105 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location);
106 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
107 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp);
108 WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL,
109 (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT));
110 /* disable all other contexts */
111 for (i = 1; i < 8; i++) {
112 WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
113 }
114
115 /* setup the page table */
116 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
117 rdev->gart.table_addr);
118 WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
119
120 /* enable page tables */
121 tmp = RREG32_MC(RS600_MC_PT0_CNTL);
122 WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT));
123 tmp = RREG32_MC(RS600_MC_CNTL1);
124 WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES));
125 rs600_gart_tlb_flush(rdev);
126 rdev->gart.ready = true;
127 return 0;
128}
129
130void rs600_gart_disable(struct radeon_device *rdev)
131{
132 uint32_t tmp;
133
134 /* FIXME: disable out of gart access */
135 WREG32_MC(RS600_MC_PT0_CNTL, 0);
136 tmp = RREG32_MC(RS600_MC_CNTL1);
137 tmp &= ~RS600_ENABLE_PAGE_TABLES;
138 WREG32_MC(RS600_MC_CNTL1, tmp);
139 radeon_object_kunmap(rdev->gart.table.vram.robj);
140 radeon_object_unpin(rdev->gart.table.vram.robj);
141}
142
143#define R600_PTE_VALID (1 << 0)
144#define R600_PTE_SYSTEM (1 << 1)
145#define R600_PTE_SNOOPED (1 << 2)
146#define R600_PTE_READABLE (1 << 5)
147#define R600_PTE_WRITEABLE (1 << 6)
148
149int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
150{
151 void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
152
153 if (i < 0 || i > rdev->gart.num_gpu_pages) {
154 return -EINVAL;
155 }
156 addr = addr & 0xFFFFFFFFFFFFF000ULL;
157 addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
158 addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
159 writeq(addr, ((void __iomem *)ptr) + (i * 8));
160 return 0;
161}
162
163
164/*
165 * MC.
166 */
167void rs600_mc_disable_clients(struct radeon_device *rdev)
168{
169 unsigned tmp;
170
171 if (r100_gui_wait_for_idle(rdev)) {
172 printk(KERN_WARNING "Failed to wait GUI idle while "
173 "programming pipes. Bad things might happen.\n");
174 }
175
176 tmp = RREG32(AVIVO_D1VGA_CONTROL);
177 WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
178 tmp = RREG32(AVIVO_D2VGA_CONTROL);
179 WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
180
181 tmp = RREG32(AVIVO_D1CRTC_CONTROL);
182 WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
183 tmp = RREG32(AVIVO_D2CRTC_CONTROL);
184 WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
185
186 /* make sure all previous write got through */
187 tmp = RREG32(AVIVO_D2CRTC_CONTROL);
188
189 mdelay(1);
190}
191
192int rs600_mc_init(struct radeon_device *rdev)
193{
194 uint32_t tmp;
195 int r;
196
197 if (r100_debugfs_rbbm_init(rdev)) {
198 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
199 }
200
201 rs600_gpu_init(rdev);
202 rs600_gart_disable(rdev);
203
204 /* Setup GPU memory space */
205 rdev->mc.vram_location = 0xFFFFFFFFUL;
206 rdev->mc.gtt_location = 0xFFFFFFFFUL;
207 r = radeon_mc_setup(rdev);
208 if (r) {
209 return r;
210 }
211
212 /* Program GPU memory space */
213 /* Enable bus master */
214 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
215 WREG32(RADEON_BUS_CNTL, tmp);
216 /* FIXME: What does AGP means for such chipset ? */
217 WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
218 /* FIXME: are this AGP reg in indirect MC range ? */
219 WREG32_MC(RS600_MC_AGP_BASE, 0);
220 WREG32_MC(RS600_MC_AGP_BASE_2, 0);
221 rs600_mc_disable_clients(rdev);
222 if (rs600_mc_wait_for_idle(rdev)) {
223 printk(KERN_WARNING "Failed to wait MC idle while "
224 "programming pipes. Bad things might happen.\n");
225 }
226 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
227 tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
228 tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
229 WREG32_MC(RS600_MC_FB_LOCATION, tmp);
230 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
231 return 0;
232}
233
234void rs600_mc_fini(struct radeon_device *rdev)
235{
236 rs600_gart_disable(rdev);
237 radeon_gart_table_vram_free(rdev);
238 radeon_gart_fini(rdev);
239}
240
241
242/*
243 * Global GPU functions
244 */
245void rs600_disable_vga(struct radeon_device *rdev)
246{
247 unsigned tmp;
248
249 WREG32(0x330, 0);
250 WREG32(0x338, 0);
251 tmp = RREG32(0x300);
252 tmp &= ~(3 << 16);
253 WREG32(0x300, tmp);
254 WREG32(0x308, (1 << 8));
255 WREG32(0x310, rdev->mc.vram_location);
256 WREG32(0x594, 0);
257}
258
259int rs600_mc_wait_for_idle(struct radeon_device *rdev)
260{
261 unsigned i;
262 uint32_t tmp;
263
264 for (i = 0; i < rdev->usec_timeout; i++) {
265 /* read MC_STATUS */
266 tmp = RREG32_MC(RS600_MC_STATUS);
267 if (tmp & RS600_MC_STATUS_IDLE) {
268 return 0;
269 }
270 DRM_UDELAY(1);
271 }
272 return -1;
273}
274
275void rs600_errata(struct radeon_device *rdev)
276{
277 rdev->pll_errata = 0;
278}
279
280void rs600_gpu_init(struct radeon_device *rdev)
281{
282 /* FIXME: HDP same place on rs600 ? */
283 r100_hdp_reset(rdev);
284 rs600_disable_vga(rdev);
285 /* FIXME: is this correct ? */
286 r420_pipes_init(rdev);
287 if (rs600_mc_wait_for_idle(rdev)) {
288 printk(KERN_WARNING "Failed to wait MC idle while "
289 "programming pipes. Bad things might happen.\n");
290 }
291}
292
293
294/*
295 * VRAM info.
296 */
297void rs600_vram_info(struct radeon_device *rdev)
298{
299 /* FIXME: to do or is these values sane ? */
300 rdev->mc.vram_is_ddr = true;
301 rdev->mc.vram_width = 128;
302}
303
304
305/*
306 * Indirect registers accessor
307 */
308uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
309{
310 uint32_t r;
311
312 WREG32(RS600_MC_INDEX,
313 ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0));
314 r = RREG32(RS600_MC_DATA);
315 return r;
316}
317
318void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
319{
320 WREG32(RS600_MC_INDEX,
321 RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 |
322 ((reg) & RS600_MC_ADDR_MASK));
323 WREG32(RS600_MC_DATA, v);
324}
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
new file mode 100644
index 000000000000..79ba85042b5f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32/* rs690,rs740 depends on : */
33void r100_hdp_reset(struct radeon_device *rdev);
34int r300_mc_wait_for_idle(struct radeon_device *rdev);
35void r420_pipes_init(struct radeon_device *rdev);
36void rs400_gart_disable(struct radeon_device *rdev);
37int rs400_gart_enable(struct radeon_device *rdev);
38void rs400_gart_adjust_size(struct radeon_device *rdev);
39void rs600_mc_disable_clients(struct radeon_device *rdev);
40void rs600_disable_vga(struct radeon_device *rdev);
41
42/* This files gather functions specifics to :
43 * rs690,rs740
44 *
45 * Some of these functions might be used by newer ASICs.
46 */
47void rs690_gpu_init(struct radeon_device *rdev);
48int rs690_mc_wait_for_idle(struct radeon_device *rdev);
49
50
51/*
52 * MC functions.
53 */
54int rs690_mc_init(struct radeon_device *rdev)
55{
56 uint32_t tmp;
57 int r;
58
59 if (r100_debugfs_rbbm_init(rdev)) {
60 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
61 }
62
63 rs690_gpu_init(rdev);
64 rs400_gart_disable(rdev);
65
66 /* Setup GPU memory space */
67 rdev->mc.gtt_location = rdev->mc.vram_size;
68 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
69 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
70 rdev->mc.vram_location = 0xFFFFFFFFUL;
71 r = radeon_mc_setup(rdev);
72 if (r) {
73 return r;
74 }
75
76 /* Program GPU memory space */
77 rs600_mc_disable_clients(rdev);
78 if (rs690_mc_wait_for_idle(rdev)) {
79 printk(KERN_WARNING "Failed to wait MC idle while "
80 "programming pipes. Bad things might happen.\n");
81 }
82 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
83 tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
84 tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
85 WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
86 /* FIXME: Does this reg exist on RS480,RS740 ? */
87 WREG32(0x310, rdev->mc.vram_location);
88 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
89 return 0;
90}
91
92void rs690_mc_fini(struct radeon_device *rdev)
93{
94 rs400_gart_disable(rdev);
95 radeon_gart_table_ram_free(rdev);
96 radeon_gart_fini(rdev);
97}
98
99
100/*
101 * Global GPU functions
102 */
103int rs690_mc_wait_for_idle(struct radeon_device *rdev)
104{
105 unsigned i;
106 uint32_t tmp;
107
108 for (i = 0; i < rdev->usec_timeout; i++) {
109 /* read MC_STATUS */
110 tmp = RREG32_MC(RS690_MC_STATUS);
111 if (tmp & RS690_MC_STATUS_IDLE) {
112 return 0;
113 }
114 DRM_UDELAY(1);
115 }
116 return -1;
117}
118
119void rs690_errata(struct radeon_device *rdev)
120{
121 rdev->pll_errata = 0;
122}
123
124void rs690_gpu_init(struct radeon_device *rdev)
125{
126 /* FIXME: HDP same place on rs690 ? */
127 r100_hdp_reset(rdev);
128 rs600_disable_vga(rdev);
129 /* FIXME: is this correct ? */
130 r420_pipes_init(rdev);
131 if (rs690_mc_wait_for_idle(rdev)) {
132 printk(KERN_WARNING "Failed to wait MC idle while "
133 "programming pipes. Bad things might happen.\n");
134 }
135}
136
137
138/*
139 * VRAM info.
140 */
141void rs690_vram_info(struct radeon_device *rdev)
142{
143 uint32_t tmp;
144
145 rs400_gart_adjust_size(rdev);
146 /* DDR for all card after R300 & IGP */
147 rdev->mc.vram_is_ddr = true;
148 /* FIXME: is this correct for RS690/RS740 ? */
149 tmp = RREG32(RADEON_MEM_CNTL);
150 if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
151 rdev->mc.vram_width = 128;
152 } else {
153 rdev->mc.vram_width = 64;
154 }
155 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
156
157 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
158 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
159}
160
161
162/*
163 * Indirect registers accessor
164 */
165uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
166{
167 uint32_t r;
168
169 WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK));
170 r = RREG32(RS690_MC_DATA);
171 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
172 return r;
173}
174
175void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
176{
177 WREG32(RS690_MC_INDEX,
178 RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK));
179 WREG32(RS690_MC_DATA, v);
180 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
181}
diff --git a/drivers/gpu/drm/radeon/rs780.c b/drivers/gpu/drm/radeon/rs780.c
new file mode 100644
index 000000000000..0affcff81825
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs780.c
@@ -0,0 +1,102 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32/* rs780 depends on : */
33void rs600_mc_disable_clients(struct radeon_device *rdev);
34
35/* This files gather functions specifics to:
36 * rs780
37 *
38 * Some of these functions might be used by newer ASICs.
39 */
40int rs780_mc_wait_for_idle(struct radeon_device *rdev);
41void rs780_gpu_init(struct radeon_device *rdev);
42
43
44/*
45 * MC
46 */
47int rs780_mc_init(struct radeon_device *rdev)
48{
49 rs780_gpu_init(rdev);
50 /* FIXME: implement */
51
52 rs600_mc_disable_clients(rdev);
53 if (rs780_mc_wait_for_idle(rdev)) {
54 printk(KERN_WARNING "Failed to wait MC idle while "
55 "programming pipes. Bad things might happen.\n");
56 }
57 return 0;
58}
59
60void rs780_mc_fini(struct radeon_device *rdev)
61{
62 /* FIXME: implement */
63}
64
65
66/*
67 * Global GPU functions
68 */
69void rs780_errata(struct radeon_device *rdev)
70{
71 rdev->pll_errata = 0;
72}
73
74int rs780_mc_wait_for_idle(struct radeon_device *rdev)
75{
76 /* FIXME: implement */
77 return 0;
78}
79
80void rs780_gpu_init(struct radeon_device *rdev)
81{
82 /* FIXME: implement */
83}
84
85
86/*
87 * VRAM info
88 */
89void rs780_vram_get_type(struct radeon_device *rdev)
90{
91 /* FIXME: implement */
92}
93
94void rs780_vram_info(struct radeon_device *rdev)
95{
96 rs780_vram_get_type(rdev);
97
98 /* FIXME: implement */
99 /* Could aper size report 0 ? */
100 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
101 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
102}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
new file mode 100644
index 000000000000..7eab95db58ac
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -0,0 +1,504 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/seq_file.h>
29#include "drmP.h"
30#include "radeon_reg.h"
31#include "radeon.h"
32
33/* rv515 depends on : */
34void r100_hdp_reset(struct radeon_device *rdev);
35int r100_cp_reset(struct radeon_device *rdev);
36int r100_rb2d_reset(struct radeon_device *rdev);
37int r100_gui_wait_for_idle(struct radeon_device *rdev);
38int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
39int rv370_pcie_gart_enable(struct radeon_device *rdev);
40void rv370_pcie_gart_disable(struct radeon_device *rdev);
41void r420_pipes_init(struct radeon_device *rdev);
42void rs600_mc_disable_clients(struct radeon_device *rdev);
43void rs600_disable_vga(struct radeon_device *rdev);
44
45/* This files gather functions specifics to:
46 * rv515
47 *
48 * Some of these functions might be used by newer ASICs.
49 */
50int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
51int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
52void rv515_gpu_init(struct radeon_device *rdev);
53int rv515_mc_wait_for_idle(struct radeon_device *rdev);
54
55
56/*
57 * MC
58 */
59int rv515_mc_init(struct radeon_device *rdev)
60{
61 uint32_t tmp;
62 int r;
63
64 if (r100_debugfs_rbbm_init(rdev)) {
65 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
66 }
67 if (rv515_debugfs_pipes_info_init(rdev)) {
68 DRM_ERROR("Failed to register debugfs file for pipes !\n");
69 }
70 if (rv515_debugfs_ga_info_init(rdev)) {
71 DRM_ERROR("Failed to register debugfs file for pipes !\n");
72 }
73
74 rv515_gpu_init(rdev);
75 rv370_pcie_gart_disable(rdev);
76
77 /* Setup GPU memory space */
78 rdev->mc.vram_location = 0xFFFFFFFFUL;
79 rdev->mc.gtt_location = 0xFFFFFFFFUL;
80 if (rdev->flags & RADEON_IS_AGP) {
81 r = radeon_agp_init(rdev);
82 if (r) {
83 printk(KERN_WARNING "[drm] Disabling AGP\n");
84 rdev->flags &= ~RADEON_IS_AGP;
85 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
86 } else {
87 rdev->mc.gtt_location = rdev->mc.agp_base;
88 }
89 }
90 r = radeon_mc_setup(rdev);
91 if (r) {
92 return r;
93 }
94
95 /* Program GPU memory space */
96 rs600_mc_disable_clients(rdev);
97 if (rv515_mc_wait_for_idle(rdev)) {
98 printk(KERN_WARNING "Failed to wait MC idle while "
99 "programming pipes. Bad things might happen.\n");
100 }
101 /* Write VRAM size in case we are limiting it */
102 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
103 tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16);
104 WREG32(0x134, tmp);
105 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
106 tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16);
107 tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16);
108 WREG32_MC(RV515_MC_FB_LOCATION, tmp);
109 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
110 WREG32(0x310, rdev->mc.vram_location);
111 if (rdev->flags & RADEON_IS_AGP) {
112 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
113 tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16);
114 tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16);
115 WREG32_MC(RV515_MC_AGP_LOCATION, tmp);
116 WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base);
117 WREG32_MC(RV515_MC_AGP_BASE_2, 0);
118 } else {
119 WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF);
120 WREG32_MC(RV515_MC_AGP_BASE, 0);
121 WREG32_MC(RV515_MC_AGP_BASE_2, 0);
122 }
123 return 0;
124}
125
126void rv515_mc_fini(struct radeon_device *rdev)
127{
128 rv370_pcie_gart_disable(rdev);
129 radeon_gart_table_vram_free(rdev);
130 radeon_gart_fini(rdev);
131}
132
133
134/*
135 * Global GPU functions
136 */
137void rv515_ring_start(struct radeon_device *rdev)
138{
139 unsigned gb_tile_config;
140 int r;
141
142 /* Sub pixel 1/12 so we can have 4K rendering according to doc */
143 gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16;
144 switch (rdev->num_gb_pipes) {
145 case 2:
146 gb_tile_config |= R300_PIPE_COUNT_R300;
147 break;
148 case 3:
149 gb_tile_config |= R300_PIPE_COUNT_R420_3P;
150 break;
151 case 4:
152 gb_tile_config |= R300_PIPE_COUNT_R420;
153 break;
154 case 1:
155 default:
156 gb_tile_config |= R300_PIPE_COUNT_RV350;
157 break;
158 }
159
160 r = radeon_ring_lock(rdev, 64);
161 if (r) {
162 return;
163 }
164 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
165 radeon_ring_write(rdev,
166 RADEON_ISYNC_ANY2D_IDLE3D |
167 RADEON_ISYNC_ANY3D_IDLE2D |
168 RADEON_ISYNC_WAIT_IDLEGUI |
169 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
170 radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
171 radeon_ring_write(rdev, gb_tile_config);
172 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
173 radeon_ring_write(rdev,
174 RADEON_WAIT_2D_IDLECLEAN |
175 RADEON_WAIT_3D_IDLECLEAN);
176 radeon_ring_write(rdev, PACKET0(0x170C, 0));
177 radeon_ring_write(rdev, 1 << 31);
178 radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
179 radeon_ring_write(rdev, 0);
180 radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
181 radeon_ring_write(rdev, 0);
182 radeon_ring_write(rdev, PACKET0(0x42C8, 0));
183 radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
184 radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0));
185 radeon_ring_write(rdev, 0);
186 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
187 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
188 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
189 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
190 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
191 radeon_ring_write(rdev,
192 RADEON_WAIT_2D_IDLECLEAN |
193 RADEON_WAIT_3D_IDLECLEAN);
194 radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
195 radeon_ring_write(rdev, 0);
196 radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
197 radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
198 radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
199 radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
200 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
201 radeon_ring_write(rdev,
202 ((6 << R300_MS_X0_SHIFT) |
203 (6 << R300_MS_Y0_SHIFT) |
204 (6 << R300_MS_X1_SHIFT) |
205 (6 << R300_MS_Y1_SHIFT) |
206 (6 << R300_MS_X2_SHIFT) |
207 (6 << R300_MS_Y2_SHIFT) |
208 (6 << R300_MSBD0_Y_SHIFT) |
209 (6 << R300_MSBD0_X_SHIFT)));
210 radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
211 radeon_ring_write(rdev,
212 ((6 << R300_MS_X3_SHIFT) |
213 (6 << R300_MS_Y3_SHIFT) |
214 (6 << R300_MS_X4_SHIFT) |
215 (6 << R300_MS_Y4_SHIFT) |
216 (6 << R300_MS_X5_SHIFT) |
217 (6 << R300_MS_Y5_SHIFT) |
218 (6 << R300_MSBD1_SHIFT)));
219 radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
220 radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
221 radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
222 radeon_ring_write(rdev,
223 R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
224 radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
225 radeon_ring_write(rdev,
226 R300_GEOMETRY_ROUND_NEAREST |
227 R300_COLOR_ROUND_NEAREST);
228 radeon_ring_unlock_commit(rdev);
229}
230
231void rv515_errata(struct radeon_device *rdev)
232{
233 rdev->pll_errata = 0;
234}
235
236int rv515_mc_wait_for_idle(struct radeon_device *rdev)
237{
238 unsigned i;
239 uint32_t tmp;
240
241 for (i = 0; i < rdev->usec_timeout; i++) {
242 /* read MC_STATUS */
243 tmp = RREG32_MC(RV515_MC_STATUS);
244 if (tmp & RV515_MC_STATUS_IDLE) {
245 return 0;
246 }
247 DRM_UDELAY(1);
248 }
249 return -1;
250}
251
252void rv515_gpu_init(struct radeon_device *rdev)
253{
254 unsigned pipe_select_current, gb_pipe_select, tmp;
255
256 r100_hdp_reset(rdev);
257 r100_rb2d_reset(rdev);
258
259 if (r100_gui_wait_for_idle(rdev)) {
260 printk(KERN_WARNING "Failed to wait GUI idle while "
261 "reseting GPU. Bad things might happen.\n");
262 }
263
264 rs600_disable_vga(rdev);
265
266 r420_pipes_init(rdev);
267 gb_pipe_select = RREG32(0x402C);
268 tmp = RREG32(0x170C);
269 pipe_select_current = (tmp >> 2) & 3;
270 tmp = (1 << pipe_select_current) |
271 (((gb_pipe_select >> 8) & 0xF) << 4);
272 WREG32_PLL(0x000D, tmp);
273 if (r100_gui_wait_for_idle(rdev)) {
274 printk(KERN_WARNING "Failed to wait GUI idle while "
275 "reseting GPU. Bad things might happen.\n");
276 }
277 if (rv515_mc_wait_for_idle(rdev)) {
278 printk(KERN_WARNING "Failed to wait MC idle while "
279 "programming pipes. Bad things might happen.\n");
280 }
281}
282
283int rv515_ga_reset(struct radeon_device *rdev)
284{
285 uint32_t tmp;
286 bool reinit_cp;
287 int i;
288
289 reinit_cp = rdev->cp.ready;
290 rdev->cp.ready = false;
291 for (i = 0; i < rdev->usec_timeout; i++) {
292 WREG32(RADEON_CP_CSQ_MODE, 0);
293 WREG32(RADEON_CP_CSQ_CNTL, 0);
294 WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
295 (void)RREG32(RADEON_RBBM_SOFT_RESET);
296 udelay(200);
297 WREG32(RADEON_RBBM_SOFT_RESET, 0);
298 /* Wait to prevent race in RBBM_STATUS */
299 mdelay(1);
300 tmp = RREG32(RADEON_RBBM_STATUS);
301 if (tmp & ((1 << 20) | (1 << 26))) {
302 DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp);
303 /* GA still busy soft reset it */
304 WREG32(0x429C, 0x200);
305 WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
306 WREG32(0x43E0, 0);
307 WREG32(0x43E4, 0);
308 WREG32(0x24AC, 0);
309 }
310 /* Wait to prevent race in RBBM_STATUS */
311 mdelay(1);
312 tmp = RREG32(RADEON_RBBM_STATUS);
313 if (!(tmp & ((1 << 20) | (1 << 26)))) {
314 break;
315 }
316 }
317 for (i = 0; i < rdev->usec_timeout; i++) {
318 tmp = RREG32(RADEON_RBBM_STATUS);
319 if (!(tmp & ((1 << 20) | (1 << 26)))) {
320 DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
321 tmp);
322 DRM_INFO("GA_IDLE=0x%08X\n", RREG32(0x425C));
323 DRM_INFO("RB3D_RESET_STATUS=0x%08X\n", RREG32(0x46f0));
324 DRM_INFO("ISYNC_CNTL=0x%08X\n", RREG32(0x1724));
325 if (reinit_cp) {
326 return r100_cp_init(rdev, rdev->cp.ring_size);
327 }
328 return 0;
329 }
330 DRM_UDELAY(1);
331 }
332 tmp = RREG32(RADEON_RBBM_STATUS);
333 DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
334 return -1;
335}
336
337int rv515_gpu_reset(struct radeon_device *rdev)
338{
339 uint32_t status;
340
341 /* reset order likely matter */
342 status = RREG32(RADEON_RBBM_STATUS);
343 /* reset HDP */
344 r100_hdp_reset(rdev);
345 /* reset rb2d */
346 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
347 r100_rb2d_reset(rdev);
348 }
349 /* reset GA */
350 if (status & ((1 << 20) | (1 << 26))) {
351 rv515_ga_reset(rdev);
352 }
353 /* reset CP */
354 status = RREG32(RADEON_RBBM_STATUS);
355 if (status & (1 << 16)) {
356 r100_cp_reset(rdev);
357 }
358 /* Check if GPU is idle */
359 status = RREG32(RADEON_RBBM_STATUS);
360 if (status & (1 << 31)) {
361 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
362 return -1;
363 }
364 DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
365 return 0;
366}
367
368
369/*
370 * VRAM info
371 */
372static void rv515_vram_get_type(struct radeon_device *rdev)
373{
374 uint32_t tmp;
375
376 rdev->mc.vram_width = 128;
377 rdev->mc.vram_is_ddr = true;
378 tmp = RREG32_MC(RV515_MC_CNTL);
379 tmp &= RV515_MEM_NUM_CHANNELS_MASK;
380 switch (tmp) {
381 case 0:
382 rdev->mc.vram_width = 64;
383 break;
384 case 1:
385 rdev->mc.vram_width = 128;
386 break;
387 default:
388 rdev->mc.vram_width = 128;
389 break;
390 }
391}
392
393void rv515_vram_info(struct radeon_device *rdev)
394{
395 rv515_vram_get_type(rdev);
396 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
397
398 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
399 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
400}
401
402
403/*
404 * Indirect registers accessor
405 */
406uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
407{
408 uint32_t r;
409
410 WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
411 r = RREG32(R520_MC_IND_DATA);
412 WREG32(R520_MC_IND_INDEX, 0);
413 return r;
414}
415
416void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
417{
418 WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
419 WREG32(R520_MC_IND_DATA, (v));
420 WREG32(R520_MC_IND_INDEX, 0);
421}
422
423uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
424{
425 uint32_t r;
426
427 WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff));
428 (void)RREG32(RADEON_PCIE_INDEX);
429 r = RREG32(RADEON_PCIE_DATA);
430 return r;
431}
432
433void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
434{
435 WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff));
436 (void)RREG32(RADEON_PCIE_INDEX);
437 WREG32(RADEON_PCIE_DATA, (v));
438 (void)RREG32(RADEON_PCIE_DATA);
439}
440
441
442/*
443 * Debugfs info
444 */
445#if defined(CONFIG_DEBUG_FS)
446static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
447{
448 struct drm_info_node *node = (struct drm_info_node *) m->private;
449 struct drm_device *dev = node->minor->dev;
450 struct radeon_device *rdev = dev->dev_private;
451 uint32_t tmp;
452
453 tmp = RREG32(R400_GB_PIPE_SELECT);
454 seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
455 tmp = RREG32(R500_SU_REG_DEST);
456 seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp);
457 tmp = RREG32(R300_GB_TILE_CONFIG);
458 seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
459 tmp = RREG32(R300_DST_PIPE_CONFIG);
460 seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
461 return 0;
462}
463
464static int rv515_debugfs_ga_info(struct seq_file *m, void *data)
465{
466 struct drm_info_node *node = (struct drm_info_node *) m->private;
467 struct drm_device *dev = node->minor->dev;
468 struct radeon_device *rdev = dev->dev_private;
469 uint32_t tmp;
470
471 tmp = RREG32(0x2140);
472 seq_printf(m, "VAP_CNTL_STATUS 0x%08x\n", tmp);
473 radeon_gpu_reset(rdev);
474 tmp = RREG32(0x425C);
475 seq_printf(m, "GA_IDLE 0x%08x\n", tmp);
476 return 0;
477}
478
479static struct drm_info_list rv515_pipes_info_list[] = {
480 {"rv515_pipes_info", rv515_debugfs_pipes_info, 0, NULL},
481};
482
483static struct drm_info_list rv515_ga_info_list[] = {
484 {"rv515_ga_info", rv515_debugfs_ga_info, 0, NULL},
485};
486#endif
487
488int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
489{
490#if defined(CONFIG_DEBUG_FS)
491 return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
492#else
493 return 0;
494#endif
495}
496
497int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
498{
499#if defined(CONFIG_DEBUG_FS)
500 return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
501#else
502 return 0;
503#endif
504}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
new file mode 100644
index 000000000000..da50cc51ede3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -0,0 +1,124 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h"
31
32/* rv770,rv730,rv710 depends on : */
33void rs600_mc_disable_clients(struct radeon_device *rdev);
34
35/* This files gather functions specifics to:
36 * rv770,rv730,rv710
37 *
38 * Some of these functions might be used by newer ASICs.
39 */
40int rv770_mc_wait_for_idle(struct radeon_device *rdev);
41void rv770_gpu_init(struct radeon_device *rdev);
42
43
44/*
45 * MC
46 */
/*
 * Program the rv770 memory-controller apertures.
 *
 * Sets the GART system aperture from rdev->mc.gtt_location/gtt_size,
 * quiesces MC clients, then writes the VRAM (FB) and AGP windows.
 * Always returns 0; an idle-wait failure only produces a warning.
 */
int rv770_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;

	rv770_gpu_init(rdev);

	/* setup the gart before changing location so we can ask to
	 * discard unmapped mc request
	 */
	/* FIXME: disable out of gart access */
	/* Aperture bounds are expressed in 4 KiB pages. */
	tmp = rdev->mc.gtt_location / 4096;
	tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
	WREG32(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, tmp);
	tmp = (rdev->mc.gtt_location + rdev->mc.gtt_size) / 4096;
	tmp = REG_SET(R700_LOGICAL_PAGE_NUMBER, tmp);
	WREG32(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, tmp);

	/* Clients must be idle before the FB/AGP windows are moved. */
	rs600_mc_disable_clients(rdev);
	if (rv770_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	/* FB window: register fields take the address shifted by 24. */
	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
	tmp = REG_SET(R700_MC_FB_TOP, tmp >> 24);
	tmp |= REG_SET(R700_MC_FB_BASE, rdev->mc.vram_location >> 24);
	WREG32(R700_MC_VM_FB_LOCATION, tmp);
	/* AGP window: register fields take the address shifted by 22. */
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	tmp = REG_SET(R700_MC_AGP_TOP, tmp >> 22);
	WREG32(R700_MC_VM_AGP_TOP, tmp);
	tmp = REG_SET(R700_MC_AGP_BOT, rdev->mc.gtt_location >> 22);
	WREG32(R700_MC_VM_AGP_BOT, tmp);
	return 0;
}
81
/* MC teardown for rv770 — currently a no-op stub. */
void rv770_mc_fini(struct radeon_device *rdev)
{
	/* FIXME: implement */
}
86
87
88/*
89 * Global GPU functions
90 */
/* rv770 has no known PLL errata; clear the workaround flags. */
void rv770_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}
95
/* Wait for the memory controller to go idle — stub, always reports idle. */
int rv770_mc_wait_for_idle(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}
101
/* GPU pipe/backend initialization for rv770 — currently a no-op stub. */
void rv770_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: implement */
}
106
107
108/*
109 * VRAM info
110 */
/* Detect the VRAM type (DDR/GDDR width etc.) — currently a no-op stub. */
void rv770_vram_get_type(struct radeon_device *rdev)
{
	/* FIXME: implement */
}
115
/*
 * Fill in VRAM information for rv770.  Sizing is not yet implemented;
 * only the PCI aperture (resource 0) base and length are recorded.
 */
void rv770_vram_info(struct radeon_device *rdev)
{
	rv770_vram_get_type(rdev);

	/* FIXME: implement */
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
new file mode 100644
index 000000000000..b0a9de7a57c2
--- /dev/null
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -0,0 +1,8 @@
#
# Makefile for the drm device driver. This driver provides support for the
# TTM memory manager used by the DRM graphics drivers.

ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o

obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
new file mode 100644
index 000000000000..e8f6d2229d8c
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -0,0 +1,150 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 * Keith Packard.
30 */
31
32#include "ttm/ttm_module.h"
33#include "ttm/ttm_bo_driver.h"
34#ifdef TTM_HAS_AGP
35#include "ttm/ttm_placement.h"
36#include <linux/agp_backend.h>
37#include <linux/module.h>
38#include <linux/io.h>
39#include <asm/agp.h>
40
/*
 * AGP-specific TTM backend: wraps the generic ttm_backend together with
 * the AGP memory object currently populated (NULL when empty) and the
 * bridge used for allocation and binding.
 */
struct ttm_agp_backend {
	struct ttm_backend backend;	/* must be first for container_of */
	struct agp_memory *mem;		/* currently populated AGP memory, or NULL */
	struct agp_bridge_data *bridge;	/* bridge to allocate/bind through */
};
46
47static int ttm_agp_populate(struct ttm_backend *backend,
48 unsigned long num_pages, struct page **pages,
49 struct page *dummy_read_page)
50{
51 struct ttm_agp_backend *agp_be =
52 container_of(backend, struct ttm_agp_backend, backend);
53 struct page **cur_page, **last_page = pages + num_pages;
54 struct agp_memory *mem;
55
56 mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
57 if (unlikely(mem == NULL))
58 return -ENOMEM;
59
60 mem->page_count = 0;
61 for (cur_page = pages; cur_page < last_page; ++cur_page) {
62 struct page *page = *cur_page;
63 if (!page)
64 page = dummy_read_page;
65
66 mem->memory[mem->page_count++] =
67 phys_to_gart(page_to_phys(page));
68 }
69 agp_be->mem = mem;
70 return 0;
71}
72
73static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
74{
75 struct ttm_agp_backend *agp_be =
76 container_of(backend, struct ttm_agp_backend, backend);
77 struct agp_memory *mem = agp_be->mem;
78 int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
79 int ret;
80
81 mem->is_flushed = 1;
82 mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
83
84 ret = agp_bind_memory(mem, bo_mem->mm_node->start);
85 if (ret)
86 printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
87
88 return ret;
89}
90
91static int ttm_agp_unbind(struct ttm_backend *backend)
92{
93 struct ttm_agp_backend *agp_be =
94 container_of(backend, struct ttm_agp_backend, backend);
95
96 if (agp_be->mem->is_bound)
97 return agp_unbind_memory(agp_be->mem);
98 else
99 return 0;
100}
101
102static void ttm_agp_clear(struct ttm_backend *backend)
103{
104 struct ttm_agp_backend *agp_be =
105 container_of(backend, struct ttm_agp_backend, backend);
106 struct agp_memory *mem = agp_be->mem;
107
108 if (mem) {
109 ttm_agp_unbind(backend);
110 agp_free_memory(mem);
111 }
112 agp_be->mem = NULL;
113}
114
115static void ttm_agp_destroy(struct ttm_backend *backend)
116{
117 struct ttm_agp_backend *agp_be =
118 container_of(backend, struct ttm_agp_backend, backend);
119
120 if (agp_be->mem)
121 ttm_agp_clear(backend);
122 kfree(agp_be);
123}
124
/* Backend vtable wiring the AGP implementation into the generic TTM backend. */
static struct ttm_backend_func ttm_agp_func = {
	.populate = ttm_agp_populate,
	.clear = ttm_agp_clear,
	.bind = ttm_agp_bind,
	.unbind = ttm_agp_unbind,
	.destroy = ttm_agp_destroy,
};
132
133struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
134 struct agp_bridge_data *bridge)
135{
136 struct ttm_agp_backend *agp_be;
137
138 agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
139 if (!agp_be)
140 return NULL;
141
142 agp_be->mem = NULL;
143 agp_be->bridge = bridge;
144 agp_be->backend.func = &ttm_agp_func;
145 agp_be->backend.bdev = bdev;
146 return &agp_be->backend;
147}
148EXPORT_SYMBOL(ttm_agp_backend_init);
149
150#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
new file mode 100644
index 000000000000..1587aeca7bea
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -0,0 +1,1698 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_module.h"
32#include "ttm/ttm_bo_driver.h"
33#include "ttm/ttm_placement.h"
34#include <linux/jiffies.h>
35#include <linux/slab.h>
36#include <linux/sched.h>
37#include <linux/mm.h>
38#include <linux/file.h>
39#include <linux/module.h>
40
41#define TTM_ASSERT_LOCKED(param)
42#define TTM_DEBUG(fmt, arg...)
43#define TTM_BO_HASH_ORDER 13
44
45static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
46static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
47static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
48
/* Translate a memory-type index into its single-bit placement flag. */
static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return (uint32_t)1 << type;
}
53
/*
 * Final destruction of a buffer object, invoked when list_kref drops to
 * zero.  Asserts that the object is fully idle and off every list, then
 * destroys the backing ttm and frees (or hands off) the object.
 */
static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	/* All references dropped, no fence, no memory node, off all lists. */
	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	if (bo->destroy)
		/* Driver supplied its own destructor; it owns the free. */
		bo->destroy(bo);
	else {
		/* Default path: return accounted memory, then free. */
		ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
		kfree(bo);
	}
}
77
78int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
79{
80
81 if (interruptible) {
82 int ret = 0;
83
84 ret = wait_event_interruptible(bo->event_queue,
85 atomic_read(&bo->reserved) == 0);
86 if (unlikely(ret != 0))
87 return -ERESTART;
88 } else {
89 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
90 }
91 return 0;
92}
93
/*
 * Put a reserved, pinned-free buffer object back onto its memory type's
 * LRU list (and the swap LRU if it has a ttm), taking one list_kref per
 * list.  NOTE(review): callers appear to hold bdev->lru_lock — confirm;
 * the only invariant asserted here is that @bo is reserved.
 */
static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	/* NO_EVICT objects are pinned and never appear on the LRUs. */
	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		/* Only objects with system pages can be swapped out. */
		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bdev->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}
115
116/**
117 * Call with the lru_lock held.
118 */
119
120static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
121{
122 int put_count = 0;
123
124 if (!list_empty(&bo->swap)) {
125 list_del_init(&bo->swap);
126 ++put_count;
127 }
128 if (!list_empty(&bo->lru)) {
129 list_del_init(&bo->lru);
130 ++put_count;
131 }
132
133 /*
134 * TODO: Add a driver hook to delete from
135 * driver-specific LRU's here.
136 */
137
138 return put_count;
139}
140
/*
 * Try to reserve @bo; call with bdev->lru_lock held (it is dropped and
 * re-acquired around any sleep).  With @use_sequence, a reservation
 * already held under an equal-or-newer sequence yields -EAGAIN, which
 * callers use for deadlock avoidance.  Returns 0 on success, -EBUSY if
 * @no_wait and the object is reserved, or the wait's error code.
 */
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		/*
		 * Wrap-safe comparison: presumably "sequence is not older
		 * than the holder's val_seq" — back off with -EAGAIN.
		 */
		if (use_sequence && bo->seq_valid &&
		    (sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		/* Drop the lru_lock while sleeping on the reservation. */
		spin_unlock(&bdev->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&bdev->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
/* NOTE(review): exports ttm_bo_reserve (defined below), placed here in the original. */
EXPORT_SYMBOL(ttm_bo_reserve);
175
/*
 * kref release handler used where the reference count must never reach
 * zero (the caller is known to hold another reference); reaching it is
 * a fatal logic error.
 */
static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}
180
/*
 * Reserve @bo, taking bdev->lru_lock internally, and remove it from the
 * LRU lists on success.  The list references released here use
 * ttm_bo_ref_bug: presumably the caller is guaranteed to hold its own
 * reference so the count cannot hit zero — confirm against callers.
 * Returns the ttm_bo_reserve_locked() result.
 */
int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int put_count = 0;
	int ret;

	spin_lock(&bdev->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bdev->lru_lock);

	/* Drop the list references the LRU removal returned. */
	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	return ret;
}
201
/*
 * Release a reservation: put @bo back on its LRU list(s), clear the
 * reserved flag and wake all waiters, all under bdev->lru_lock.
 */
void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	spin_lock(&bdev->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
214/*
215 * Call bo->mutex locked.
216 */
217
218static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
219{
220 struct ttm_bo_device *bdev = bo->bdev;
221 int ret = 0;
222 uint32_t page_flags = 0;
223
224 TTM_ASSERT_LOCKED(&bo->mutex);
225 bo->ttm = NULL;
226
227 switch (bo->type) {
228 case ttm_bo_type_device:
229 if (zero_alloc)
230 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
231 case ttm_bo_type_kernel:
232 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
233 page_flags, bdev->dummy_read_page);
234 if (unlikely(bo->ttm == NULL))
235 ret = -ENOMEM;
236 break;
237 case ttm_bo_type_user:
238 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
239 page_flags | TTM_PAGE_FLAG_USER,
240 bdev->dummy_read_page);
241 if (unlikely(bo->ttm == NULL))
242 ret = -ENOMEM;
243 break;
244
245 ret = ttm_tt_set_user(bo->ttm, current,
246 bo->buffer_start, bo->num_pages);
247 if (unlikely(ret != 0))
248 ttm_tt_destroy(bo->ttm);
249 break;
250 default:
251 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
252 ret = -EINVAL;
253 break;
254 }
255
256 return ret;
257}
258
/*
 * Move @bo into the memory region described by @mem, creating/binding a
 * ttm as needed and dispatching to the cheapest applicable move path
 * (ttm flip, driver move hook, or memcpy fallback).  On success the
 * GPU offset and current placement are updated under bo->lock.
 */
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	/* Kill CPU mappings if the aperture or caching attributes change. */
	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			/*
			 * NOTE(review): returns without the out_err ttm
			 * teardown the other failure paths use — verify
			 * this asymmetry is intentional.
			 */
			return ret;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		/* System -> bound: just adopt the new region, no data copy. */
		if (bo->mem.mem_type == TTM_PL_SYSTEM) {

			struct ttm_mem_reg *old_mem = &bo->mem;
			uint32_t save_flags = old_mem->placement;

			*old_mem = *mem;
			mem->mm_node = NULL;
			ttm_flag_masked(&save_flags, mem->placement,
					TTM_PL_MASK_MEMTYPE);
			goto moved;
		}

	}

	/* Pick the move mechanism: ttm flip, driver hook, or memcpy. */
	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	/* Publish the new GPU-visible offset and placement. */
	if (bo->mem.mm_node) {
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	}

	return 0;

out_err:
	/* A fixed (unmappable-by-ttm) destination cannot keep a bound ttm. */
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
347
/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 * up the list_kref and schedule a delayed list check.
 *
 * Returns 0 when the object was destroyed or queued, -EBUSY when it is
 * still fenced and already queued for delayed destruction.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	/* With remove_all, wait (lazy=false) for the fence to signal. */
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&bdev->lru_lock);
		/* Must succeed: the object is idle and being torn down. */
		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			kref_put(&bo->list_kref, ttm_bo_ref_bug);
		}
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&bdev->lru_lock);

		atomic_set(&bo->reserved, 0);

		/* These puts may trigger final destruction. */
		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_release_list);

		return 0;
	}

	/* Still fenced: queue on the delayed-destroy list if not there yet. */
	spin_lock(&bdev->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&bdev->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		/* Re-check in ~10ms (at least one jiffy). */
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&bdev->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}
418
/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 *
 * Returns nonzero while entries remain on the list (so the caller can
 * reschedule).
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_buffer_object *entry, *nentry;
	struct list_head *list, *next;
	int ret;

	spin_lock(&bdev->lru_lock);
	list_for_each_safe(list, next, &bdev->ddestroy) {
		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
		nentry = NULL;

		/*
		 * Protect the next list entry from destruction while we
		 * unlock the lru_lock.
		 */

		if (next != &bdev->ddestroy) {
			nentry = list_entry(next, struct ttm_buffer_object,
					    ddestroy);
			kref_get(&nentry->list_kref);
		}
		kref_get(&entry->list_kref);

		spin_unlock(&bdev->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);

		spin_lock(&bdev->lru_lock);
		if (nentry) {
			bool next_onlist = !list_empty(next);
			/* Drop the protective ref outside the lru_lock. */
			spin_unlock(&bdev->lru_lock);
			kref_put(&nentry->list_kref, ttm_bo_release_list);
			spin_lock(&bdev->lru_lock);
			/*
			 * Someone might have raced us and removed the
			 * next entry from the list. We don't bother restarting
			 * list traversal.
			 */

			if (!next_onlist)
				break;
		}
		if (ret)
			break;
	}
	ret = !list_empty(&bdev->ddestroy);
	spin_unlock(&bdev->lru_lock);

	return ret;
}
474
475static void ttm_bo_delayed_workqueue(struct work_struct *work)
476{
477 struct ttm_bo_device *bdev =
478 container_of(work, struct ttm_bo_device, wq.work);
479
480 if (ttm_bo_delayed_delete(bdev, false)) {
481 schedule_delayed_work(&bdev->wq,
482 ((HZ / 100) < 1) ? 1 : HZ / 100);
483 }
484}
485
/*
 * kref release for the object's main reference; entered with
 * bdev->vm_lock write-held by ttm_bo_unref.  Tears down the address
 * space node, then temporarily drops the vm_lock around the (possibly
 * sleeping) cleanup before re-taking it for the caller's unlock.
 */
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}
502
/*
 * Drop a reference to *@p_bo and clear the caller's pointer.  The final
 * put runs ttm_bo_release under the device's vm_lock write lock.
 */
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);
514
/*
 * Evict a reserved buffer object out of @mem_type: wait for GPU idle,
 * ask the driver where evicted data should go (falling back to system
 * memory), and move it there.  Marks the object evicted so caches are
 * invalidated on its next placement.  Returns 0 on success, -ERESTART
 * on signal, or the move/space error.
 */
static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
			bool interruptible, bool no_wait)
{
	int ret = 0;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	uint32_t proposed_placement;

	/* Raced: the object already left this memory type. */
	if (bo->mem.mem_type != mem_type)
		goto out;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (ret && ret != -ERESTART) {
		printk(KERN_ERR TTM_PFX "Failed to expire sync object before "
		       "buffer eviction.\n");
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	/* Let the driver pick the eviction destination for this object. */
	proposed_placement = bdev->driver->evict_flags(bo);

	ret = ttm_bo_mem_space(bo, proposed_placement,
			       &evict_mem, interruptible, no_wait);
	/* Fall back to system memory on a hard failure. */
	if (unlikely(ret != 0 && ret != -ERESTART))
		ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
				       &evict_mem, interruptible, no_wait);

	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait);
	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		goto out;
	}

	/* The move consumed the node on success; free any leftover. */
	spin_lock(&bdev->lru_lock);
	if (evict_mem.mm_node) {
		drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	spin_unlock(&bdev->lru_lock);
	bo->evicted = true;
out:
	return ret;
}
575
/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 *
 * On success fills mem->mm_node/mem_type.  Returns -ENOMEM when the LRU
 * is exhausted without finding room, or the reserve/evict error.
 */
static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem,
				  uint32_t mem_type,
				  bool interruptible, bool no_wait)
{
	struct drm_mm_node *node;
	struct ttm_buffer_object *entry;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int put_count = 0;
	int ret;

retry_pre_get:
	/* Pre-allocate so the later atomic get cannot need memory. */
	ret = drm_mm_pre_get(&man->manager);
	if (unlikely(ret != 0))
		return ret;

	spin_lock(&bdev->lru_lock);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (list_empty(lru))
			break;

		/* Evict the least recently used object of this type. */
		entry = list_first_entry(lru, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);

		ret =
		    ttm_bo_reserve_locked(entry, interruptible, no_wait,
					  false, 0);

		if (likely(ret == 0))
			put_count = ttm_bo_del_from_lru(entry);

		spin_unlock(&bdev->lru_lock);

		if (unlikely(ret != 0))
			return ret;

		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);

		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);

		ttm_bo_unreserve(entry);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		if (ret)
			return ret;

		spin_lock(&bdev->lru_lock);
	} while (1);

	if (!node) {
		spin_unlock(&bdev->lru_lock);
		return -ENOMEM;
	}

	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
	if (unlikely(!node)) {
		/* The pre-got cache was consumed by a racer; refill it. */
		spin_unlock(&bdev->lru_lock);
		goto retry_pre_get;
	}

	spin_unlock(&bdev->lru_lock);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}
654
655static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
656 bool disallow_fixed,
657 uint32_t mem_type,
658 uint32_t mask, uint32_t *res_mask)
659{
660 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
661
662 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
663 return false;
664
665 if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
666 return false;
667
668 if ((mask & man->available_caching) == 0)
669 return false;
670 if (mask & man->default_caching)
671 cur_flags |= man->default_caching;
672 else if (mask & TTM_PL_FLAG_CACHED)
673 cur_flags |= TTM_PL_FLAG_CACHED;
674 else if (mask & TTM_PL_FLAG_WC)
675 cur_flags |= TTM_PL_FLAG_WC;
676 else
677 cur_flags |= TTM_PL_FLAG_UNCACHED;
678
679 *res_mask = cur_flags;
680 return true;
681}
682
/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 *
 * Returns 0 with mem->mm_node/mem_type/placement filled in, -EINVAL if
 * no compatible type exists, -ERESTART if eviction was interrupted, or
 * -ENOMEM when nothing could be made to fit.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     uint32_t proposed_placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	uint32_t num_prios = bdev->driver->num_mem_type_prio;
	const uint32_t *prios = bdev->driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_eagain = false;
	struct drm_mm_node *node = NULL;
	int ret;

	mem->mm_node = NULL;
	/* First pass: free space in driver priority order, no eviction. */
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
					       bo->type == ttm_bo_type_user,
					       mem_type, proposed_placement,
					       &cur_flags);

		if (!type_ok)
			continue;

		/* System memory needs no mm_node allocation. */
		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			do {
				ret = drm_mm_pre_get(&man->manager);
				if (unlikely(ret))
					return ret;

				spin_lock(&bdev->lru_lock);
				node = drm_mm_search_free(&man->manager,
							  mem->num_pages,
							  mem->page_alignment,
							  1);
				if (unlikely(!node)) {
					spin_unlock(&bdev->lru_lock);
					break;
				}
				/* May fail if a racer used the pre-got cache. */
				node = drm_mm_get_block_atomic(node,
							       mem->num_pages,
							       mem->page_alignment);
				spin_unlock(&bdev->lru_lock);
			} while (!node);
		}
		if (node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	/* Second pass: evict in the driver's "busy" priority order. */
	num_prios = bdev->driver->num_mem_busy_prio;
	prios = bdev->driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		if (!man->has_type)
			continue;

		if (!ttm_bo_mt_compatible(man,
					  bo->type == ttm_bo_type_user,
					  mem_type,
					  proposed_placement, &cur_flags))
			continue;

		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
					     interruptible, no_wait);

		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}

		if (ret == -ERESTART)
			has_eagain = true;
	}

	ret = (has_eagain) ? -ERESTART : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
795
796int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
797{
798 int ret = 0;
799
800 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
801 return -EBUSY;
802
803 ret = wait_event_interruptible(bo->event_queue,
804 atomic_read(&bo->cpu_writers) == 0);
805
806 if (ret == -ERESTARTSYS)
807 ret = -ERESTART;
808
809 return ret;
810}
811
/*
 * Move a reserved buffer object to a placement satisfying
 * @proposed_placement: wait for GPU idle, find space, then perform the
 * move.  On failure any acquired mm_node is returned.  Returns 0 on
 * success or the wait/space/move error.
 */
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		       uint32_t proposed_placement,
		       bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (ret)
		return ret;

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;

	/*
	 * Determine where to move the buffer.
	 */

	ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
			       interruptible, no_wait);
	if (ret)
		goto out_unlock;

	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);

out_unlock:
	/* Return the space we claimed if the move did not consume it. */
	if (ret && mem.mm_node) {
		spin_lock(&bdev->lru_lock);
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&bdev->lru_lock);
	}
	return ret;
}
858
859static int ttm_bo_mem_compat(uint32_t proposed_placement,
860 struct ttm_mem_reg *mem)
861{
862 if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
863 return 0;
864 if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
865 return 0;
866
867 return 1;
868}
869
/*
 * Validate a reserved buffer object against @proposed_placement: move
 * it if the current placement is incompatible, create a backing ttm for
 * system placement if needed, then fold the non-memtype flag bits into
 * the active placement.  Returns 0 on success or the move/ttm error.
 */
int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
			       uint32_t proposed_placement,
			       bool interruptible, bool no_wait)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	bo->proposed_placement = proposed_placement;

	TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
		  (unsigned long)proposed_placement,
		  (unsigned long)bo->mem.placement);

	/*
	 * Check whether we need to move buffer.
	 */

	if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
		ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
					 interruptible, no_wait);
		if (ret) {
			if (ret != -ERESTART)
				printk(KERN_ERR TTM_PFX
				       "Failed moving buffer. "
				       "Proposed placement 0x%08x\n",
				       bo->proposed_placement);
			if (ret == -ENOMEM)
				printk(KERN_ERR TTM_PFX
				       "Out of aperture space or "
				       "DRM memory quota.\n");
			return ret;
		}
	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	/*
	 * Validation has succeeded, move the access and other
	 * non-mapping-related flag bits from the proposed flags to
	 * the active flags
	 */

	ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
			~TTM_PL_MASK_MEMTYPE);

	return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);
925
926int
927ttm_bo_check_placement(struct ttm_buffer_object *bo,
928 uint32_t set_flags, uint32_t clr_flags)
929{
930 uint32_t new_mask = set_flags | clr_flags;
931
932 if ((bo->type == ttm_bo_type_user) &&
933 (clr_flags & TTM_PL_FLAG_CACHED)) {
934 printk(KERN_ERR TTM_PFX
935 "User buffers require cache-coherent memory.\n");
936 return -EINVAL;
937 }
938
939 if (!capable(CAP_SYS_ADMIN)) {
940 if (new_mask & TTM_PL_FLAG_NO_EVICT) {
941 printk(KERN_ERR TTM_PFX "Need to be root to modify"
942 " NO_EVICT status.\n");
943 return -EINVAL;
944 }
945
946 if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
947 (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
948 printk(KERN_ERR TTM_PFX
949 "Incompatible memory specification"
950 " for NO_EVICT buffer.\n");
951 return -EINVAL;
952 }
953 }
954 return 0;
955}
956
/*
 * ttm_buffer_object_init - initialize a (pre-allocated) buffer object.
 * @bdev:                    the device the object belongs to.
 * @bo:                      caller-allocated, zeroed buffer object.
 * @size:                    requested size in bytes.
 * @type:                    kernel / device / user object.
 * @flags:                   initial placement flags.
 * @page_alignment:          alignment of the backing allocation, in pages.
 * @buffer_start:            user-space start address (user objects).
 * @interruptible:           sleep interruptibly during initial validation.
 * @persistant_swap_storage: optional caller-provided swap file.
 * @acc_size:                bytes already accounted to the memory global.
 * @destroy:                 destructor; NULL means kfree via default path.
 *
 * The object starts reserved, placed in cached system memory, and is
 * validated against @flags before being unreserved.
 *
 * NOTE(review): on any error after setup the object is released with
 * ttm_bo_unref(), i.e. ownership passes to this function and the
 * caller's pointer must be considered dead - callers must not free it.
 * Returns 0 on success, negative error code otherwise.
 */
int ttm_buffer_object_init(struct ttm_bo_device *bdev,
			   struct ttm_buffer_object *bo,
			   unsigned long size,
			   enum ttm_bo_type type,
			   uint32_t flags,
			   uint32_t page_alignment,
			   unsigned long buffer_start,
			   bool interruptible,
			   struct file *persistant_swap_storage,
			   size_t acc_size,
			   void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	/* Account for a user start address not on a page boundary. */
	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		return -EINVAL;
	}
	bo->destroy = destroy;

	spin_lock_init(&bo->lock);
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	/* Created reserved: nobody else may touch it until we unreserve. */
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->type = type;
	bo->num_pages = num_pages;
	/* Fresh objects live in cached system memory with no mm node. */
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;

	ret = ttm_bo_check_placement(bo, flags, 0ULL);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * If no caching attributes are set, accept any form of caching.
	 */

	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |= TTM_PL_MASK_CACHING;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */

	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	/* Drops the initial reference: the object is destroyed here. */
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_buffer_object_init);
1039
1040static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
1041 unsigned long num_pages)
1042{
1043 size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1044 PAGE_MASK;
1045
1046 return bdev->ttm_bo_size + 2 * page_array_size;
1047}
1048
1049int ttm_buffer_object_create(struct ttm_bo_device *bdev,
1050 unsigned long size,
1051 enum ttm_bo_type type,
1052 uint32_t flags,
1053 uint32_t page_alignment,
1054 unsigned long buffer_start,
1055 bool interruptible,
1056 struct file *persistant_swap_storage,
1057 struct ttm_buffer_object **p_bo)
1058{
1059 struct ttm_buffer_object *bo;
1060 int ret;
1061 struct ttm_mem_global *mem_glob = bdev->mem_glob;
1062
1063 size_t acc_size =
1064 ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1065 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
1066 if (unlikely(ret != 0))
1067 return ret;
1068
1069 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1070
1071 if (unlikely(bo == NULL)) {
1072 ttm_mem_global_free(mem_glob, acc_size, false);
1073 return -ENOMEM;
1074 }
1075
1076 ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
1077 page_alignment, buffer_start,
1078 interruptible,
1079 persistant_swap_storage, acc_size, NULL);
1080 if (likely(ret == 0))
1081 *p_bo = bo;
1082
1083 return ret;
1084}
1085
1086static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
1087 uint32_t mem_type, bool allow_errors)
1088{
1089 int ret;
1090
1091 spin_lock(&bo->lock);
1092 ret = ttm_bo_wait(bo, false, false, false);
1093 spin_unlock(&bo->lock);
1094
1095 if (ret && allow_errors)
1096 goto out;
1097
1098 if (bo->mem.mem_type == mem_type)
1099 ret = ttm_bo_evict(bo, mem_type, false, false);
1100
1101 if (ret) {
1102 if (allow_errors) {
1103 goto out;
1104 } else {
1105 ret = 0;
1106 printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
1107 }
1108 }
1109
1110out:
1111 return ret;
1112}
1113
/*
 * ttm_bo_force_list_clean - empty an LRU list by evicting every entry.
 * @bdev:         the device owning the list.
 * @head:         the LRU list to drain.
 * @mem_type:     memory type entries should be evicted from.
 * @allow_errors: propagate eviction errors instead of swallowing them.
 *
 * NOTE(review): despite @allow_errors, the result of ttm_bo_leave_list()
 * is not propagated here; this function always returns 0.
 */
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   struct list_head *head,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_buffer_object *entry;
	int ret;
	int put_count;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&bdev->lru_lock);

	while (!list_empty(head)) {
		entry = list_first_entry(head, struct ttm_buffer_object, lru);
		/* Hold a list ref so the entry survives while unlocked. */
		kref_get(&entry->list_kref);
		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
		put_count = ttm_bo_del_from_lru(entry);
		spin_unlock(&bdev->lru_lock);
		/* Drop the LRU references collected under the lock. */
		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);
		/* Reservation cannot fail here: entry was on the LRU. */
		BUG_ON(ret);
		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
		ttm_bo_unreserve(entry);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		spin_lock(&bdev->lru_lock);
	}

	spin_unlock(&bdev->lru_lock);

	return 0;
}
1147
/*
 * ttm_bo_clean_mm - take down a memory manager type.
 * @bdev:     the device.
 * @mem_type: the manager to take down.
 *
 * Marks the type unusable, evicts all buffers from it (for non-system
 * types) and tears down the underlying drm_mm range manager.
 * Returns 0 on success, -EINVAL for a bad/uninitialized type, or
 * -EBUSY if allocations are still outstanding after forced cleanup.
 */
int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
		return ret;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
		       "memory manager type %u\n", mem_type);
		return ret;
	}

	/* Refuse new allocations before draining the type. */
	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);

		spin_lock(&bdev->lru_lock);
		if (drm_mm_clean(&man->manager))
			drm_mm_takedown(&man->manager);
		else
			ret = -EBUSY;

		spin_unlock(&bdev->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);
1183
1184int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1185{
1186 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1187
1188 if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1189 printk(KERN_ERR TTM_PFX
1190 "Illegal memory manager memory type %u.\n",
1191 mem_type);
1192 return -EINVAL;
1193 }
1194
1195 if (!man->has_type) {
1196 printk(KERN_ERR TTM_PFX
1197 "Memory type %u has not been initialized.\n",
1198 mem_type);
1199 return 0;
1200 }
1201
1202 return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
1203}
1204EXPORT_SYMBOL(ttm_bo_evict_mm);
1205
1206int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1207 unsigned long p_offset, unsigned long p_size)
1208{
1209 int ret = -EINVAL;
1210 struct ttm_mem_type_manager *man;
1211
1212 if (type >= TTM_NUM_MEM_TYPES) {
1213 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1214 return ret;
1215 }
1216
1217 man = &bdev->man[type];
1218 if (man->has_type) {
1219 printk(KERN_ERR TTM_PFX
1220 "Memory manager already initialized for type %d\n",
1221 type);
1222 return ret;
1223 }
1224
1225 ret = bdev->driver->init_mem_type(bdev, type, man);
1226 if (ret)
1227 return ret;
1228
1229 ret = 0;
1230 if (type != TTM_PL_SYSTEM) {
1231 if (!p_size) {
1232 printk(KERN_ERR TTM_PFX
1233 "Zero size memory manager type %d\n",
1234 type);
1235 return ret;
1236 }
1237 ret = drm_mm_init(&man->manager, p_offset, p_size);
1238 if (ret)
1239 return ret;
1240 }
1241 man->has_type = true;
1242 man->use_type = true;
1243 man->size = p_size;
1244
1245 INIT_LIST_HEAD(&man->lru);
1246
1247 return 0;
1248}
1249EXPORT_SYMBOL(ttm_bo_init_mm);
1250
/*
 * ttm_bo_device_release - tear down a buffer object device.
 * @bdev: the device to release.
 *
 * Takes down all initialized memory managers (system type is only
 * disabled, not cleaned), flushes delayed-destroy work, unregisters the
 * swap shrinker and frees the address-space manager and dummy page.
 * Returns 0 on success or -EBUSY if some manager still held allocations.
 */
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;

	/* Walk types highest-first so dependent types go before system. */
	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	/* Stop the delayed-destroy worker, then drain its backlog. */
	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&bdev->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&bdev->lru_lock);

	ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	__free_page(bdev->dummy_read_page);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);
1295
1296/*
1297 * This function is intended to be called on drm driver load.
1298 * If you decide to call it from firstopen, you must protect the call
1299 * from a potentially racing ttm_bo_driver_finish in lastclose.
1300 * (This may happen on X server restart).
1301 */
1302
/*
 * ttm_bo_device_init - initialize a buffer object device.
 * @bdev:             the device structure to initialize.
 * @mem_glob:         global memory accounting object.
 * @driver:           driver callbacks.
 * @file_page_offset: start offset for mmap address-space allocation.
 *
 * Sets up locks, the dummy read page, the system memory type, the
 * device address-space manager and the swap shrinker. On failure,
 * already-acquired resources are unwound via the out_err labels.
 * Returns 0 on success, negative error code otherwise.
 */
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_mem_global *mem_glob,
		       struct ttm_bo_driver *driver, uint64_t file_page_offset)
{
	int ret = -EINVAL;

	bdev->dummy_read_page = NULL;
	rwlock_init(&bdev->vm_lock);
	spin_lock_init(&bdev->lru_lock);

	bdev->driver = driver;
	bdev->mem_glob = mem_glob;

	memset(bdev->man, 0, sizeof(bdev->man));

	/* Page returned by reads of unpopulated buffer ranges. */
	bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
	if (unlikely(bdev->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
	if (unlikely(ret != 0))
		goto out_err1;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_err2;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	INIT_LIST_HEAD(&bdev->swap_lru);
	bdev->dev_mapping = NULL;
	ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
		       "Could not register buffer object swapout.\n");
		goto out_err2;
	}

	/* Fixed per-object overheads used for memory accounting. */
	bdev->ttm_bo_extra_size =
		ttm_round_pot(sizeof(struct ttm_tt)) +
		ttm_round_pot(sizeof(struct ttm_backend));

	bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));

	return 0;
out_err2:
	ttm_bo_clean_mm(bdev, 0);
out_err1:
	__free_page(bdev->dummy_read_page);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
1366
1367/*
1368 * buffer object vm functions.
1369 */
1370
1371bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1372{
1373 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1374
1375 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1376 if (mem->mem_type == TTM_PL_SYSTEM)
1377 return false;
1378
1379 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1380 return false;
1381
1382 if (mem->placement & TTM_PL_FLAG_CACHED)
1383 return false;
1384 }
1385 return true;
1386}
1387
1388int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
1389 struct ttm_mem_reg *mem,
1390 unsigned long *bus_base,
1391 unsigned long *bus_offset, unsigned long *bus_size)
1392{
1393 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1394
1395 *bus_size = 0;
1396 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1397 return -EINVAL;
1398
1399 if (ttm_mem_reg_is_pci(bdev, mem)) {
1400 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
1401 *bus_size = mem->num_pages << PAGE_SHIFT;
1402 *bus_base = man->io_offset;
1403 }
1404
1405 return 0;
1406}
1407
1408void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1409{
1410 struct ttm_bo_device *bdev = bo->bdev;
1411 loff_t offset = (loff_t) bo->addr_space_offset;
1412 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1413
1414 if (!bdev->dev_mapping)
1415 return;
1416
1417 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1418}
1419
1420static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1421{
1422 struct ttm_bo_device *bdev = bo->bdev;
1423 struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1424 struct rb_node *parent = NULL;
1425 struct ttm_buffer_object *cur_bo;
1426 unsigned long offset = bo->vm_node->start;
1427 unsigned long cur_offset;
1428
1429 while (*cur) {
1430 parent = *cur;
1431 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1432 cur_offset = cur_bo->vm_node->start;
1433 if (offset < cur_offset)
1434 cur = &parent->rb_left;
1435 else if (offset > cur_offset)
1436 cur = &parent->rb_right;
1437 else
1438 BUG();
1439 }
1440
1441 rb_link_node(&bo->vm_rb, parent, cur);
1442 rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1443}
1444
1445/**
1446 * ttm_bo_setup_vm:
1447 *
1448 * @bo: the buffer to allocate address space for
1449 *
1450 * Allocate address space in the drm device so that applications
1451 * can mmap the buffer and access the contents. This only
1452 * applies to ttm_bo_type_device objects as others are not
1453 * placed in the drm device address space.
1454 */
1455
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	/* Pre-allocate drm_mm node memory outside the vm_lock. */
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	/*
	 * The atomic get may exhaust the pre-allocated nodes; drop the
	 * lock and retry from the pre-get step in that case.
	 */
	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	/* mmap offset handed to user space, in bytes. */
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}
1492
/*
 * ttm_bo_wait - wait for a buffer object's GPU work to finish.
 * @bo:            the buffer object; bo->lock must be held on entry and
 *                 is held again on return (dropped around driver waits).
 * @lazy:          passed through to the driver's sync-object wait.
 * @interruptible: allow the wait to be interrupted by signals.
 * @no_wait:       return -EBUSY instead of sleeping.
 *
 * Loops until bo->sync_obj is gone, re-checking after every unlocked
 * wait since another thread may have replaced or cleared the fence.
 */
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		/* Already signaled: detach and free the fence unlocked. */
		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		/* Take our own fence reference before dropping the lock. */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bo->lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
			return ret;
		}
		spin_lock(&bo->lock);
		/* Only clear if nobody swapped in a new fence meanwhile. */
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
1545
/*
 * Release a reservation taken with ttm_bo_block_reservation() and wake
 * all waiters. The flag must be cleared before the wakeup so woken
 * threads observe the buffer as unreserved.
 */
void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}
1551
1552int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1553 bool no_wait)
1554{
1555 int ret;
1556
1557 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
1558 if (no_wait)
1559 return -EBUSY;
1560 else if (interruptible) {
1561 ret = wait_event_interruptible
1562 (bo->event_queue, atomic_read(&bo->reserved) == 0);
1563 if (unlikely(ret != 0))
1564 return -ERESTART;
1565 } else {
1566 wait_event(bo->event_queue,
1567 atomic_read(&bo->reserved) == 0);
1568 }
1569 }
1570 return 0;
1571}
1572
/*
 * Grab a CPU-write reference on @bo: waits for GPU idle, then bumps
 * bo->cpu_writers so validation/eviction will block CPU access paths.
 * Returns 0 on success; the reference must be dropped with
 * ttm_bo_synccpu_write_release().
 */
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
	 * makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	/* Only count the writer once the buffer is known idle. */
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
1593
/*
 * Drop a CPU-write reference taken with ttm_bo_synccpu_write_grab();
 * wakes waiters (e.g. ttm_bo_wait_cpu) when the last reference goes.
 */
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}
1599
1600/**
1601 * A buffer object shrink method that tries to swap out the first
1602 * buffer object on the bo_global::swap_lru list.
1603 */
1604
/*
 * ttm_bo_swapout - shrink callback: swap out one buffer object.
 * @shrink: embedded in the owning ttm_bo_device.
 *
 * Picks the first reservable buffer on the swap LRU, waits for GPU
 * idle, moves it to cached system memory if needed, unmaps user
 * mappings and pushes its pages to swap storage.
 * Returns 0 on success or a negative error (-EBUSY if the list is
 * empty).
 */
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_device *bdev =
	    container_of(shrink, struct ttm_bo_device, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&bdev->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&bdev->swap_lru))) {
			spin_unlock(&bdev->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&bdev->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&bdev->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&bdev->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bdev->lru_lock);

	/* Drop the LRU references collected while the bo was listed. */
	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
1693
1694void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1695{
1696 while (ttm_bo_swapout(&bdev->shrink) == 0)
1697 ;
1698}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
new file mode 100644
index 000000000000..517c84559633
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -0,0 +1,561 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_bo_driver.h"
32#include "ttm/ttm_placement.h"
33#include <linux/io.h>
34#include <linux/highmem.h>
35#include <linux/wait.h>
36#include <linux/vmalloc.h>
37#include <linux/version.h>
38#include <linux/module.h>
39
40void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
41{
42 struct ttm_mem_reg *old_mem = &bo->mem;
43
44 if (old_mem->mm_node) {
45 spin_lock(&bo->bdev->lru_lock);
46 drm_mm_put_block(old_mem->mm_node);
47 spin_unlock(&bo->bdev->lru_lock);
48 }
49 old_mem->mm_node = NULL;
50}
51
52int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
53 bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
54{
55 struct ttm_tt *ttm = bo->ttm;
56 struct ttm_mem_reg *old_mem = &bo->mem;
57 uint32_t save_flags = old_mem->placement;
58 int ret;
59
60 if (old_mem->mem_type != TTM_PL_SYSTEM) {
61 ttm_tt_unbind(ttm);
62 ttm_bo_free_old_node(bo);
63 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
64 TTM_PL_MASK_MEM);
65 old_mem->mem_type = TTM_PL_SYSTEM;
66 save_flags = old_mem->placement;
67 }
68
69 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
70 if (unlikely(ret != 0))
71 return ret;
72
73 if (new_mem->mem_type != TTM_PL_SYSTEM) {
74 ret = ttm_tt_bind(ttm, new_mem);
75 if (unlikely(ret != 0))
76 return ret;
77 }
78
79 *old_mem = *new_mem;
80 new_mem->mm_node = NULL;
81 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
82 return 0;
83}
84EXPORT_SYMBOL(ttm_bo_move_ttm);
85
86int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
87 void **virtual)
88{
89 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
90 unsigned long bus_offset;
91 unsigned long bus_size;
92 unsigned long bus_base;
93 int ret;
94 void *addr;
95
96 *virtual = NULL;
97 ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
98 if (ret || bus_size == 0)
99 return ret;
100
101 if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
102 addr = (void *)(((u8 *) man->io_addr) + bus_offset);
103 else {
104 if (mem->placement & TTM_PL_FLAG_WC)
105 addr = ioremap_wc(bus_base + bus_offset, bus_size);
106 else
107 addr = ioremap_nocache(bus_base + bus_offset, bus_size);
108 if (!addr)
109 return -ENOMEM;
110 }
111 *virtual = addr;
112 return 0;
113}
114
115void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
116 void *virtual)
117{
118 struct ttm_mem_type_manager *man;
119
120 man = &bdev->man[mem->mem_type];
121
122 if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
123 iounmap(virtual);
124}
125
126static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
127{
128 uint32_t *dstP =
129 (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
130 uint32_t *srcP =
131 (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
132
133 int i;
134 for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
135 iowrite32(ioread32(srcP++), dstP++);
136 return 0;
137}
138
139static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
140 unsigned long page)
141{
142 struct page *d = ttm_tt_get_page(ttm, page);
143 void *dst;
144
145 if (!d)
146 return -ENOMEM;
147
148 src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
149 dst = kmap(d);
150 if (!dst)
151 return -ENOMEM;
152
153 memcpy_fromio(dst, src, PAGE_SIZE);
154 kunmap(d);
155 return 0;
156}
157
158static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
159 unsigned long page)
160{
161 struct page *s = ttm_tt_get_page(ttm, page);
162 void *src;
163
164 if (!s)
165 return -ENOMEM;
166
167 dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
168 src = kmap(s);
169 if (!src)
170 return -ENOMEM;
171
172 memcpy_toio(dst, src, PAGE_SIZE);
173 kunmap(s);
174 return 0;
175}
176
/*
 * ttm_bo_move_memcpy - move a buffer by CPU copy.
 * @bo:      the buffer object.
 * @evict:   unused here; part of the common move-function signature.
 * @no_wait: unused here; part of the common move-function signature.
 * @new_mem: destination region; its mm_node is taken over on success.
 *
 * Maps source and destination (ioremap for aperture memory, TTM pages
 * otherwise) and copies page by page, walking backwards when the two
 * regions overlap within the same memory type. On success the old node
 * is freed and, for fixed memory types, the now-unneeded TTM destroyed.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	/* Snapshot: *old_mem is overwritten below but must be unmapped. */
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	uint32_t save_flags = old_mem->placement;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/* Both sides in system memory: nothing to copy, just retarget. */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	/* Overlapping ranges in the same type: copy high-to-low. */
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL)
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
		else if (new_iomap == NULL)
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
		else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	/* Ensure all writes are visible before releasing the old node. */
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);

	/* Fixed memory is not TTM-backed: drop the now-unused TTM. */
	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
248
/* Destructor for transfer placeholder objects: plain kfree, since they
 * were allocated in ttm_buffer_object_transfer() and own no resources
 * beyond what ttm_bo_release has already torn down. */
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}
253
254/**
255 * ttm_buffer_object_transfer
256 *
257 * @bo: A pointer to a struct ttm_buffer_object.
258 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
259 * holding the data of @bo with the old placement.
260 *
261 * This is a utility function that may be called after an accelerated move
262 * has been scheduled. A new buffer object is created as a placeholder for
263 * the old data while it's being copied. When that buffer object is idle,
264 * it can be destroyed, releasing the space of the old placement.
265 * Returns:
266 * !0: Failure.
267 */
268
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	/* Shallow-copy everything, then repair the shared members below. */
	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	/* The placeholder has no mmap address-space node of its own. */
	fbo->vm_node = NULL;

	/* Own fence reference: the copy must idle independently of @bo. */
	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	/* Re-point the mm node back-pointer at the new owner. */
	if (fbo->mem.mm_node)
		fbo->mem.mm_node->private = (void *)fbo;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}
304
/*
 * ttm_io_prot - derive a page protection from TTM caching flags.
 * @caching_flags: TTM_PL_FLAG_* caching bits of the placement.
 * @tmp:           base protection to modify.
 *
 * Per-architecture mapping: write-combine or uncached on x86,
 * no-cache (+guarded for uncached) on powerpc, and uncached variants
 * on ia64/sparc. Cached placements keep @tmp unchanged (except ia64,
 * which has no cached branch here).
 */
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		/* 486+ only: older CPUs lack usable PCD handling. */
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
332
333static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
334 unsigned long bus_base,
335 unsigned long bus_offset,
336 unsigned long bus_size,
337 struct ttm_bo_kmap_obj *map)
338{
339 struct ttm_bo_device *bdev = bo->bdev;
340 struct ttm_mem_reg *mem = &bo->mem;
341 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
342
343 if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
344 map->bo_kmap_type = ttm_bo_map_premapped;
345 map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
346 } else {
347 map->bo_kmap_type = ttm_bo_map_iomap;
348 if (mem->placement & TTM_PL_FLAG_WC)
349 map->virtual = ioremap_wc(bus_base + bus_offset,
350 bus_size);
351 else
352 map->virtual = ioremap_nocache(bus_base + bus_offset,
353 bus_size);
354 }
355 return (!map->virtual) ? -ENOMEM : 0;
356}
357
358static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
359 unsigned long start_page,
360 unsigned long num_pages,
361 struct ttm_bo_kmap_obj *map)
362{
363 struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
364 struct ttm_tt *ttm = bo->ttm;
365 struct page *d;
366 int i;
367
368 BUG_ON(!ttm);
369 if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
370 /*
371 * We're mapping a single page, and the desired
372 * page protection is consistent with the bo.
373 */
374
375 map->bo_kmap_type = ttm_bo_map_kmap;
376 map->page = ttm_tt_get_page(ttm, start_page);
377 map->virtual = kmap(map->page);
378 } else {
379 /*
380 * Populate the part we're mapping;
381 */
382 for (i = start_page; i < start_page + num_pages; ++i) {
383 d = ttm_tt_get_page(ttm, i);
384 if (!d)
385 return -ENOMEM;
386 }
387
388 /*
389 * We need to use vmap to get the desired page protection
390 * or to make the buffer object look contigous.
391 */
392 prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
393 PAGE_KERNEL :
394 ttm_io_prot(mem->placement, PAGE_KERNEL);
395 map->bo_kmap_type = ttm_bo_map_vmap;
396 map->virtual = vmap(ttm->pages + start_page, num_pages,
397 0, prot);
398 }
399 return (!map->virtual) ? -ENOMEM : 0;
400}
401
/*
 * Map @num_pages of @bo starting at @start_page into kernel address space.
 * Dispatches to ttm_bo_ioremap() for I/O-memory placements (bus_size != 0)
 * and to ttm_bo_kmap_ttm() for system-memory placements.
 * The caller must hold a reservation; the result is released with
 * ttm_bo_kunmap().  Returns 0 on success or a negative errno.
 */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	int ret;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	/* A bo on the swap list may have no backing pages to map. */
	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	/* NOTE(review): these two checks do not reject
	 * start_page + num_pages > bo->num_pages as a combination;
	 * callers appear trusted here — confirm against call sites. */
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	if (ret)
		return ret;
	if (bus_size == 0) {
		/* System memory: map through the ttm page array. */
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		/* I/O memory: narrow the bus window to the requested range. */
		bus_offset += start_page << PAGE_SHIFT;
		bus_size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
434
435void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
436{
437 if (!map->virtual)
438 return;
439 switch (map->bo_kmap_type) {
440 case ttm_bo_map_iomap:
441 iounmap(map->virtual);
442 break;
443 case ttm_bo_map_vmap:
444 vunmap(map->virtual);
445 break;
446 case ttm_bo_map_kmap:
447 kunmap(map->page);
448 break;
449 case ttm_bo_map_premapped:
450 break;
451 default:
452 BUG();
453 }
454 map->virtual = NULL;
455 map->page = NULL;
456}
457EXPORT_SYMBOL(ttm_bo_kunmap);
458
459int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
460 unsigned long dst_offset,
461 unsigned long *pfn, pgprot_t *prot)
462{
463 struct ttm_mem_reg *mem = &bo->mem;
464 struct ttm_bo_device *bdev = bo->bdev;
465 unsigned long bus_offset;
466 unsigned long bus_size;
467 unsigned long bus_base;
468 int ret;
469 ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
470 &bus_size);
471 if (ret)
472 return -EINVAL;
473 if (bus_size != 0)
474 *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
475 else
476 if (!bo->ttm)
477 return -EINVAL;
478 else
479 *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
480 dst_offset >>
481 PAGE_SHIFT));
482 *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
483 PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
484
485 return 0;
486}
487
488int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
489 void *sync_obj,
490 void *sync_obj_arg,
491 bool evict, bool no_wait,
492 struct ttm_mem_reg *new_mem)
493{
494 struct ttm_bo_device *bdev = bo->bdev;
495 struct ttm_bo_driver *driver = bdev->driver;
496 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
497 struct ttm_mem_reg *old_mem = &bo->mem;
498 int ret;
499 uint32_t save_flags = old_mem->placement;
500 struct ttm_buffer_object *ghost_obj;
501 void *tmp_obj = NULL;
502
503 spin_lock(&bo->lock);
504 if (bo->sync_obj) {
505 tmp_obj = bo->sync_obj;
506 bo->sync_obj = NULL;
507 }
508 bo->sync_obj = driver->sync_obj_ref(sync_obj);
509 bo->sync_obj_arg = sync_obj_arg;
510 if (evict) {
511 ret = ttm_bo_wait(bo, false, false, false);
512 spin_unlock(&bo->lock);
513 driver->sync_obj_unref(&bo->sync_obj);
514
515 if (ret)
516 return ret;
517
518 ttm_bo_free_old_node(bo);
519 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
520 (bo->ttm != NULL)) {
521 ttm_tt_unbind(bo->ttm);
522 ttm_tt_destroy(bo->ttm);
523 bo->ttm = NULL;
524 }
525 } else {
526 /**
527 * This should help pipeline ordinary buffer moves.
528 *
529 * Hang old buffer memory on a new buffer object,
530 * and leave it to be released when the GPU
531 * operation has completed.
532 */
533
534 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
535 spin_unlock(&bo->lock);
536
537 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
538 if (ret)
539 return ret;
540
541 /**
542 * If we're not moving to fixed memory, the TTM object
543 * needs to stay alive. Otherwhise hang it on the ghost
544 * bo to be unbound and destroyed.
545 */
546
547 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
548 ghost_obj->ttm = NULL;
549 else
550 bo->ttm = NULL;
551
552 ttm_bo_unreserve(ghost_obj);
553 ttm_bo_unref(&ghost_obj);
554 }
555
556 *old_mem = *new_mem;
557 new_mem->mm_node = NULL;
558 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
559 return 0;
560}
561EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
new file mode 100644
index 000000000000..27b146c54fbc
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -0,0 +1,454 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include <ttm/ttm_module.h>
32#include <ttm/ttm_bo_driver.h>
33#include <ttm/ttm_placement.h>
34#include <linux/mm.h>
35#include <linux/version.h>
36#include <linux/rbtree.h>
37#include <linux/module.h>
38#include <linux/uaccess.h>
39
40#define TTM_BO_VM_NUM_PREFAULT 16
41
42static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
43 unsigned long page_start,
44 unsigned long num_pages)
45{
46 struct rb_node *cur = bdev->addr_space_rb.rb_node;
47 unsigned long cur_offset;
48 struct ttm_buffer_object *bo;
49 struct ttm_buffer_object *best_bo = NULL;
50
51 while (likely(cur != NULL)) {
52 bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
53 cur_offset = bo->vm_node->start;
54 if (page_start >= cur_offset) {
55 cur = cur->rb_right;
56 best_bo = bo;
57 if (page_start == cur_offset)
58 break;
59 } else
60 cur = cur->rb_left;
61 }
62
63 if (unlikely(best_bo == NULL))
64 return NULL;
65
66 if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
67 (page_start + num_pages)))
68 return NULL;
69
70 return best_bo;
71}
72
/*
 * Page-fault handler for user mappings of TTM buffer objects.
 * Reserves the bo (trylock, retrying the fault on contention), waits out
 * any pipelined move, fixes up the vma's caching bits to match the bo's
 * current placement, and inserts up to TTM_BO_VM_NUM_PREFAULT PTEs
 * starting at the faulting address.
 */
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	bool is_iomem;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		/* NOPAGE makes the kernel re-run the fault later. */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bo->lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bo->lock);
		if (unlikely(ret != 0)) {
			/* -ERESTART = interrupted by a signal: retry the
			 * fault instead of delivering SIGBUS. */
			retval = (ret != -ERESTART) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bo->lock);


	ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* Non-zero bus_size means the bo currently lives in an I/O aperture. */
	is_iomem = (bus_size != 0);

	/* Translate the faulting address into a page index within the bo. */
	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */

	if (is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {

		if (is_iomem)
			pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
			    page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_unlock;
			} else if (unlikely(!page)) {
				/* Prefault only: stop quietly past page 0. */
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_unlock;

		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}

out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
211
212static void ttm_bo_vm_open(struct vm_area_struct *vma)
213{
214 struct ttm_buffer_object *bo =
215 (struct ttm_buffer_object *)vma->vm_private_data;
216
217 (void)ttm_bo_reference(bo);
218}
219
220static void ttm_bo_vm_close(struct vm_area_struct *vma)
221{
222 struct ttm_buffer_object *bo =
223 (struct ttm_buffer_object *)vma->vm_private_data;
224
225 ttm_bo_unref(&bo);
226 vma->vm_private_data = NULL;
227}
228
/* VM callbacks shared by all TTM buffer-object mappings. */
static struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
234
/*
 * mmap entry point for TTM devices: resolve the vma's pgoff to a buffer
 * object via the device's address-space tree, ask the driver to verify
 * access rights, and wire up ttm_bo_vm_ops.
 * Returns 0 on success; -EINVAL if no bo covers the range, -EPERM if the
 * driver provides no verify_access hook, or the hook's error code.
 */
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	/* Take a reference under vm_lock so the bo cannot vanish under us. */
	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printk(KERN_ERR TTM_PFX
		       "Could not find buffer object to map.\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	/* A missing hook means access control cannot be enforced: refuse. */
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
279
280int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
281{
282 if (vma->vm_pgoff != 0)
283 return -EACCES;
284
285 vma->vm_ops = &ttm_bo_vm_ops;
286 vma->vm_private_data = ttm_bo_reference(bo);
287 vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
288 return 0;
289}
290EXPORT_SYMBOL(ttm_fbdev_mmap);
291
292
293ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
294 const char __user *wbuf, char __user *rbuf, size_t count,
295 loff_t *f_pos, bool write)
296{
297 struct ttm_buffer_object *bo;
298 struct ttm_bo_driver *driver;
299 struct ttm_bo_kmap_obj map;
300 unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
301 unsigned long kmap_offset;
302 unsigned long kmap_end;
303 unsigned long kmap_num;
304 size_t io_size;
305 unsigned int page_offset;
306 char *virtual;
307 int ret;
308 bool no_wait = false;
309 bool dummy;
310
311 read_lock(&bdev->vm_lock);
312 bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
313 if (likely(bo != NULL))
314 ttm_bo_reference(bo);
315 read_unlock(&bdev->vm_lock);
316
317 if (unlikely(bo == NULL))
318 return -EFAULT;
319
320 driver = bo->bdev->driver;
321 if (unlikely(driver->verify_access)) {
322 ret = -EPERM;
323 goto out_unref;
324 }
325
326 ret = driver->verify_access(bo, filp);
327 if (unlikely(ret != 0))
328 goto out_unref;
329
330 kmap_offset = dev_offset - bo->vm_node->start;
331 if (unlikely(kmap_offset) >= bo->num_pages) {
332 ret = -EFBIG;
333 goto out_unref;
334 }
335
336 page_offset = *f_pos & ~PAGE_MASK;
337 io_size = bo->num_pages - kmap_offset;
338 io_size = (io_size << PAGE_SHIFT) - page_offset;
339 if (count < io_size)
340 io_size = count;
341
342 kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
343 kmap_num = kmap_end - kmap_offset + 1;
344
345 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
346
347 switch (ret) {
348 case 0:
349 break;
350 case -ERESTART:
351 ret = -EINTR;
352 goto out_unref;
353 case -EBUSY:
354 ret = -EAGAIN;
355 goto out_unref;
356 default:
357 goto out_unref;
358 }
359
360 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
361 if (unlikely(ret != 0)) {
362 ttm_bo_unreserve(bo);
363 goto out_unref;
364 }
365
366 virtual = ttm_kmap_obj_virtual(&map, &dummy);
367 virtual += page_offset;
368
369 if (write)
370 ret = copy_from_user(virtual, wbuf, io_size);
371 else
372 ret = copy_to_user(rbuf, virtual, io_size);
373
374 ttm_bo_kunmap(&map);
375 ttm_bo_unreserve(bo);
376 ttm_bo_unref(&bo);
377
378 if (unlikely(ret != 0))
379 return -EFBIG;
380
381 *f_pos += io_size;
382
383 return io_size;
384out_unref:
385 ttm_bo_unref(&bo);
386 return ret;
387}
388
389ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
390 char __user *rbuf, size_t count, loff_t *f_pos,
391 bool write)
392{
393 struct ttm_bo_kmap_obj map;
394 unsigned long kmap_offset;
395 unsigned long kmap_end;
396 unsigned long kmap_num;
397 size_t io_size;
398 unsigned int page_offset;
399 char *virtual;
400 int ret;
401 bool no_wait = false;
402 bool dummy;
403
404 kmap_offset = (*f_pos >> PAGE_SHIFT);
405 if (unlikely(kmap_offset) >= bo->num_pages)
406 return -EFBIG;
407
408 page_offset = *f_pos & ~PAGE_MASK;
409 io_size = bo->num_pages - kmap_offset;
410 io_size = (io_size << PAGE_SHIFT) - page_offset;
411 if (count < io_size)
412 io_size = count;
413
414 kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
415 kmap_num = kmap_end - kmap_offset + 1;
416
417 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
418
419 switch (ret) {
420 case 0:
421 break;
422 case -ERESTART:
423 return -EINTR;
424 case -EBUSY:
425 return -EAGAIN;
426 default:
427 return ret;
428 }
429
430 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
431 if (unlikely(ret != 0)) {
432 ttm_bo_unreserve(bo);
433 return ret;
434 }
435
436 virtual = ttm_kmap_obj_virtual(&map, &dummy);
437 virtual += page_offset;
438
439 if (write)
440 ret = copy_from_user(virtual, wbuf, io_size);
441 else
442 ret = copy_to_user(rbuf, virtual, io_size);
443
444 ttm_bo_kunmap(&map);
445 ttm_bo_unreserve(bo);
446 ttm_bo_unref(&bo);
447
448 if (unlikely(ret != 0))
449 return ret;
450
451 *f_pos += io_size;
452
453 return io_size;
454}
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
new file mode 100644
index 000000000000..0b14eb1972b8
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_global.c
@@ -0,0 +1,114 @@
1/**************************************************************************
2 *
3 * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_module.h"
32#include <linux/mutex.h>
33#include <linux/slab.h>
34#include <linux/module.h>
35
/*
 * One refcounted slot per global object type.  The object is lazily
 * allocated on first reference and freed when the last reference goes away;
 * the mutex serializes all ref/unref operations on the slot.
 */
struct ttm_global_item {
	struct mutex mutex;
	void *object;	/* lazily allocated payload, NULL when unused */
	int refcount;	/* number of outstanding ttm_global_item_ref()s */
};

/* One slot per TTM global type, indexed by ttm_global_reference::global_type. */
static struct ttm_global_item glob[TTM_GLOBAL_NUM];
43
44void ttm_global_init(void)
45{
46 int i;
47
48 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
49 struct ttm_global_item *item = &glob[i];
50 mutex_init(&item->mutex);
51 item->object = NULL;
52 item->refcount = 0;
53 }
54}
55
56void ttm_global_release(void)
57{
58 int i;
59 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
60 struct ttm_global_item *item = &glob[i];
61 BUG_ON(item->object != NULL);
62 BUG_ON(item->refcount != 0);
63 }
64}
65
66int ttm_global_item_ref(struct ttm_global_reference *ref)
67{
68 int ret;
69 struct ttm_global_item *item = &glob[ref->global_type];
70 void *object;
71
72 mutex_lock(&item->mutex);
73 if (item->refcount == 0) {
74 item->object = kmalloc(ref->size, GFP_KERNEL);
75 if (unlikely(item->object == NULL)) {
76 ret = -ENOMEM;
77 goto out_err;
78 }
79
80 ref->object = item->object;
81 ret = ref->init(ref);
82 if (unlikely(ret != 0))
83 goto out_err;
84
85 ++item->refcount;
86 }
87 ref->object = item->object;
88 object = item->object;
89 mutex_unlock(&item->mutex);
90 return 0;
91out_err:
92 kfree(item->object);
93 mutex_unlock(&item->mutex);
94 item->object = NULL;
95 return ret;
96}
97EXPORT_SYMBOL(ttm_global_item_ref);
98
/*
 * Drop a reference previously taken with ttm_global_item_ref().
 * When the last reference goes away the object is release()-d and freed,
 * returning the slot to the unused state.
 */
void ttm_global_item_unref(struct ttm_global_reference *ref)
{
	struct ttm_global_item *item = &glob[ref->global_type];

	mutex_lock(&item->mutex);
	/* Unref without a matching ref, or against a stale object pointer,
	 * is a caller bug. */
	BUG_ON(item->refcount == 0);
	BUG_ON(ref->object != item->object);
	if (--item->refcount == 0) {
		ref->release(ref);
		kfree(item->object);
		item->object = NULL;
	}
	mutex_unlock(&item->mutex);
}
EXPORT_SYMBOL(ttm_global_item_unref);
114
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
new file mode 100644
index 000000000000..87323d4ff68d
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -0,0 +1,234 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "ttm/ttm_memory.h"
29#include <linux/spinlock.h>
30#include <linux/sched.h>
31#include <linux/wait.h>
32#include <linux/mm.h>
33#include <linux/module.h>
34
35#define TTM_PFX "[TTM] "
36#define TTM_MEMORY_ALLOC_RETRIES 4
37
38/**
39 * At this point we only support a single shrink callback.
40 * Extend this if needed, perhaps using a linked list of callbacks.
41 * Note that this function is reentrant:
42 * many threads may try to swap out at any given time.
43 */
44
/*
 * Swap out buffer objects until accounted memory drops below the target
 * appropriate for the caller: the swap limits when run from the workqueue,
 * the emergency limits for CAP_SYS_ADMIN, the normal maxima otherwise.
 * @extra is headroom subtracted from the targets (for a pending allocation).
 * Reentrant: the lock is dropped around each do_shrink() call, so several
 * threads may shrink concurrently.
 */
static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;
	uint64_t target;
	uint64_t total_target;

	spin_lock(&glob->lock);
	/* No shrink callback registered yet: nothing we can do. */
	if (glob->shrink == NULL)
		goto out;

	if (from_workqueue) {
		target = glob->swap_limit;
		total_target = glob->total_memory_swap_limit;
	} else if (capable(CAP_SYS_ADMIN)) {
		total_target = glob->emer_total_memory;
		target = glob->emer_memory;
	} else {
		total_target = glob->max_total_memory;
		target = glob->max_memory;
	}

	/* Leave @extra headroom; clamp at zero to avoid underflow. */
	total_target = (extra >= total_target) ? 0 : total_target - extra;
	target = (extra >= target) ? 0 : target - extra;

	while (glob->used_memory > target ||
	       glob->used_total_memory > total_target) {
		shrink = glob->shrink;
		/* do_shrink may sleep; re-read state after reacquiring. */
		spin_unlock(&glob->lock);
		ret = shrink->do_shrink(shrink);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			goto out;
	}
out:
	spin_unlock(&glob->lock);
}
83
84static void ttm_shrink_work(struct work_struct *work)
85{
86 struct ttm_mem_global *glob =
87 container_of(work, struct ttm_mem_global, work);
88
89 ttm_shrink(glob, true, 0ULL);
90}
91
92int ttm_mem_global_init(struct ttm_mem_global *glob)
93{
94 struct sysinfo si;
95 uint64_t mem;
96
97 spin_lock_init(&glob->lock);
98 glob->swap_queue = create_singlethread_workqueue("ttm_swap");
99 INIT_WORK(&glob->work, ttm_shrink_work);
100 init_waitqueue_head(&glob->queue);
101
102 si_meminfo(&si);
103
104 mem = si.totalram - si.totalhigh;
105 mem *= si.mem_unit;
106
107 glob->max_memory = mem >> 1;
108 glob->emer_memory = (mem >> 1) + (mem >> 2);
109 glob->swap_limit = glob->max_memory - (mem >> 3);
110 glob->used_memory = 0;
111 glob->used_total_memory = 0;
112 glob->shrink = NULL;
113
114 mem = si.totalram;
115 mem *= si.mem_unit;
116
117 glob->max_total_memory = mem >> 1;
118 glob->emer_total_memory = (mem >> 1) + (mem >> 2);
119
120 glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 3);
121
122 printk(KERN_INFO TTM_PFX "TTM available graphics memory: %llu MiB\n",
123 glob->max_total_memory >> 20);
124 printk(KERN_INFO TTM_PFX "TTM available object memory: %llu MiB\n",
125 glob->max_memory >> 20);
126
127 return 0;
128}
129EXPORT_SYMBOL(ttm_mem_global_init);
130
131void ttm_mem_global_release(struct ttm_mem_global *glob)
132{
133 printk(KERN_INFO TTM_PFX "Used total memory is %llu bytes.\n",
134 (unsigned long long)glob->used_total_memory);
135 flush_workqueue(glob->swap_queue);
136 destroy_workqueue(glob->swap_queue);
137 glob->swap_queue = NULL;
138}
139EXPORT_SYMBOL(ttm_mem_global_release);
140
141static inline void ttm_check_swapping(struct ttm_mem_global *glob)
142{
143 bool needs_swapping;
144
145 spin_lock(&glob->lock);
146 needs_swapping = (glob->used_memory > glob->swap_limit ||
147 glob->used_total_memory >
148 glob->total_memory_swap_limit);
149 spin_unlock(&glob->lock);
150
151 if (unlikely(needs_swapping))
152 (void)queue_work(glob->swap_queue, &glob->work);
153
154}
155
156void ttm_mem_global_free(struct ttm_mem_global *glob,
157 uint64_t amount, bool himem)
158{
159 spin_lock(&glob->lock);
160 glob->used_total_memory -= amount;
161 if (!himem)
162 glob->used_memory -= amount;
163 wake_up_all(&glob->queue);
164 spin_unlock(&glob->lock);
165}
166
/*
 * Check (and, if @reserve, commit) an accounting charge of @amount bytes.
 * CAP_SYS_ADMIN callers may dip into the emergency reserves; everyone
 * else is held to the normal maxima.  Highmem charges count only against
 * the total-memory pool.  Returns 0 if the charge fits, -ENOMEM otherwise.
 * May kick off background swapping if the attempt pushed usage near the
 * swap triggers.
 */
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  uint64_t amount, bool himem, bool reserve)
{
	uint64_t limit;
	uint64_t lomem_limit;
	int ret = -ENOMEM;

	spin_lock(&glob->lock);

	if (capable(CAP_SYS_ADMIN)) {
		limit = glob->emer_total_memory;
		lomem_limit = glob->emer_memory;
	} else {
		limit = glob->max_total_memory;
		lomem_limit = glob->max_memory;
	}

	if (unlikely(glob->used_total_memory + amount > limit))
		goto out_unlock;
	/* Lowmem charge must additionally fit the lowmem pool. */
	if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
		goto out_unlock;

	if (reserve) {
		glob->used_total_memory += amount;
		if (!himem)
			glob->used_memory += amount;
	}
	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	/* Outside the lock: may queue work, never sleeps. */
	ttm_check_swapping(glob);

	return ret;
}
201
202int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
203 bool no_wait, bool interruptible, bool himem)
204{
205 int count = TTM_MEMORY_ALLOC_RETRIES;
206
207 while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true)
208 != 0)) {
209 if (no_wait)
210 return -ENOMEM;
211 if (unlikely(count-- == 0))
212 return -ENOMEM;
213 ttm_shrink(glob, false, memory + (memory >> 2) + 16);
214 }
215
216 return 0;
217}
218
219size_t ttm_round_pot(size_t size)
220{
221 if ((size & (size - 1)) == 0)
222 return size;
223 else if (size > PAGE_SIZE)
224 return PAGE_ALIGN(size);
225 else {
226 size_t tmp_size = 4;
227
228 while (tmp_size < size)
229 tmp_size <<= 1;
230
231 return tmp_size;
232 }
233 return 0;
234}
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
new file mode 100644
index 000000000000..59ce8191d584
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -0,0 +1,50 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 * Jerome Glisse
30 */
31#include <linux/module.h>
32#include <ttm/ttm_module.h>
33
/* Module entry: reset the global-item slots before any driver can ref them. */
static int __init ttm_init(void)
{
	ttm_global_init();
	return 0;
}

/* Module exit: asserts all global references have been dropped. */
static void __exit ttm_exit(void)
{
	ttm_global_release();
}

module_init(ttm_init);
module_exit(ttm_exit);

MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
new file mode 100644
index 000000000000..c27ab3a877ad
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -0,0 +1,635 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include <linux/version.h>
32#include <linux/vmalloc.h>
33#include <linux/sched.h>
34#include <linux/highmem.h>
35#include <linux/pagemap.h>
36#include <linux/file.h>
37#include <linux/swap.h>
38#include "ttm/ttm_module.h"
39#include "ttm/ttm_bo_driver.h"
40#include "ttm/ttm_placement.h"
41
42static int ttm_tt_swapin(struct ttm_tt *ttm);
43
#if defined(CONFIG_X86)
/* Flush the cache lines of one page via clflush through a temporary kmap. */
static void ttm_tt_clflush_page(struct page *page)
{
	uint8_t *vaddr;
	unsigned int off;

	if (unlikely(page == NULL))
		return;

	vaddr = kmap_atomic(page, KM_USER0);

	for (off = 0; off < PAGE_SIZE; off += boot_cpu_data.x86_clflush_size)
		clflush(vaddr + off);

	kunmap_atomic(vaddr, KM_USER0);
}

/* clflush every page in the array, fenced on both sides. */
static void ttm_tt_cache_flush_clflush(struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; ++i)
		ttm_tt_clflush_page(*pages++);
	mb();
}
#else
/* Empty IPI handler: the cross-CPU call itself serves as the flush. */
static void ttm_tt_ipi_handler(void *null)
{
	;
}
#endif
77
78void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
79{
80
81#if defined(CONFIG_X86)
82 if (cpu_has_clflush) {
83 ttm_tt_cache_flush_clflush(pages, num_pages);
84 return;
85 }
86#else
87 if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
88 printk(KERN_ERR TTM_PFX
89 "Timed out waiting for drm cache flush.\n");
90#endif
91}
92
93/**
94 * Allocates storage for pointers to the pages that back the ttm.
95 *
96 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
97 */
98static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
99{
100 unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
101 ttm->pages = NULL;
102
103 if (size <= PAGE_SIZE)
104 ttm->pages = kzalloc(size, GFP_KERNEL);
105
106 if (!ttm->pages) {
107 ttm->pages = vmalloc_user(size);
108 if (ttm->pages)
109 ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
110 }
111}
112
113static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
114{
115 if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
116 vfree(ttm->pages);
117 ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
118 } else {
119 kfree(ttm->pages);
120 }
121 ttm->pages = NULL;
122}
123
124static struct page *ttm_tt_alloc_page(unsigned page_flags)
125{
126 if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
127 return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
128
129 return alloc_page(GFP_HIGHUSER);
130}
131
132static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
133{
134 int write;
135 int dirty;
136 struct page *page;
137 int i;
138 struct ttm_backend *be = ttm->be;
139
140 BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
141 write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
142 dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
143
144 if (be)
145 be->func->clear(be);
146
147 for (i = 0; i < ttm->num_pages; ++i) {
148 page = ttm->pages[i];
149 if (page == NULL)
150 continue;
151
152 if (page == ttm->dummy_read_page) {
153 BUG_ON(write);
154 continue;
155 }
156
157 if (write && dirty && !PageReserved(page))
158 set_page_dirty_lock(page);
159
160 ttm->pages[i] = NULL;
161 ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
162 put_page(page);
163 }
164 ttm->state = tt_unpopulated;
165 ttm->first_himem_page = ttm->num_pages;
166 ttm->last_lomem_page = -1;
167}
168
169static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
170{
171 struct page *p;
172 struct ttm_bo_device *bdev = ttm->bdev;
173 struct ttm_mem_global *mem_glob = bdev->mem_glob;
174 int ret;
175
176 while (NULL == (p = ttm->pages[index])) {
177 p = ttm_tt_alloc_page(ttm->page_flags);
178
179 if (!p)
180 return NULL;
181
182 if (PageHighMem(p)) {
183 ret =
184 ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
185 false, false, true);
186 if (unlikely(ret != 0))
187 goto out_err;
188 ttm->pages[--ttm->first_himem_page] = p;
189 } else {
190 ret =
191 ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
192 false, false, false);
193 if (unlikely(ret != 0))
194 goto out_err;
195 ttm->pages[++ttm->last_lomem_page] = p;
196 }
197 }
198 return p;
199out_err:
200 put_page(p);
201 return NULL;
202}
203
204struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
205{
206 int ret;
207
208 if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
209 ret = ttm_tt_swapin(ttm);
210 if (unlikely(ret != 0))
211 return NULL;
212 }
213 return __ttm_tt_get_page(ttm, index);
214}
215
216int ttm_tt_populate(struct ttm_tt *ttm)
217{
218 struct page *page;
219 unsigned long i;
220 struct ttm_backend *be;
221 int ret;
222
223 if (ttm->state != tt_unpopulated)
224 return 0;
225
226 if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
227 ret = ttm_tt_swapin(ttm);
228 if (unlikely(ret != 0))
229 return ret;
230 }
231
232 be = ttm->be;
233
234 for (i = 0; i < ttm->num_pages; ++i) {
235 page = __ttm_tt_get_page(ttm, i);
236 if (!page)
237 return -ENOMEM;
238 }
239
240 be->func->populate(be, ttm->num_pages, ttm->pages,
241 ttm->dummy_read_page);
242 ttm->state = tt_unbound;
243 return 0;
244}
245
246#ifdef CONFIG_X86
247static inline int ttm_tt_set_page_caching(struct page *p,
248 enum ttm_caching_state c_state)
249{
250 if (PageHighMem(p))
251 return 0;
252
253 switch (c_state) {
254 case tt_cached:
255 return set_pages_wb(p, 1);
256 case tt_wc:
257 return set_memory_wc((unsigned long) page_address(p), 1);
258 default:
259 return set_pages_uc(p, 1);
260 }
261}
262#else /* CONFIG_X86 */
263static inline int ttm_tt_set_page_caching(struct page *p,
264 enum ttm_caching_state c_state)
265{
266 return 0;
267}
268#endif /* CONFIG_X86 */
269
270/*
271 * Change caching policy for the linear kernel map
272 * for range of pages in a ttm.
273 */
274
275static int ttm_tt_set_caching(struct ttm_tt *ttm,
276 enum ttm_caching_state c_state)
277{
278 int i, j;
279 struct page *cur_page;
280 int ret;
281
282 if (ttm->caching_state == c_state)
283 return 0;
284
285 if (c_state != tt_cached) {
286 ret = ttm_tt_populate(ttm);
287 if (unlikely(ret != 0))
288 return ret;
289 }
290
291 if (ttm->caching_state == tt_cached)
292 ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
293
294 for (i = 0; i < ttm->num_pages; ++i) {
295 cur_page = ttm->pages[i];
296 if (likely(cur_page != NULL)) {
297 ret = ttm_tt_set_page_caching(cur_page, c_state);
298 if (unlikely(ret != 0))
299 goto out_err;
300 }
301 }
302
303 ttm->caching_state = c_state;
304
305 return 0;
306
307out_err:
308 for (j = 0; j < i; ++j) {
309 cur_page = ttm->pages[j];
310 if (likely(cur_page != NULL)) {
311 (void)ttm_tt_set_page_caching(cur_page,
312 ttm->caching_state);
313 }
314 }
315
316 return ret;
317}
318
319int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
320{
321 enum ttm_caching_state state;
322
323 if (placement & TTM_PL_FLAG_WC)
324 state = tt_wc;
325 else if (placement & TTM_PL_FLAG_UNCACHED)
326 state = tt_uncached;
327 else
328 state = tt_cached;
329
330 return ttm_tt_set_caching(ttm, state);
331}
332
333static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
334{
335 int i;
336 struct page *cur_page;
337 struct ttm_backend *be = ttm->be;
338
339 if (be)
340 be->func->clear(be);
341 (void)ttm_tt_set_caching(ttm, tt_cached);
342 for (i = 0; i < ttm->num_pages; ++i) {
343 cur_page = ttm->pages[i];
344 ttm->pages[i] = NULL;
345 if (cur_page) {
346 if (page_count(cur_page) != 1)
347 printk(KERN_ERR TTM_PFX
348 "Erroneous page count. "
349 "Leaking pages.\n");
350 ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
351 PageHighMem(cur_page));
352 __free_page(cur_page);
353 }
354 }
355 ttm->state = tt_unpopulated;
356 ttm->first_himem_page = ttm->num_pages;
357 ttm->last_lomem_page = -1;
358}
359
360void ttm_tt_destroy(struct ttm_tt *ttm)
361{
362 struct ttm_backend *be;
363
364 if (unlikely(ttm == NULL))
365 return;
366
367 be = ttm->be;
368 if (likely(be != NULL)) {
369 be->func->destroy(be);
370 ttm->be = NULL;
371 }
372
373 if (likely(ttm->pages != NULL)) {
374 if (ttm->page_flags & TTM_PAGE_FLAG_USER)
375 ttm_tt_free_user_pages(ttm);
376 else
377 ttm_tt_free_alloced_pages(ttm);
378
379 ttm_tt_free_page_directory(ttm);
380 }
381
382 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
383 ttm->swap_storage)
384 fput(ttm->swap_storage);
385
386 kfree(ttm);
387}
388
389int ttm_tt_set_user(struct ttm_tt *ttm,
390 struct task_struct *tsk,
391 unsigned long start, unsigned long num_pages)
392{
393 struct mm_struct *mm = tsk->mm;
394 int ret;
395 int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
396 struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
397
398 BUG_ON(num_pages != ttm->num_pages);
399 BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
400
401 /**
402 * Account user pages as lowmem pages for now.
403 */
404
405 ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
406 false, false, false);
407 if (unlikely(ret != 0))
408 return ret;
409
410 down_read(&mm->mmap_sem);
411 ret = get_user_pages(tsk, mm, start, num_pages,
412 write, 0, ttm->pages, NULL);
413 up_read(&mm->mmap_sem);
414
415 if (ret != num_pages && write) {
416 ttm_tt_free_user_pages(ttm);
417 ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
418 return -ENOMEM;
419 }
420
421 ttm->tsk = tsk;
422 ttm->start = start;
423 ttm->state = tt_unbound;
424
425 return 0;
426}
427
428struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
429 uint32_t page_flags, struct page *dummy_read_page)
430{
431 struct ttm_bo_driver *bo_driver = bdev->driver;
432 struct ttm_tt *ttm;
433
434 if (!bo_driver)
435 return NULL;
436
437 ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
438 if (!ttm)
439 return NULL;
440
441 ttm->bdev = bdev;
442
443 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
444 ttm->first_himem_page = ttm->num_pages;
445 ttm->last_lomem_page = -1;
446 ttm->caching_state = tt_cached;
447 ttm->page_flags = page_flags;
448
449 ttm->dummy_read_page = dummy_read_page;
450
451 ttm_tt_alloc_page_directory(ttm);
452 if (!ttm->pages) {
453 ttm_tt_destroy(ttm);
454 printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
455 return NULL;
456 }
457 ttm->be = bo_driver->create_ttm_backend_entry(bdev);
458 if (!ttm->be) {
459 ttm_tt_destroy(ttm);
460 printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
461 return NULL;
462 }
463 ttm->state = tt_unpopulated;
464 return ttm;
465}
466
467void ttm_tt_unbind(struct ttm_tt *ttm)
468{
469 int ret;
470 struct ttm_backend *be = ttm->be;
471
472 if (ttm->state == tt_bound) {
473 ret = be->func->unbind(be);
474 BUG_ON(ret);
475 ttm->state = tt_unbound;
476 }
477}
478
479int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
480{
481 int ret = 0;
482 struct ttm_backend *be;
483
484 if (!ttm)
485 return -EINVAL;
486
487 if (ttm->state == tt_bound)
488 return 0;
489
490 be = ttm->be;
491
492 ret = ttm_tt_populate(ttm);
493 if (ret)
494 return ret;
495
496 ret = be->func->bind(be, bo_mem);
497 if (ret) {
498 printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
499 return ret;
500 }
501
502 ttm->state = tt_bound;
503
504 if (ttm->page_flags & TTM_PAGE_FLAG_USER)
505 ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
506 return 0;
507}
508EXPORT_SYMBOL(ttm_tt_bind);
509
510static int ttm_tt_swapin(struct ttm_tt *ttm)
511{
512 struct address_space *swap_space;
513 struct file *swap_storage;
514 struct page *from_page;
515 struct page *to_page;
516 void *from_virtual;
517 void *to_virtual;
518 int i;
519 int ret;
520
521 if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
522 ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
523 ttm->num_pages);
524 if (unlikely(ret != 0))
525 return ret;
526
527 ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
528 return 0;
529 }
530
531 swap_storage = ttm->swap_storage;
532 BUG_ON(swap_storage == NULL);
533
534 swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
535
536 for (i = 0; i < ttm->num_pages; ++i) {
537 from_page = read_mapping_page(swap_space, i, NULL);
538 if (IS_ERR(from_page))
539 goto out_err;
540 to_page = __ttm_tt_get_page(ttm, i);
541 if (unlikely(to_page == NULL))
542 goto out_err;
543
544 preempt_disable();
545 from_virtual = kmap_atomic(from_page, KM_USER0);
546 to_virtual = kmap_atomic(to_page, KM_USER1);
547 memcpy(to_virtual, from_virtual, PAGE_SIZE);
548 kunmap_atomic(to_virtual, KM_USER1);
549 kunmap_atomic(from_virtual, KM_USER0);
550 preempt_enable();
551 page_cache_release(from_page);
552 }
553
554 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
555 fput(swap_storage);
556 ttm->swap_storage = NULL;
557 ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
558
559 return 0;
560out_err:
561 ttm_tt_free_alloced_pages(ttm);
562 return -ENOMEM;
563}
564
565int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
566{
567 struct address_space *swap_space;
568 struct file *swap_storage;
569 struct page *from_page;
570 struct page *to_page;
571 void *from_virtual;
572 void *to_virtual;
573 int i;
574
575 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
576 BUG_ON(ttm->caching_state != tt_cached);
577
578 /*
579 * For user buffers, just unpin the pages, as there should be
580 * vma references.
581 */
582
583 if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
584 ttm_tt_free_user_pages(ttm);
585 ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
586 ttm->swap_storage = NULL;
587 return 0;
588 }
589
590 if (!persistant_swap_storage) {
591 swap_storage = shmem_file_setup("ttm swap",
592 ttm->num_pages << PAGE_SHIFT,
593 0);
594 if (unlikely(IS_ERR(swap_storage))) {
595 printk(KERN_ERR "Failed allocating swap storage.\n");
596 return -ENOMEM;
597 }
598 } else
599 swap_storage = persistant_swap_storage;
600
601 swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
602
603 for (i = 0; i < ttm->num_pages; ++i) {
604 from_page = ttm->pages[i];
605 if (unlikely(from_page == NULL))
606 continue;
607 to_page = read_mapping_page(swap_space, i, NULL);
608 if (unlikely(to_page == NULL))
609 goto out_err;
610
611 preempt_disable();
612 from_virtual = kmap_atomic(from_page, KM_USER0);
613 to_virtual = kmap_atomic(to_page, KM_USER1);
614 memcpy(to_virtual, from_virtual, PAGE_SIZE);
615 kunmap_atomic(to_virtual, KM_USER1);
616 kunmap_atomic(from_virtual, KM_USER0);
617 preempt_enable();
618 set_page_dirty(to_page);
619 mark_page_accessed(to_page);
620 page_cache_release(to_page);
621 }
622
623 ttm_tt_free_alloced_pages(ttm);
624 ttm->swap_storage = swap_storage;
625 ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
626 if (persistant_swap_storage)
627 ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;
628
629 return 0;
630out_err:
631 if (!persistant_swap_storage)
632 fput(swap_storage);
633
634 return -ENOMEM;
635}
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index e9b436d2d944..9e9421525fb9 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -850,8 +850,14 @@ static const struct file_operations hiddev_fops = {
850#endif 850#endif
851}; 851};
852 852
853static char *hiddev_nodename(struct device *dev)
854{
855 return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
856}
857
853static struct usb_class_driver hiddev_class = { 858static struct usb_class_driver hiddev_class = {
854 .name = "hiddev%d", 859 .name = "hiddev%d",
860 .nodename = hiddev_nodename,
855 .fops = &hiddev_fops, 861 .fops = &hiddev_fops,
856 .minor_base = HIDDEV_MINOR_BASE, 862 .minor_base = HIDDEV_MINOR_BASE,
857}; 863};
@@ -955,7 +961,6 @@ static int hiddev_usbd_probe(struct usb_interface *intf,
955 return -ENODEV; 961 return -ENODEV;
956} 962}
957 963
958
959static /* const */ struct usb_driver hiddev_driver = { 964static /* const */ struct usb_driver hiddev_driver = {
960 .name = "hiddev", 965 .name = "hiddev",
961 .probe = hiddev_usbd_probe, 966 .probe = hiddev_usbd_probe,
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f8090e137fef..2d5016691d40 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -950,6 +950,7 @@ config SENSORS_HDAPS
950config SENSORS_LIS3LV02D 950config SENSORS_LIS3LV02D
951 tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer" 951 tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer"
952 depends on ACPI && INPUT 952 depends on ACPI && INPUT
953 select INPUT_POLLDEV
953 select NEW_LEDS 954 select NEW_LEDS
954 select LEDS_CLASS 955 select LEDS_CLASS
955 default n 956 default n
@@ -977,6 +978,7 @@ config SENSORS_LIS3LV02D
977config SENSORS_LIS3_SPI 978config SENSORS_LIS3_SPI
978 tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)" 979 tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
979 depends on !ACPI && SPI_MASTER && INPUT 980 depends on !ACPI && SPI_MASTER && INPUT
981 select INPUT_POLLDEV
980 default n 982 default n
981 help 983 help
982 This driver provides support for the LIS3LV02Dx accelerometer connected 984 This driver provides support for the LIS3LV02Dx accelerometer connected
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index abca7e9f953b..6679854c85b0 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -27,9 +27,6 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/input.h>
31#include <linux/kthread.h>
32#include <linux/semaphore.h>
33#include <linux/delay.h> 30#include <linux/delay.h>
34#include <linux/wait.h> 31#include <linux/wait.h>
35#include <linux/poll.h> 32#include <linux/poll.h>
@@ -161,6 +158,7 @@ static struct axis_conversion lis3lv02d_axis_normal = {1, 2, 3};
161static struct axis_conversion lis3lv02d_axis_y_inverted = {1, -2, 3}; 158static struct axis_conversion lis3lv02d_axis_y_inverted = {1, -2, 3};
162static struct axis_conversion lis3lv02d_axis_x_inverted = {-1, 2, 3}; 159static struct axis_conversion lis3lv02d_axis_x_inverted = {-1, 2, 3};
163static struct axis_conversion lis3lv02d_axis_z_inverted = {1, 2, -3}; 160static struct axis_conversion lis3lv02d_axis_z_inverted = {1, 2, -3};
161static struct axis_conversion lis3lv02d_axis_xy_swap = {2, 1, 3};
164static struct axis_conversion lis3lv02d_axis_xy_rotated_left = {-2, 1, 3}; 162static struct axis_conversion lis3lv02d_axis_xy_rotated_left = {-2, 1, 3};
165static struct axis_conversion lis3lv02d_axis_xy_rotated_left_usd = {-2, 1, -3}; 163static struct axis_conversion lis3lv02d_axis_xy_rotated_left_usd = {-2, 1, -3};
166static struct axis_conversion lis3lv02d_axis_xy_swap_inverted = {-2, -1, 3}; 164static struct axis_conversion lis3lv02d_axis_xy_swap_inverted = {-2, -1, 3};
@@ -194,13 +192,16 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
194 AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted), 192 AXIS_DMI_MATCH("NX9420", "HP Compaq nx9420", x_inverted),
195 AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted), 193 AXIS_DMI_MATCH("NW9440", "HP Compaq nw9440", x_inverted),
196 AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted), 194 AXIS_DMI_MATCH("NC2510", "HP Compaq 2510", y_inverted),
195 AXIS_DMI_MATCH("NC2710", "HP Compaq 2710", xy_swap),
197 AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted), 196 AXIS_DMI_MATCH("NC8510", "HP Compaq 8510", xy_swap_inverted),
198 AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left), 197 AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left),
199 AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted), 198 AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted),
200 AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd), 199 AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd),
201 AXIS_DMI_MATCH("NC673x", "HP Compaq 673", xy_rotated_left_usd), 200 AXIS_DMI_MATCH("NC673x", "HP Compaq 673", xy_rotated_left_usd),
202 AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right), 201 AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right),
203 AXIS_DMI_MATCH("NC671xx", "HP Compaq 671", xy_swap_yz_inverted), 202 AXIS_DMI_MATCH("NC6710x", "HP Compaq 6710", xy_swap_yz_inverted),
203 AXIS_DMI_MATCH("NC6715x", "HP Compaq 6715", y_inverted),
204 AXIS_DMI_MATCH("NC693xx", "HP EliteBook 693", xy_rotated_right),
204 /* Intel-based HP Pavilion dv5 */ 205 /* Intel-based HP Pavilion dv5 */
205 AXIS_DMI_MATCH2("HPDV5_I", 206 AXIS_DMI_MATCH2("HPDV5_I",
206 PRODUCT_NAME, "HP Pavilion dv5", 207 PRODUCT_NAME, "HP Pavilion dv5",
@@ -216,7 +217,6 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
216 { NULL, } 217 { NULL, }
217/* Laptop models without axis info (yet): 218/* Laptop models without axis info (yet):
218 * "NC6910" "HP Compaq 6910" 219 * "NC6910" "HP Compaq 6910"
219 * HP Compaq 8710x Notebook PC / Mobile Workstation
220 * "NC2400" "HP Compaq nc2400" 220 * "NC2400" "HP Compaq nc2400"
221 * "NX74x0" "HP Compaq nx74" 221 * "NX74x0" "HP Compaq nx74"
222 * "NX6325" "HP Compaq nx6325" 222 * "NX6325" "HP Compaq nx6325"
@@ -324,7 +324,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
324 flush_work(&hpled_led.work); 324 flush_work(&hpled_led.work);
325 led_classdev_unregister(&hpled_led.led_classdev); 325 led_classdev_unregister(&hpled_led.led_classdev);
326 326
327 return lis3lv02d_remove_fs(); 327 return lis3lv02d_remove_fs(&lis3_dev);
328} 328}
329 329
330 330
@@ -338,13 +338,7 @@ static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
338 338
339static int lis3lv02d_resume(struct acpi_device *device) 339static int lis3lv02d_resume(struct acpi_device *device)
340{ 340{
341 /* put back the device in the right state (ACPI might turn it on) */ 341 lis3lv02d_poweron(&lis3_dev);
342 mutex_lock(&lis3_dev.lock);
343 if (lis3_dev.usage > 0)
344 lis3lv02d_poweron(&lis3_dev);
345 else
346 lis3lv02d_poweroff(&lis3_dev);
347 mutex_unlock(&lis3_dev.lock);
348 return 0; 342 return 0;
349} 343}
350#else 344#else
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 778eb7795983..271338bdb6be 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -27,9 +27,7 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/input.h> 30#include <linux/input-polldev.h>
31#include <linux/kthread.h>
32#include <linux/semaphore.h>
33#include <linux/delay.h> 31#include <linux/delay.h>
34#include <linux/wait.h> 32#include <linux/wait.h>
35#include <linux/poll.h> 33#include <linux/poll.h>
@@ -105,56 +103,39 @@ static void lis3lv02d_get_xyz(struct lis3lv02d *lis3, int *x, int *y, int *z)
105{ 103{
106 int position[3]; 104 int position[3];
107 105
108 position[0] = lis3_dev.read_data(lis3, OUTX); 106 position[0] = lis3->read_data(lis3, OUTX);
109 position[1] = lis3_dev.read_data(lis3, OUTY); 107 position[1] = lis3->read_data(lis3, OUTY);
110 position[2] = lis3_dev.read_data(lis3, OUTZ); 108 position[2] = lis3->read_data(lis3, OUTZ);
111 109
112 *x = lis3lv02d_get_axis(lis3_dev.ac.x, position); 110 *x = lis3lv02d_get_axis(lis3->ac.x, position);
113 *y = lis3lv02d_get_axis(lis3_dev.ac.y, position); 111 *y = lis3lv02d_get_axis(lis3->ac.y, position);
114 *z = lis3lv02d_get_axis(lis3_dev.ac.z, position); 112 *z = lis3lv02d_get_axis(lis3->ac.z, position);
115} 113}
116 114
117void lis3lv02d_poweroff(struct lis3lv02d *lis3) 115void lis3lv02d_poweroff(struct lis3lv02d *lis3)
118{ 116{
119 lis3_dev.is_on = 0; 117 /* disable X,Y,Z axis and power down */
118 lis3->write(lis3, CTRL_REG1, 0x00);
120} 119}
121EXPORT_SYMBOL_GPL(lis3lv02d_poweroff); 120EXPORT_SYMBOL_GPL(lis3lv02d_poweroff);
122 121
123void lis3lv02d_poweron(struct lis3lv02d *lis3) 122void lis3lv02d_poweron(struct lis3lv02d *lis3)
124{ 123{
125 lis3_dev.is_on = 1; 124 u8 reg;
126 lis3_dev.init(lis3);
127}
128EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
129 125
130/* 126 lis3->init(lis3);
131 * To be called before starting to use the device. It makes sure that the
132 * device will always be on until a call to lis3lv02d_decrease_use(). Not to be
133 * used from interrupt context.
134 */
135static void lis3lv02d_increase_use(struct lis3lv02d *dev)
136{
137 mutex_lock(&dev->lock);
138 dev->usage++;
139 if (dev->usage == 1) {
140 if (!dev->is_on)
141 lis3lv02d_poweron(dev);
142 }
143 mutex_unlock(&dev->lock);
144}
145 127
146/* 128 /*
147 * To be called whenever a usage of the device is stopped. 129 * Common configuration
148 * It will make sure to turn off the device when there is not usage. 130 * BDU: LSB and MSB values are not updated until both have been read.
149 */ 131 * So the value read will always be correct.
150static void lis3lv02d_decrease_use(struct lis3lv02d *dev) 132 */
151{ 133 lis3->read(lis3, CTRL_REG2, &reg);
152 mutex_lock(&dev->lock); 134 reg |= CTRL2_BDU;
153 dev->usage--; 135 lis3->write(lis3, CTRL_REG2, reg);
154 if (dev->usage == 0)
155 lis3lv02d_poweroff(dev);
156 mutex_unlock(&dev->lock);
157} 136}
137EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
138
158 139
159static irqreturn_t lis302dl_interrupt(int irq, void *dummy) 140static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
160{ 141{
@@ -198,15 +179,12 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
198 printk(KERN_ERR DRIVER_NAME ": IRQ%d allocation failed\n", lis3_dev.irq); 179 printk(KERN_ERR DRIVER_NAME ": IRQ%d allocation failed\n", lis3_dev.irq);
199 return -EBUSY; 180 return -EBUSY;
200 } 181 }
201 lis3lv02d_increase_use(&lis3_dev);
202 printk("lis3: registered interrupt %d\n", lis3_dev.irq);
203 return 0; 182 return 0;
204} 183}
205 184
206static int lis3lv02d_misc_release(struct inode *inode, struct file *file) 185static int lis3lv02d_misc_release(struct inode *inode, struct file *file)
207{ 186{
208 fasync_helper(-1, file, 0, &lis3_dev.async_queue); 187 fasync_helper(-1, file, 0, &lis3_dev.async_queue);
209 lis3lv02d_decrease_use(&lis3_dev);
210 free_irq(lis3_dev.irq, &lis3_dev); 188 free_irq(lis3_dev.irq, &lis3_dev);
211 clear_bit(0, &lis3_dev.misc_opened); /* release the device */ 189 clear_bit(0, &lis3_dev.misc_opened); /* release the device */
212 return 0; 190 return 0;
@@ -290,46 +268,16 @@ static struct miscdevice lis3lv02d_misc_device = {
290 .fops = &lis3lv02d_misc_fops, 268 .fops = &lis3lv02d_misc_fops,
291}; 269};
292 270
293/** 271static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
294 * lis3lv02d_joystick_kthread - Kthread polling function
295 * @data: unused - here to conform to threadfn prototype
296 */
297static int lis3lv02d_joystick_kthread(void *data)
298{ 272{
299 int x, y, z; 273 int x, y, z;
300 274
301 while (!kthread_should_stop()) { 275 lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
302 lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z); 276 input_report_abs(pidev->input, ABS_X, x - lis3_dev.xcalib);
303 input_report_abs(lis3_dev.idev, ABS_X, x - lis3_dev.xcalib); 277 input_report_abs(pidev->input, ABS_Y, y - lis3_dev.ycalib);
304 input_report_abs(lis3_dev.idev, ABS_Y, y - lis3_dev.ycalib); 278 input_report_abs(pidev->input, ABS_Z, z - lis3_dev.zcalib);
305 input_report_abs(lis3_dev.idev, ABS_Z, z - lis3_dev.zcalib);
306
307 input_sync(lis3_dev.idev);
308
309 try_to_freeze();
310 msleep_interruptible(MDPS_POLL_INTERVAL);
311 }
312
313 return 0;
314}
315
316static int lis3lv02d_joystick_open(struct input_dev *input)
317{
318 lis3lv02d_increase_use(&lis3_dev);
319 lis3_dev.kthread = kthread_run(lis3lv02d_joystick_kthread, NULL, "klis3lv02d");
320 if (IS_ERR(lis3_dev.kthread)) {
321 lis3lv02d_decrease_use(&lis3_dev);
322 return PTR_ERR(lis3_dev.kthread);
323 }
324
325 return 0;
326} 279}
327 280
328static void lis3lv02d_joystick_close(struct input_dev *input)
329{
330 kthread_stop(lis3_dev.kthread);
331 lis3lv02d_decrease_use(&lis3_dev);
332}
333 281
334static inline void lis3lv02d_calibrate_joystick(void) 282static inline void lis3lv02d_calibrate_joystick(void)
335{ 283{
@@ -339,33 +287,36 @@ static inline void lis3lv02d_calibrate_joystick(void)
339 287
340int lis3lv02d_joystick_enable(void) 288int lis3lv02d_joystick_enable(void)
341{ 289{
290 struct input_dev *input_dev;
342 int err; 291 int err;
343 292
344 if (lis3_dev.idev) 293 if (lis3_dev.idev)
345 return -EINVAL; 294 return -EINVAL;
346 295
347 lis3_dev.idev = input_allocate_device(); 296 lis3_dev.idev = input_allocate_polled_device();
348 if (!lis3_dev.idev) 297 if (!lis3_dev.idev)
349 return -ENOMEM; 298 return -ENOMEM;
350 299
300 lis3_dev.idev->poll = lis3lv02d_joystick_poll;
301 lis3_dev.idev->poll_interval = MDPS_POLL_INTERVAL;
302 input_dev = lis3_dev.idev->input;
303
351 lis3lv02d_calibrate_joystick(); 304 lis3lv02d_calibrate_joystick();
352 305
353 lis3_dev.idev->name = "ST LIS3LV02DL Accelerometer"; 306 input_dev->name = "ST LIS3LV02DL Accelerometer";
354 lis3_dev.idev->phys = DRIVER_NAME "/input0"; 307 input_dev->phys = DRIVER_NAME "/input0";
355 lis3_dev.idev->id.bustype = BUS_HOST; 308 input_dev->id.bustype = BUS_HOST;
356 lis3_dev.idev->id.vendor = 0; 309 input_dev->id.vendor = 0;
357 lis3_dev.idev->dev.parent = &lis3_dev.pdev->dev; 310 input_dev->dev.parent = &lis3_dev.pdev->dev;
358 lis3_dev.idev->open = lis3lv02d_joystick_open;
359 lis3_dev.idev->close = lis3lv02d_joystick_close;
360 311
361 set_bit(EV_ABS, lis3_dev.idev->evbit); 312 set_bit(EV_ABS, input_dev->evbit);
362 input_set_abs_params(lis3_dev.idev, ABS_X, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3); 313 input_set_abs_params(input_dev, ABS_X, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
363 input_set_abs_params(lis3_dev.idev, ABS_Y, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3); 314 input_set_abs_params(input_dev, ABS_Y, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
364 input_set_abs_params(lis3_dev.idev, ABS_Z, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3); 315 input_set_abs_params(input_dev, ABS_Z, -lis3_dev.mdps_max_val, lis3_dev.mdps_max_val, 3, 3);
365 316
366 err = input_register_device(lis3_dev.idev); 317 err = input_register_polled_device(lis3_dev.idev);
367 if (err) { 318 if (err) {
368 input_free_device(lis3_dev.idev); 319 input_free_polled_device(lis3_dev.idev);
369 lis3_dev.idev = NULL; 320 lis3_dev.idev = NULL;
370 } 321 }
371 322
@@ -378,8 +329,9 @@ void lis3lv02d_joystick_disable(void)
378 if (!lis3_dev.idev) 329 if (!lis3_dev.idev)
379 return; 330 return;
380 331
381 misc_deregister(&lis3lv02d_misc_device); 332 if (lis3_dev.irq)
382 input_unregister_device(lis3_dev.idev); 333 misc_deregister(&lis3lv02d_misc_device);
334 input_unregister_polled_device(lis3_dev.idev);
383 lis3_dev.idev = NULL; 335 lis3_dev.idev = NULL;
384} 336}
385EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable); 337EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
@@ -390,9 +342,7 @@ static ssize_t lis3lv02d_position_show(struct device *dev,
390{ 342{
391 int x, y, z; 343 int x, y, z;
392 344
393 lis3lv02d_increase_use(&lis3_dev);
394 lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z); 345 lis3lv02d_get_xyz(&lis3_dev, &x, &y, &z);
395 lis3lv02d_decrease_use(&lis3_dev);
396 return sprintf(buf, "(%d,%d,%d)\n", x, y, z); 346 return sprintf(buf, "(%d,%d,%d)\n", x, y, z);
397} 347}
398 348
@@ -406,9 +356,7 @@ static ssize_t lis3lv02d_calibrate_store(struct device *dev,
406 struct device_attribute *attr, 356 struct device_attribute *attr,
407 const char *buf, size_t count) 357 const char *buf, size_t count)
408{ 358{
409 lis3lv02d_increase_use(&lis3_dev);
410 lis3lv02d_calibrate_joystick(); 359 lis3lv02d_calibrate_joystick();
411 lis3lv02d_decrease_use(&lis3_dev);
412 return count; 360 return count;
413} 361}
414 362
@@ -420,9 +368,7 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
420 u8 ctrl; 368 u8 ctrl;
421 int val; 369 int val;
422 370
423 lis3lv02d_increase_use(&lis3_dev);
424 lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl); 371 lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
425 lis3lv02d_decrease_use(&lis3_dev);
426 val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4; 372 val = (ctrl & (CTRL1_DF0 | CTRL1_DF1)) >> 4;
427 return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]); 373 return sprintf(buf, "%d\n", lis3lv02dl_df_val[val]);
428} 374}
@@ -446,17 +392,17 @@ static struct attribute_group lis3lv02d_attribute_group = {
446 392
447static int lis3lv02d_add_fs(struct lis3lv02d *lis3) 393static int lis3lv02d_add_fs(struct lis3lv02d *lis3)
448{ 394{
449 lis3_dev.pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); 395 lis3->pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
450 if (IS_ERR(lis3_dev.pdev)) 396 if (IS_ERR(lis3->pdev))
451 return PTR_ERR(lis3_dev.pdev); 397 return PTR_ERR(lis3->pdev);
452 398
453 return sysfs_create_group(&lis3_dev.pdev->dev.kobj, &lis3lv02d_attribute_group); 399 return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
454} 400}
455 401
456int lis3lv02d_remove_fs(void) 402int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
457{ 403{
458 sysfs_remove_group(&lis3_dev.pdev->dev.kobj, &lis3lv02d_attribute_group); 404 sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
459 platform_device_unregister(lis3_dev.pdev); 405 platform_device_unregister(lis3->pdev);
460 return 0; 406 return 0;
461} 407}
462EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs); 408EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
@@ -482,18 +428,35 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
482 break; 428 break;
483 default: 429 default:
484 printk(KERN_ERR DRIVER_NAME 430 printk(KERN_ERR DRIVER_NAME
485 ": unknown sensor type 0x%X\n", lis3_dev.whoami); 431 ": unknown sensor type 0x%X\n", dev->whoami);
486 return -EINVAL; 432 return -EINVAL;
487 } 433 }
488 434
489 mutex_init(&dev->lock);
490 lis3lv02d_add_fs(dev); 435 lis3lv02d_add_fs(dev);
491 lis3lv02d_increase_use(dev); 436 lis3lv02d_poweron(dev);
492 437
493 if (lis3lv02d_joystick_enable()) 438 if (lis3lv02d_joystick_enable())
494 printk(KERN_ERR DRIVER_NAME ": joystick initialization failed\n"); 439 printk(KERN_ERR DRIVER_NAME ": joystick initialization failed\n");
495 440
496 printk("lis3_init_device: irq %d\n", dev->irq); 441 /* passing in platform specific data is purely optional and only
442 * used by the SPI transport layer at the moment */
443 if (dev->pdata) {
444 struct lis3lv02d_platform_data *p = dev->pdata;
445
446 if (p->click_flags && (dev->whoami == LIS_SINGLE_ID)) {
447 dev->write(dev, CLICK_CFG, p->click_flags);
448 dev->write(dev, CLICK_TIMELIMIT, p->click_time_limit);
449 dev->write(dev, CLICK_LATENCY, p->click_latency);
450 dev->write(dev, CLICK_WINDOW, p->click_window);
451 dev->write(dev, CLICK_THSZ, p->click_thresh_z & 0xf);
452 dev->write(dev, CLICK_THSY_X,
453 (p->click_thresh_x & 0xf) |
454 (p->click_thresh_y << 4));
455 }
456
457 if (p->irq_cfg)
458 dev->write(dev, CTRL_REG3, p->irq_cfg);
459 }
497 460
498 /* bail if we did not get an IRQ from the bus layer */ 461 /* bail if we did not get an IRQ from the bus layer */
499 if (!dev->irq) { 462 if (!dev->irq) {
@@ -502,11 +465,9 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
502 goto out; 465 goto out;
503 } 466 }
504 467
505 printk("lis3: registering device\n");
506 if (misc_register(&lis3lv02d_misc_device)) 468 if (misc_register(&lis3lv02d_misc_device))
507 printk(KERN_ERR DRIVER_NAME ": misc_register failed\n"); 469 printk(KERN_ERR DRIVER_NAME ": misc_register failed\n");
508out: 470out:
509 lis3lv02d_decrease_use(dev);
510 return 0; 471 return 0;
511} 472}
512EXPORT_SYMBOL_GPL(lis3lv02d_init_device); 473EXPORT_SYMBOL_GPL(lis3lv02d_init_device);
diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
index 745ec96806d4..e320e2f511f1 100644
--- a/drivers/hwmon/lis3lv02d.h
+++ b/drivers/hwmon/lis3lv02d.h
@@ -18,6 +18,8 @@
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21#include <linux/platform_device.h>
22#include <linux/input-polldev.h>
21 23
22/* 24/*
23 * The actual chip is STMicroelectronics LIS3LV02DL or LIS3LV02DQ that seems to 25 * The actual chip is STMicroelectronics LIS3LV02DL or LIS3LV02DQ that seems to
@@ -27,12 +29,14 @@
27 * They can also be connected via I²C. 29 * They can also be connected via I²C.
28 */ 30 */
29 31
32#include <linux/lis3lv02d.h>
33
30/* 2-byte registers */ 34/* 2-byte registers */
31#define LIS_DOUBLE_ID 0x3A /* LIS3LV02D[LQ] */ 35#define LIS_DOUBLE_ID 0x3A /* LIS3LV02D[LQ] */
32/* 1-byte registers */ 36/* 1-byte registers */
33#define LIS_SINGLE_ID 0x3B /* LIS[32]02DL and others */ 37#define LIS_SINGLE_ID 0x3B /* LIS[32]02DL and others */
34 38
35enum lis3lv02d_reg { 39enum lis3_reg {
36 WHO_AM_I = 0x0F, 40 WHO_AM_I = 0x0F,
37 OFFSET_X = 0x16, 41 OFFSET_X = 0x16,
38 OFFSET_Y = 0x17, 42 OFFSET_Y = 0x17,
@@ -60,6 +64,19 @@ enum lis3lv02d_reg {
60 FF_WU_THS_L = 0x34, 64 FF_WU_THS_L = 0x34,
61 FF_WU_THS_H = 0x35, 65 FF_WU_THS_H = 0x35,
62 FF_WU_DURATION = 0x36, 66 FF_WU_DURATION = 0x36,
67};
68
69enum lis302d_reg {
70 CLICK_CFG = 0x38,
71 CLICK_SRC = 0x39,
72 CLICK_THSY_X = 0x3B,
73 CLICK_THSZ = 0x3C,
74 CLICK_TIMELIMIT = 0x3D,
75 CLICK_LATENCY = 0x3E,
76 CLICK_WINDOW = 0x3F,
77};
78
79enum lis3lv02d_reg {
63 DD_CFG = 0x38, 80 DD_CFG = 0x38,
64 DD_SRC = 0x39, 81 DD_SRC = 0x39,
65 DD_ACK = 0x3A, 82 DD_ACK = 0x3A,
@@ -169,22 +186,20 @@ struct lis3lv02d {
169 s16 (*read_data) (struct lis3lv02d *lis3, int reg); 186 s16 (*read_data) (struct lis3lv02d *lis3, int reg);
170 int mdps_max_val; 187 int mdps_max_val;
171 188
172 struct input_dev *idev; /* input device */ 189 struct input_polled_dev *idev; /* input device */
173 struct task_struct *kthread; /* kthread for input */
174 struct mutex lock;
175 struct platform_device *pdev; /* platform device */ 190 struct platform_device *pdev; /* platform device */
176 atomic_t count; /* interrupt count after last read */ 191 atomic_t count; /* interrupt count after last read */
177 int xcalib; /* calibrated null value for x */ 192 int xcalib; /* calibrated null value for x */
178 int ycalib; /* calibrated null value for y */ 193 int ycalib; /* calibrated null value for y */
179 int zcalib; /* calibrated null value for z */ 194 int zcalib; /* calibrated null value for z */
180 unsigned char is_on; /* whether the device is on or off */
181 unsigned char usage; /* usage counter */
182 struct axis_conversion ac; /* hw -> logical axis */ 195 struct axis_conversion ac; /* hw -> logical axis */
183 196
184 u32 irq; /* IRQ number */ 197 u32 irq; /* IRQ number */
185 struct fasync_struct *async_queue; /* queue for the misc device */ 198 struct fasync_struct *async_queue; /* queue for the misc device */
186 wait_queue_head_t misc_wait; /* Wait queue for the misc device */ 199 wait_queue_head_t misc_wait; /* Wait queue for the misc device */
187 unsigned long misc_opened; /* bit0: whether the device is open */ 200 unsigned long misc_opened; /* bit0: whether the device is open */
201
202 struct lis3lv02d_platform_data *pdata; /* for passing board config */
188}; 203};
189 204
190int lis3lv02d_init_device(struct lis3lv02d *lis3); 205int lis3lv02d_init_device(struct lis3lv02d *lis3);
@@ -192,6 +207,6 @@ int lis3lv02d_joystick_enable(void);
192void lis3lv02d_joystick_disable(void); 207void lis3lv02d_joystick_disable(void);
193void lis3lv02d_poweroff(struct lis3lv02d *lis3); 208void lis3lv02d_poweroff(struct lis3lv02d *lis3);
194void lis3lv02d_poweron(struct lis3lv02d *lis3); 209void lis3lv02d_poweron(struct lis3lv02d *lis3);
195int lis3lv02d_remove_fs(void); 210int lis3lv02d_remove_fs(struct lis3lv02d *lis3);
196 211
197extern struct lis3lv02d lis3_dev; 212extern struct lis3lv02d lis3_dev;
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 07ae74b0e191..3827ff04485f 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -72,6 +72,7 @@ static int __devinit lis302dl_spi_probe(struct spi_device *spi)
72 lis3_dev.write = lis3_spi_write; 72 lis3_dev.write = lis3_spi_write;
73 lis3_dev.irq = spi->irq; 73 lis3_dev.irq = spi->irq;
74 lis3_dev.ac = lis3lv02d_axis_normal; 74 lis3_dev.ac = lis3lv02d_axis_normal;
75 lis3_dev.pdata = spi->dev.platform_data;
75 spi_set_drvdata(spi, &lis3_dev); 76 spi_set_drvdata(spi, &lis3_dev);
76 77
77 ret = lis3lv02d_init_device(&lis3_dev); 78 ret = lis3lv02d_init_device(&lis3_dev);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 0d04d3ebfc2d..3c259ee7ddda 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -513,6 +513,19 @@ config I2C_SIMTEC
513 This driver can also be built as a module. If so, the module 513 This driver can also be built as a module. If so, the module
514 will be called i2c-simtec. 514 will be called i2c-simtec.
515 515
516config I2C_STU300
517 tristate "ST Microelectronics DDC I2C interface"
518 depends on MACH_U300
519 default y if MACH_U300
520 help
521 If you say yes to this option, support will be included for the
522 I2C interface from ST Microelectronics simply called "DDC I2C"
523 supporting both I2C and DDC, used in e.g. the U300 series
524 mobile platforms.
525
526 This driver can also be built as a module. If so, the module
527 will be called i2c-stu300.
528
516config I2C_VERSATILE 529config I2C_VERSATILE
517 tristate "ARM Versatile/Realview I2C bus support" 530 tristate "ARM Versatile/Realview I2C bus support"
518 depends on ARCH_VERSATILE || ARCH_REALVIEW 531 depends on ARCH_VERSATILE || ARCH_REALVIEW
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 776acb6403a7..edeabf003106 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_I2C_S6000) += i2c-s6000.o
48obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o 48obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
49obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o 49obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
50obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o 50obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
51obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
51obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o 52obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
52 53
53# External I2C/SMBus adapter drivers 54# External I2C/SMBus adapter drivers
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 67d9dc5b351b..06e1ecb4919f 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -200,10 +200,10 @@ static int __devinit at91_i2c_probe(struct platform_device *pdev)
200 if (!res) 200 if (!res)
201 return -ENXIO; 201 return -ENXIO;
202 202
203 if (!request_mem_region(res->start, res->end - res->start + 1, "at91_i2c")) 203 if (!request_mem_region(res->start, resource_size(res), "at91_i2c"))
204 return -EBUSY; 204 return -EBUSY;
205 205
206 twi_base = ioremap(res->start, res->end - res->start + 1); 206 twi_base = ioremap(res->start, resource_size(res));
207 if (!twi_base) { 207 if (!twi_base) {
208 rc = -ENOMEM; 208 rc = -ENOMEM;
209 goto fail0; 209 goto fail0;
@@ -252,7 +252,7 @@ fail2:
252fail1: 252fail1:
253 iounmap(twi_base); 253 iounmap(twi_base);
254fail0: 254fail0:
255 release_mem_region(res->start, res->end - res->start + 1); 255 release_mem_region(res->start, resource_size(res));
256 256
257 return rc; 257 return rc;
258} 258}
@@ -268,7 +268,7 @@ static int __devexit at91_i2c_remove(struct platform_device *pdev)
268 268
269 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 269 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
270 iounmap(twi_base); 270 iounmap(twi_base);
271 release_mem_region(res->start, res->end - res->start + 1); 271 release_mem_region(res->start, resource_size(res));
272 272
273 clk_disable(twi_clk); /* disable peripheral clock */ 273 clk_disable(twi_clk); /* disable peripheral clock */
274 clk_put(twi_clk); 274 clk_put(twi_clk);
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index f78ce523e3db..532828bc50e6 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -389,7 +389,7 @@ i2c_au1550_probe(struct platform_device *pdev)
389 goto out; 389 goto out;
390 } 390 }
391 391
392 priv->ioarea = request_mem_region(r->start, r->end - r->start + 1, 392 priv->ioarea = request_mem_region(r->start, resource_size(r),
393 pdev->name); 393 pdev->name);
394 if (!priv->ioarea) { 394 if (!priv->ioarea) {
395 ret = -EBUSY; 395 ret = -EBUSY;
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 26d8987e69bf..b309ac2c3d5c 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -12,6 +12,7 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/i2c.h> 14#include <linux/i2c.h>
15#include <linux/io.h>
15#include <linux/mm.h> 16#include <linux/mm.h>
16#include <linux/timer.h> 17#include <linux/timer.h>
17#include <linux/spinlock.h> 18#include <linux/spinlock.h>
@@ -651,7 +652,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
651 goto out_error_get_res; 652 goto out_error_get_res;
652 } 653 }
653 654
654 iface->regs_base = ioremap(res->start, res->end - res->start + 1); 655 iface->regs_base = ioremap(res->start, resource_size(res));
655 if (iface->regs_base == NULL) { 656 if (iface->regs_base == NULL) {
656 dev_err(&pdev->dev, "Cannot map IO\n"); 657 dev_err(&pdev->dev, "Cannot map IO\n");
657 rc = -ENXIO; 658 rc = -ENXIO;
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index e5a8dae4a289..87ecace415da 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -373,7 +373,7 @@ static int __devinit highlander_i2c_probe(struct platform_device *pdev)
373 if (unlikely(!dev)) 373 if (unlikely(!dev))
374 return -ENOMEM; 374 return -ENOMEM;
375 375
376 dev->base = ioremap_nocache(res->start, res->end - res->start + 1); 376 dev->base = ioremap_nocache(res->start, resource_size(res));
377 if (unlikely(!dev->base)) { 377 if (unlikely(!dev->base)) {
378 ret = -ENXIO; 378 ret = -ENXIO;
379 goto err; 379 goto err;
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 5a4945d1dba4..c3869d94ad42 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -469,7 +469,7 @@ mv64xxx_i2c_map_regs(struct platform_device *pd,
469 if (!r) 469 if (!r)
470 return -ENODEV; 470 return -ENODEV;
471 471
472 size = r->end - r->start + 1; 472 size = resource_size(r);
473 473
474 if (!request_mem_region(r->start, size, drv_data->adapter.name)) 474 if (!request_mem_region(r->start, size, drv_data->adapter.name))
475 return -EBUSY; 475 return -EBUSY;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 3542c6ba98f1..0dabe643ec51 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -234,14 +234,14 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
234 if (!i2c) 234 if (!i2c)
235 return -ENOMEM; 235 return -ENOMEM;
236 236
237 if (!request_mem_region(res->start, res->end - res->start + 1, 237 if (!request_mem_region(res->start, resource_size(res),
238 pdev->name)) { 238 pdev->name)) {
239 dev_err(&pdev->dev, "Memory region busy\n"); 239 dev_err(&pdev->dev, "Memory region busy\n");
240 ret = -EBUSY; 240 ret = -EBUSY;
241 goto request_mem_failed; 241 goto request_mem_failed;
242 } 242 }
243 243
244 i2c->base = ioremap(res->start, res->end - res->start + 1); 244 i2c->base = ioremap(res->start, resource_size(res));
245 if (!i2c->base) { 245 if (!i2c->base) {
246 dev_err(&pdev->dev, "Unable to map registers\n"); 246 dev_err(&pdev->dev, "Unable to map registers\n");
247 ret = -EIO; 247 ret = -EIO;
@@ -283,7 +283,7 @@ add_adapter_failed:
283request_irq_failed: 283request_irq_failed:
284 iounmap(i2c->base); 284 iounmap(i2c->base);
285map_failed: 285map_failed:
286 release_mem_region(res->start, res->end - res->start + 1); 286 release_mem_region(res->start, resource_size(res));
287request_mem_failed: 287request_mem_failed:
288 kfree(i2c); 288 kfree(i2c);
289 289
@@ -311,7 +311,7 @@ static int __devexit ocores_i2c_remove(struct platform_device* pdev)
311 311
312 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 312 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
313 if (res) 313 if (res)
314 release_mem_region(res->start, res->end - res->start + 1); 314 release_mem_region(res->start, resource_size(res));
315 315
316 kfree(i2c); 316 kfree(i2c);
317 317
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index c73475dd0fba..b606db85525d 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -828,7 +828,7 @@ omap_i2c_probe(struct platform_device *pdev)
828 dev->idle = 1; 828 dev->idle = 1;
829 dev->dev = &pdev->dev; 829 dev->dev = &pdev->dev;
830 dev->irq = irq->start; 830 dev->irq = irq->start;
831 dev->base = ioremap(mem->start, mem->end - mem->start + 1); 831 dev->base = ioremap(mem->start, resource_size(mem));
832 if (!dev->base) { 832 if (!dev->base) {
833 r = -ENOMEM; 833 r = -ENOMEM;
834 goto err_free_mem; 834 goto err_free_mem;
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 7b23891b7d59..c4df9d411cd5 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -27,8 +27,6 @@
27#include <asm/irq.h> 27#include <asm/irq.h>
28#include <asm/io.h> 28#include <asm/io.h>
29 29
30#define res_len(r) ((r)->end - (r)->start + 1)
31
32struct i2c_pca_pf_data { 30struct i2c_pca_pf_data {
33 void __iomem *reg_base; 31 void __iomem *reg_base;
34 int irq; /* if 0, use polling */ 32 int irq; /* if 0, use polling */
@@ -148,7 +146,7 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
148 goto e_print; 146 goto e_print;
149 } 147 }
150 148
151 if (!request_mem_region(res->start, res_len(res), res->name)) { 149 if (!request_mem_region(res->start, resource_size(res), res->name)) {
152 ret = -ENOMEM; 150 ret = -ENOMEM;
153 goto e_print; 151 goto e_print;
154 } 152 }
@@ -161,13 +159,13 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
161 159
162 init_waitqueue_head(&i2c->wait); 160 init_waitqueue_head(&i2c->wait);
163 161
164 i2c->reg_base = ioremap(res->start, res_len(res)); 162 i2c->reg_base = ioremap(res->start, resource_size(res));
165 if (!i2c->reg_base) { 163 if (!i2c->reg_base) {
166 ret = -ENOMEM; 164 ret = -ENOMEM;
167 goto e_remap; 165 goto e_remap;
168 } 166 }
169 i2c->io_base = res->start; 167 i2c->io_base = res->start;
170 i2c->io_size = res_len(res); 168 i2c->io_size = resource_size(res);
171 i2c->irq = irq; 169 i2c->irq = irq;
172 170
173 i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0; 171 i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
@@ -250,7 +248,7 @@ e_reqirq:
250e_remap: 248e_remap:
251 kfree(i2c); 249 kfree(i2c);
252e_alloc: 250e_alloc:
253 release_mem_region(res->start, res_len(res)); 251 release_mem_region(res->start, resource_size(res));
254e_print: 252e_print:
255 printk(KERN_ERR "Registering PCA9564/PCA9665 FAILED! (%d)\n", ret); 253 printk(KERN_ERR "Registering PCA9564/PCA9665 FAILED! (%d)\n", ret);
256 return ret; 254 return ret;
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 0bdb2d7f0570..7b57d5f267e1 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -283,7 +283,7 @@ static int __devinit pmcmsptwi_probe(struct platform_device *pldev)
283 } 283 }
284 284
285 /* reserve the memory region */ 285 /* reserve the memory region */
286 if (!request_mem_region(res->start, res->end - res->start + 1, 286 if (!request_mem_region(res->start, resource_size(res),
287 pldev->name)) { 287 pldev->name)) {
288 dev_err(&pldev->dev, 288 dev_err(&pldev->dev,
289 "Unable to get memory/io address region 0x%08x\n", 289 "Unable to get memory/io address region 0x%08x\n",
@@ -294,7 +294,7 @@ static int __devinit pmcmsptwi_probe(struct platform_device *pldev)
294 294
295 /* remap the memory */ 295 /* remap the memory */
296 pmcmsptwi_data.iobase = ioremap_nocache(res->start, 296 pmcmsptwi_data.iobase = ioremap_nocache(res->start,
297 res->end - res->start + 1); 297 resource_size(res));
298 if (!pmcmsptwi_data.iobase) { 298 if (!pmcmsptwi_data.iobase) {
299 dev_err(&pldev->dev, 299 dev_err(&pldev->dev,
300 "Unable to ioremap address 0x%08x\n", res->start); 300 "Unable to ioremap address 0x%08x\n", res->start);
@@ -360,7 +360,7 @@ ret_unmap:
360 iounmap(pmcmsptwi_data.iobase); 360 iounmap(pmcmsptwi_data.iobase);
361 361
362ret_unreserve: 362ret_unreserve:
363 release_mem_region(res->start, res->end - res->start + 1); 363 release_mem_region(res->start, resource_size(res));
364 364
365ret_err: 365ret_err:
366 return rc; 366 return rc;
@@ -385,7 +385,7 @@ static int __devexit pmcmsptwi_remove(struct platform_device *pldev)
385 iounmap(pmcmsptwi_data.iobase); 385 iounmap(pmcmsptwi_data.iobase);
386 386
387 res = platform_get_resource(pldev, IORESOURCE_MEM, 0); 387 res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
388 release_mem_region(res->start, res->end - res->start + 1); 388 release_mem_region(res->start, resource_size(res));
389 389
390 return 0; 390 return 0;
391} 391}
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 035a6c7e59df..762e1e530882 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -993,7 +993,6 @@ static const struct i2c_algorithm i2c_pxa_pio_algorithm = {
993 .functionality = i2c_pxa_functionality, 993 .functionality = i2c_pxa_functionality,
994}; 994};
995 995
996#define res_len(r) ((r)->end - (r)->start + 1)
997static int i2c_pxa_probe(struct platform_device *dev) 996static int i2c_pxa_probe(struct platform_device *dev)
998{ 997{
999 struct pxa_i2c *i2c; 998 struct pxa_i2c *i2c;
@@ -1008,7 +1007,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
1008 if (res == NULL || irq < 0) 1007 if (res == NULL || irq < 0)
1009 return -ENODEV; 1008 return -ENODEV;
1010 1009
1011 if (!request_mem_region(res->start, res_len(res), res->name)) 1010 if (!request_mem_region(res->start, resource_size(res), res->name))
1012 return -ENOMEM; 1011 return -ENOMEM;
1013 1012
1014 i2c = kzalloc(sizeof(struct pxa_i2c), GFP_KERNEL); 1013 i2c = kzalloc(sizeof(struct pxa_i2c), GFP_KERNEL);
@@ -1038,7 +1037,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
1038 goto eclk; 1037 goto eclk;
1039 } 1038 }
1040 1039
1041 i2c->reg_base = ioremap(res->start, res_len(res)); 1040 i2c->reg_base = ioremap(res->start, resource_size(res));
1042 if (!i2c->reg_base) { 1041 if (!i2c->reg_base) {
1043 ret = -EIO; 1042 ret = -EIO;
1044 goto eremap; 1043 goto eremap;
@@ -1046,7 +1045,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
1046 i2c->reg_shift = REG_SHIFT(id->driver_data); 1045 i2c->reg_shift = REG_SHIFT(id->driver_data);
1047 1046
1048 i2c->iobase = res->start; 1047 i2c->iobase = res->start;
1049 i2c->iosize = res_len(res); 1048 i2c->iosize = resource_size(res);
1050 1049
1051 i2c->irq = irq; 1050 i2c->irq = irq;
1052 1051
@@ -1110,7 +1109,7 @@ eremap:
1110eclk: 1109eclk:
1111 kfree(i2c); 1110 kfree(i2c);
1112emalloc: 1111emalloc:
1113 release_mem_region(res->start, res_len(res)); 1112 release_mem_region(res->start, resource_size(res));
1114 return ret; 1113 return ret;
1115} 1114}
1116 1115
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 079a312d36fd..8f42a4536cdf 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -828,7 +828,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
828 goto err_clk; 828 goto err_clk;
829 } 829 }
830 830
831 i2c->ioarea = request_mem_region(res->start, (res->end-res->start)+1, 831 i2c->ioarea = request_mem_region(res->start, resource_size(res),
832 pdev->name); 832 pdev->name);
833 833
834 if (i2c->ioarea == NULL) { 834 if (i2c->ioarea == NULL) {
@@ -837,7 +837,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
837 goto err_clk; 837 goto err_clk;
838 } 838 }
839 839
840 i2c->regs = ioremap(res->start, (res->end-res->start)+1); 840 i2c->regs = ioremap(res->start, resource_size(res));
841 841
842 if (i2c->regs == NULL) { 842 if (i2c->regs == NULL) {
843 dev_err(&pdev->dev, "cannot map IO\n"); 843 dev_err(&pdev->dev, "cannot map IO\n");
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
new file mode 100644
index 000000000000..182e711318ba
--- /dev/null
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -0,0 +1,1029 @@
1/*
2 * Copyright (C) 2007-2009 ST-Ericsson AB
3 * License terms: GNU General Public License (GPL) version 2
4 * ST DDC I2C master mode driver, used in e.g. U300 series platforms.
5 * Author: Linus Walleij <linus.walleij@stericsson.com>
6 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
7 */
8#include <linux/init.h>
9#include <linux/module.h>
10#include <linux/platform_device.h>
11#include <linux/delay.h>
12#include <linux/i2c.h>
13#include <linux/spinlock.h>
14#include <linux/completion.h>
15#include <linux/err.h>
16#include <linux/interrupt.h>
17#include <linux/clk.h>
18#include <linux/io.h>
19
20/* the name of this kernel module */
21#define NAME "stu300"
22
23/* CR (Control Register) 8bit (R/W) */
24#define I2C_CR (0x00000000)
25#define I2C_CR_RESET_VALUE (0x00)
26#define I2C_CR_RESET_UMASK (0x00)
27#define I2C_CR_DDC1_ENABLE (0x80)
28#define I2C_CR_TRANS_ENABLE (0x40)
29#define I2C_CR_PERIPHERAL_ENABLE (0x20)
30#define I2C_CR_DDC2B_ENABLE (0x10)
31#define I2C_CR_START_ENABLE (0x08)
32#define I2C_CR_ACK_ENABLE (0x04)
33#define I2C_CR_STOP_ENABLE (0x02)
34#define I2C_CR_INTERRUPT_ENABLE (0x01)
35/* SR1 (Status Register 1) 8bit (R/-) */
36#define I2C_SR1 (0x00000004)
37#define I2C_SR1_RESET_VALUE (0x00)
38#define I2C_SR1_RESET_UMASK (0x00)
39#define I2C_SR1_EVF_IND (0x80)
40#define I2C_SR1_ADD10_IND (0x40)
41#define I2C_SR1_TRA_IND (0x20)
42#define I2C_SR1_BUSY_IND (0x10)
43#define I2C_SR1_BTF_IND (0x08)
44#define I2C_SR1_ADSL_IND (0x04)
45#define I2C_SR1_MSL_IND (0x02)
46#define I2C_SR1_SB_IND (0x01)
47/* SR2 (Status Register 2) 8bit (R/-) */
48#define I2C_SR2 (0x00000008)
49#define I2C_SR2_RESET_VALUE (0x00)
50#define I2C_SR2_RESET_UMASK (0x40)
51#define I2C_SR2_MASK (0xBF)
52#define I2C_SR2_SCLFAL_IND (0x80)
53#define I2C_SR2_ENDAD_IND (0x20)
54#define I2C_SR2_AF_IND (0x10)
55#define I2C_SR2_STOPF_IND (0x08)
56#define I2C_SR2_ARLO_IND (0x04)
57#define I2C_SR2_BERR_IND (0x02)
58#define I2C_SR2_DDC2BF_IND (0x01)
59/* CCR (Clock Control Register) 8bit (R/W) */
60#define I2C_CCR (0x0000000C)
61#define I2C_CCR_RESET_VALUE (0x00)
62#define I2C_CCR_RESET_UMASK (0x00)
63#define I2C_CCR_MASK (0xFF)
64#define I2C_CCR_FMSM (0x80)
65#define I2C_CCR_CC_MASK (0x7F)
66/* OAR1 (Own Address Register 1) 8bit (R/W) */
67#define I2C_OAR1 (0x00000010)
68#define I2C_OAR1_RESET_VALUE (0x00)
69#define I2C_OAR1_RESET_UMASK (0x00)
70#define I2C_OAR1_ADD_MASK (0xFF)
71/* OAR2 (Own Address Register 2) 8bit (R/W) */
72#define I2C_OAR2 (0x00000014)
73#define I2C_OAR2_RESET_VALUE (0x40)
74#define I2C_OAR2_RESET_UMASK (0x19)
75#define I2C_OAR2_MASK (0xE6)
76#define I2C_OAR2_FR_25_10MHZ (0x00)
77#define I2C_OAR2_FR_10_1667MHZ (0x20)
78#define I2C_OAR2_FR_1667_2667MHZ (0x40)
79#define I2C_OAR2_FR_2667_40MHZ (0x60)
80#define I2C_OAR2_FR_40_5333MHZ (0x80)
81#define I2C_OAR2_FR_5333_66MHZ (0xA0)
82#define I2C_OAR2_FR_66_80MHZ (0xC0)
83#define I2C_OAR2_FR_80_100MHZ (0xE0)
84#define I2C_OAR2_FR_MASK (0xE0)
85#define I2C_OAR2_ADD_MASK (0x06)
86/* DR (Data Register) 8bit (R/W) */
87#define I2C_DR (0x00000018)
88#define I2C_DR_RESET_VALUE (0x00)
89#define I2C_DR_RESET_UMASK (0xFF)
90#define I2C_DR_D_MASK (0xFF)
91/* ECCR (Extended Clock Control Register) 8bit (R/W) */
92#define I2C_ECCR (0x0000001C)
93#define I2C_ECCR_RESET_VALUE (0x00)
94#define I2C_ECCR_RESET_UMASK (0xE0)
95#define I2C_ECCR_MASK (0x1F)
96#define I2C_ECCR_CC_MASK (0x1F)
97
98/*
99 * These events are more or less responses to commands
100 * sent into the hardware, presumably reflecting the state
101 * of an internal state machine.
102 */
103enum stu300_event {
104 STU300_EVENT_NONE = 0,
105 STU300_EVENT_1,
106 STU300_EVENT_2,
107 STU300_EVENT_3,
108 STU300_EVENT_4,
109 STU300_EVENT_5,
110 STU300_EVENT_6,
111 STU300_EVENT_7,
112 STU300_EVENT_8,
113 STU300_EVENT_9
114};
115
116enum stu300_error {
117 STU300_ERROR_NONE = 0,
118 STU300_ERROR_ACKNOWLEDGE_FAILURE,
119 STU300_ERROR_BUS_ERROR,
120 STU300_ERROR_ARBITRATION_LOST
121};
122
123/* timeout waiting for the controller to respond */
124#define STU300_TIMEOUT (msecs_to_jiffies(1000))
125
126/*
127 * The number of address send athemps tried before giving up.
128 * If the first one failes it seems like 5 to 8 attempts are required.
129 */
130#define NUM_ADDR_RESEND_ATTEMPTS 10
131
132/* I2C clock speed, in Hz 0-400kHz*/
133static unsigned int scl_frequency = 100000;
134module_param(scl_frequency, uint, 0644);
135
136/**
137 * struct stu300_dev - the stu300 driver state holder
138 * @pdev: parent platform device
139 * @adapter: corresponding I2C adapter
140 * @phybase: location of I/O area in memory
141 * @physize: size of I/O area in memory
142 * @clk: hardware block clock
143 * @irq: assigned interrupt line
144 * @cmd_issue_lock: this locks the following cmd_ variables
145 * @cmd_complete: acknowledge completion for an I2C command
146 * @cmd_event: expected event coming in as a response to a command
147 * @cmd_err: error code as response to a command
148 * @speed: current bus speed in Hz
149 * @msg_index: index of current message
150 * @msg_len: length of current message
151 */
152struct stu300_dev {
153 struct platform_device *pdev;
154 struct i2c_adapter adapter;
155 resource_size_t phybase;
156 resource_size_t physize;
157 void __iomem *virtbase;
158 struct clk *clk;
159 int irq;
160 spinlock_t cmd_issue_lock;
161 struct completion cmd_complete;
162 enum stu300_event cmd_event;
163 enum stu300_error cmd_err;
164 unsigned int speed;
165 int msg_index;
166 int msg_len;
167};
168
169/* Local forward function declarations */
170static int stu300_init_hw(struct stu300_dev *dev);
171
172/*
173 * The block needs writes in both MSW and LSW in order
174 * for all data lines to reach their destination.
175 */
176static inline void stu300_wr8(u32 value, void __iomem *address)
177{
178 writel((value << 16) | value, address);
179}
180
181/*
182 * This merely masks off the duplicates which appear
183 * in bytes 1-3. You _MUST_ use 32-bit bus access on this
184 * device, else it will not work.
185 */
186static inline u32 stu300_r8(void __iomem *address)
187{
188 return readl(address) & 0x000000FFU;
189}
190
/*
 * Tells whether a certain event or events occurred in
 * response to a command. The events represent states in
 * the internal state machine of the hardware. The events
 * are not very well described in the hardware
 * documentation and can only be treated as abstract state
 * machine states.
 *
 * Side effect: dev->cmd_err is updated when the status registers
 * flag acknowledge failure, bus error or lost arbitration.
 *
 * @ret 0 = event has not occurred, any other value means
 * the event occurred.
 */
static int stu300_event_occurred(struct stu300_dev *dev,
				 enum stu300_event mr_event) {
	u32 status1;
	u32 status2;

	/* What event happened? */
	status1 = stu300_r8(dev->virtbase + I2C_SR1);
	if (!(status1 & I2C_SR1_EVF_IND))
		/* No event at all */
		return 0;
	/* SR2 is only consulted once SR1 flags an event */
	status2 = stu300_r8(dev->virtbase + I2C_SR2);

	switch (mr_event) {
	case STU300_EVENT_1:
		/* ADSL bit set in SR1 */
		if (status1 & I2C_SR1_ADSL_IND)
			return 1;
		break;
	case STU300_EVENT_2:
	case STU300_EVENT_3:
	case STU300_EVENT_7:
	case STU300_EVENT_8:
		/* Byte transfer finished (BTF); also latch any error */
		if (status1 & I2C_SR1_BTF_IND) {
			if (status2 & I2C_SR2_AF_IND)
				dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE;
			else if (status2 & I2C_SR2_BERR_IND)
				dev->cmd_err = STU300_ERROR_BUS_ERROR;
			return 1;
		}
		break;
	case STU300_EVENT_4:
		/* Stop flag (STOPF) in SR2 */
		if (status2 & I2C_SR2_STOPF_IND)
			return 1;
		break;
	case STU300_EVENT_5:
		if (status1 & I2C_SR1_SB_IND)
			/* Clear start bit */
			return 1;
		break;
	case STU300_EVENT_6:
		/* End-of-address (ENDAD) in SR2 */
		if (status2 & I2C_SR2_ENDAD_IND) {
			/* First check for any errors */
			if (status2 & I2C_SR2_AF_IND)
				dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE;
			return 1;
		}
		break;
	case STU300_EVENT_9:
		/* ADD10 bit in SR1, used during 10-bit addressing */
		if (status1 & I2C_SR1_ADD10_IND)
			return 1;
		break;
	default:
		break;
	}
	/* Arbitration loss is reported regardless of the awaited event */
	if (status2 & I2C_SR2_ARLO_IND)
		dev->cmd_err = STU300_ERROR_ARBITRATION_LOST;
	return 0;
}
259
260static irqreturn_t stu300_irh(int irq, void *data)
261{
262 struct stu300_dev *dev = data;
263 int res;
264
265 /* See if this was what we were waiting for */
266 spin_lock(&dev->cmd_issue_lock);
267 if (dev->cmd_event != STU300_EVENT_NONE) {
268 res = stu300_event_occurred(dev, dev->cmd_event);
269 if (res || dev->cmd_err != STU300_ERROR_NONE) {
270 u32 val;
271
272 complete(&dev->cmd_complete);
273 /* Block any multiple interrupts */
274 val = stu300_r8(dev->virtbase + I2C_CR);
275 val &= ~I2C_CR_INTERRUPT_ENABLE;
276 stu300_wr8(val, dev->virtbase + I2C_CR);
277 }
278 }
279 spin_unlock(&dev->cmd_issue_lock);
280 return IRQ_HANDLED;
281}
282
/*
 * Writes cr_value to the control register to issue a command, then
 * sleeps on the cmd_complete completion until the IRQ handler reports
 * that mr_event (or an error) occurred.
 * (The old comment mentioned a *flagmask* parameter that does not
 * exist; the awaited condition is mr_event.)
 *
 * Returns 0 on success, a negative errno on signal/timeout/controller
 * error. On timeout or controller error the hardware is re-initialized.
 */
static int stu300_start_and_await_event(struct stu300_dev *dev,
					u8 cr_value,
					enum stu300_event mr_event)
{
	int ret;

	if (unlikely(irqs_disabled())) {
		/* TODO: implement polling for this case if need be. */
		WARN(1, "irqs are disabled, cannot poll for event\n");
		return -EIO;
	}

	/* Lock command issue, fill in an event we wait for */
	spin_lock_irq(&dev->cmd_issue_lock);
	init_completion(&dev->cmd_complete);
	dev->cmd_err = STU300_ERROR_NONE;
	dev->cmd_event = mr_event;
	spin_unlock_irq(&dev->cmd_issue_lock);

	/* Turn on interrupt, send command and wait. */
	cr_value |= I2C_CR_INTERRUPT_ENABLE;
	stu300_wr8(cr_value, dev->virtbase + I2C_CR);
	ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
							STU300_TIMEOUT);

	/* Negative return: interrupted by a signal */
	if (ret < 0) {
		dev_err(&dev->pdev->dev,
			"wait_for_completion_interruptible_timeout() "
			"returned %d waiting for event %04x\n", ret, mr_event);
		return ret;
	}

	/* Zero return: the completion timed out */
	if (ret == 0) {
		dev_err(&dev->pdev->dev, "controller timed out "
			"waiting for event %d, reinit hardware\n", mr_event);
		(void) stu300_init_hw(dev);
		return -ETIMEDOUT;
	}

	/* Completed, but the IRQ handler latched a controller error */
	if (dev->cmd_err != STU300_ERROR_NONE) {
		dev_err(&dev->pdev->dev, "controller (start) "
			"error %d waiting for event %d, reinit hardware\n",
			dev->cmd_err, mr_event);
		(void) stu300_init_hw(dev);
		return -EIO;
	}

	return 0;
}
336
/*
 * This waits for a flag to be set, if it is not set on entry, an interrupt is
 * configured to wait for the flag using a completion.
 *
 * The event is checked once before and once after enabling the IRQ to
 * close the race where it fires between the check and the IRQ enable.
 * Returns 0 on success, a negative errno on signal/timeout/error.
 */
static int stu300_await_event(struct stu300_dev *dev,
			      enum stu300_event mr_event)
{
	int ret;
	u32 val;

	if (unlikely(irqs_disabled())) {
		/* TODO: implement polling for this case if need be. */
		dev_err(&dev->pdev->dev, "irqs are disabled on this "
			"system!\n");
		return -EIO;
	}

	/* Is it already here? */
	spin_lock_irq(&dev->cmd_issue_lock);
	dev->cmd_err = STU300_ERROR_NONE;
	if (stu300_event_occurred(dev, mr_event)) {
		spin_unlock_irq(&dev->cmd_issue_lock);
		goto exit_await_check_err;
	}
	init_completion(&dev->cmd_complete);
	/* NOTE(review): cmd_err is reset a second time here; the first
	 * reset above appears sufficient — confirm before removing. */
	dev->cmd_err = STU300_ERROR_NONE;
	dev->cmd_event = mr_event;

	/* Turn on the I2C interrupt for current operation */
	val = stu300_r8(dev->virtbase + I2C_CR);
	val |= I2C_CR_INTERRUPT_ENABLE;
	stu300_wr8(val, dev->virtbase + I2C_CR);

	/* Twice paranoia (possible HW glitch) */
	stu300_wr8(val, dev->virtbase + I2C_CR);

	/* Check again: is it already here? */
	if (unlikely(stu300_event_occurred(dev, mr_event))) {
		/* Disable IRQ again. */
		val &= ~I2C_CR_INTERRUPT_ENABLE;
		stu300_wr8(val, dev->virtbase + I2C_CR);
		spin_unlock_irq(&dev->cmd_issue_lock);
		goto exit_await_check_err;
	}

	/* Unlock the command block and wait for the event to occur */
	spin_unlock_irq(&dev->cmd_issue_lock);
	ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
							STU300_TIMEOUT);

	/* Negative return: interrupted by a signal */
	if (ret < 0) {
		dev_err(&dev->pdev->dev,
			"wait_for_completion_interruptible_timeout()"
			"returned %d waiting for event %04x\n", ret, mr_event);
		return ret;
	}

	/* Zero return: timed out. EVENT_6 timeouts are expected in some
	 * flows and do not trigger a hardware re-init (see callers). */
	if (ret == 0) {
		if (mr_event != STU300_EVENT_6) {
			dev_err(&dev->pdev->dev, "controller "
				"timed out waiting for event %d, reinit "
				"hardware\n", mr_event);
			(void) stu300_init_hw(dev);
		}
		return -ETIMEDOUT;
	}

 exit_await_check_err:
	if (dev->cmd_err != STU300_ERROR_NONE) {
		if (mr_event != STU300_EVENT_6) {
			dev_err(&dev->pdev->dev, "controller "
				"error (await_event) %d waiting for event %d, "
				"reinit hardware\n", dev->cmd_err, mr_event);
			(void) stu300_init_hw(dev);
		}
		return -EIO;
	}

	return 0;
}
417
418/*
419 * Waits for the busy bit to go low by repeated polling.
420 */
421#define BUSY_RELEASE_ATTEMPTS 10
422static int stu300_wait_while_busy(struct stu300_dev *dev)
423{
424 unsigned long timeout;
425 int i;
426
427 for (i = 0; i < BUSY_RELEASE_ATTEMPTS; i++) {
428 timeout = jiffies + STU300_TIMEOUT;
429
430 while (!time_after(jiffies, timeout)) {
431 /* Is not busy? */
432 if ((stu300_r8(dev->virtbase + I2C_SR1) &
433 I2C_SR1_BUSY_IND) == 0)
434 return 0;
435 msleep(1);
436 }
437
438 dev_err(&dev->pdev->dev, "transaction timed out "
439 "waiting for device to be free (not busy). "
440 "Attempt: %d\n", i+1);
441
442 dev_err(&dev->pdev->dev, "base address = "
443 "0x%08x, reinit hardware\n", (u32) dev->virtbase);
444
445 (void) stu300_init_hw(dev);
446 }
447
448 dev_err(&dev->pdev->dev, "giving up after %d attempts "
449 "to reset the bus.\n", BUSY_RELEASE_ATTEMPTS);
450
451 return -ETIMEDOUT;
452}
453
/*
 * Maps a hardware block clock rate (lower bound, in Hz) to the value
 * written into the I2C_OAR2 frequency field (see stu300_set_clk()).
 */
struct stu300_clkset {
	unsigned long rate;
	u32 setting;
};

/*
 * Sorted by ascending rate. The 0xFFU entries at both ends are
 * sentinels: rates below 2.5 MHz or at/above 100 MHz are rejected
 * by stu300_set_clk().
 */
static const struct stu300_clkset stu300_clktable[] = {
	{ 0, 0xFFU },
	{ 2500000, I2C_OAR2_FR_25_10MHZ },
	{ 10000000, I2C_OAR2_FR_10_1667MHZ },
	{ 16670000, I2C_OAR2_FR_1667_2667MHZ },
	{ 26670000, I2C_OAR2_FR_2667_40MHZ },
	{ 40000000, I2C_OAR2_FR_40_5333MHZ },
	{ 53330000, I2C_OAR2_FR_5333_66MHZ },
	{ 66000000, I2C_OAR2_FR_66_80MHZ },
	{ 80000000, I2C_OAR2_FR_80_100MHZ },
	{ 100000000, 0xFFU },
};
471
472static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
473{
474
475 u32 val;
476 int i = 0;
477
478 /* Locate the apropriate clock setting */
479 while (i < ARRAY_SIZE(stu300_clktable) &&
480 stu300_clktable[i].rate < clkrate)
481 i++;
482
483 if (stu300_clktable[i].setting == 0xFFU) {
484 dev_err(&dev->pdev->dev, "too %s clock rate requested "
485 "(%lu Hz).\n", i ? "high" : "low", clkrate);
486 return -EINVAL;
487 }
488
489 stu300_wr8(stu300_clktable[i].setting,
490 dev->virtbase + I2C_OAR2);
491
492 dev_dbg(&dev->pdev->dev, "Clock rate %lu Hz, I2C bus speed %d Hz "
493 "virtbase %p\n", clkrate, dev->speed, dev->virtbase);
494
495 if (dev->speed > 100000)
496 /* Fast Mode I2C */
497 val = ((clkrate/dev->speed)-9)/3;
498 else
499 /* Standard Mode I2C */
500 val = ((clkrate/dev->speed)-7)/2;
501
502 /* According to spec the divider must be > 2 */
503 if (val < 0x002) {
504 dev_err(&dev->pdev->dev, "too low clock rate (%lu Hz).\n",
505 clkrate);
506 return -EINVAL;
507 }
508
509 /* We have 12 bits clock divider only! */
510 if (val & 0xFFFFF000U) {
511 dev_err(&dev->pdev->dev, "too high clock rate (%lu Hz).\n",
512 clkrate);
513 return -EINVAL;
514 }
515
516 if (dev->speed > 100000) {
517 /* CC6..CC0 */
518 stu300_wr8((val & I2C_CCR_CC_MASK) | I2C_CCR_FMSM,
519 dev->virtbase + I2C_CCR);
520 dev_dbg(&dev->pdev->dev, "set clock divider to 0x%08x, "
521 "Fast Mode I2C\n", val);
522 } else {
523 /* CC6..CC0 */
524 stu300_wr8((val & I2C_CCR_CC_MASK),
525 dev->virtbase + I2C_CCR);
526 dev_dbg(&dev->pdev->dev, "set clock divider to "
527 "0x%08x, Standard Mode I2C\n", val);
528 }
529
530 /* CC11..CC7 */
531 stu300_wr8(((val >> 7) & 0x1F),
532 dev->virtbase + I2C_ECCR);
533
534 return 0;
535}
536
537
/*
 * stu300_init_hw() - reset the controller to a known state
 *
 * Disables the controller, clears the own-address register (slave mode
 * is unused), reprograms the clock dividers from the current block
 * clock rate, re-enables the peripheral and flushes stale status by
 * dummy-reading SR2 and SR1.
 * Returns 0 on success or the error from stu300_set_clk().
 */
static int stu300_init_hw(struct stu300_dev *dev)
{
	u32 dummy;
	unsigned long clkrate;
	int ret;

	/* Disable controller */
	stu300_wr8(0x00, dev->virtbase + I2C_CR);
	/*
	 * Set own address to some default value (0x00).
	 * We do not support slave mode anyway.
	 */
	stu300_wr8(0x00, dev->virtbase + I2C_OAR1);
	/*
	 * The I2C controller only operates properly in 26 MHz but we
	 * program this driver as if we didn't know. This will also set the two
	 * high bits of the own address to zero as well.
	 * There is no known hardware issue with running in 13 MHz
	 * However, speeds over 200 kHz are not used.
	 */
	clkrate = clk_get_rate(dev->clk);
	ret = stu300_set_clk(dev, clkrate);
	if (ret)
		return ret;
	/*
	 * Enable block, do it TWICE (hardware glitch)
	 * Setting bit 7 can enable DDC mode. (Not used currently.)
	 */
	stu300_wr8(I2C_CR_PERIPHERAL_ENABLE,
		   dev->virtbase + I2C_CR);
	stu300_wr8(I2C_CR_PERIPHERAL_ENABLE,
		   dev->virtbase + I2C_CR);
	/* Make a dummy read of the status register SR1 & SR2 */
	dummy = stu300_r8(dev->virtbase + I2C_SR2);
	dummy = stu300_r8(dev->virtbase + I2C_SR1);

	return 0;
}
576
577
578
/*
 * Send slave address.
 *
 * Writes the (7- or 10-bit) slave address plus R/W direction bit to the
 * data register, then awaits the end-of-address event. For 10-bit
 * addressing the high-bits header is sent first, event 9 awaited, and
 * the low bits follow. @resend is non-zero on retry attempts and only
 * affects debug output. Returns 0 on success or a negative errno.
 */
static int stu300_send_address(struct stu300_dev *dev,
			       struct i2c_msg *msg, int resend)
{
	u32 val;
	int ret;

	if (msg->flags & I2C_M_TEN)
		/* This is probably how 10 bit addresses look */
		val = (0xf0 | (((u32) msg->addr & 0x300) >> 7)) &
			I2C_DR_D_MASK;
	else
		val = ((msg->addr << 1) & I2C_DR_D_MASK);

	if (msg->flags & I2C_M_RD) {
		/* This is the direction bit */
		val |= 0x01;
		if (resend)
			dev_dbg(&dev->pdev->dev, "read resend\n");
	} else if (resend)
		dev_dbg(&dev->pdev->dev, "write resend\n");
	stu300_wr8(val, dev->virtbase + I2C_DR);

	/* For 10bit addressing, await 10bit request (EVENT 9) */
	if (msg->flags & I2C_M_TEN) {
		ret = stu300_await_event(dev, STU300_EVENT_9);
		/*
		 * The slave device wants a 10bit address, send the rest
		 * of the bits (the LSBits)
		 */
		val = msg->addr & I2C_DR_D_MASK;
		/* This clears "event 9" */
		stu300_wr8(val, dev->virtbase + I2C_DR);
		/* The LSBits are written even on error so that event 9
		 * is cleared before we bail out */
		if (ret != 0)
			return ret;
	}
	/* FIXME: Why no else here? two events for 10bit?
	 * Await event 6 (normal) or event 9 (10bit)
	 */

	if (resend)
		dev_dbg(&dev->pdev->dev, "await event 6\n");
	ret = stu300_await_event(dev, STU300_EVENT_6);

	/*
	 * Clear any pending EVENT 6 no matter what happend during
	 * await_event.
	 */
	val = stu300_r8(dev->virtbase + I2C_CR);
	val |= I2C_CR_PERIPHERAL_ENABLE;
	stu300_wr8(val, dev->virtbase + I2C_CR);

	return ret;
}
633
/*
 * stu300_xfer_msg() - transfer a single I2C message
 * @adap: the adapter to transfer on
 * @msg: the message to send or receive
 * @stop: non-zero if a STOP condition shall terminate this message
 *
 * Drives one complete message: waits for a free bus, re-inits the
 * hardware, sends start + address (retrying the address up to
 * NUM_ADDR_RESEND_ATTEMPTS times, see comment below), then clocks the
 * payload bytes in or out one at a time. The controller is disabled
 * and its clock gated again on every exit path.
 * Returns 0 on success or a negative errno.
 */
static int stu300_xfer_msg(struct i2c_adapter *adap,
			   struct i2c_msg *msg, int stop)
{
	u32 cr;
	u32 val;
	u32 i;
	int ret;
	int attempts = 0;
	struct stu300_dev *dev = i2c_get_adapdata(adap);


	clk_enable(dev->clk);

	/* Remove this if (0) to trace each and every message. */
	if (0) {
		dev_dbg(&dev->pdev->dev, "I2C message to: 0x%04x, len: %d, "
			"flags: 0x%04x, stop: %d\n",
			msg->addr, msg->len, msg->flags, stop);
	}

	/* Zero-length messages are not supported by this hardware */
	if (msg->len == 0) {
		ret = -EINVAL;
		goto exit_disable;
	}

	/*
	 * For some reason, sending the address sometimes fails when running
	 * on the 13 MHz clock. No interrupt arrives. This is a work around,
	 * which tries to restart and send the address up to 10 times before
	 * really giving up. Usually 5 to 8 attempts are enough.
	 */
	do {
		if (attempts)
			dev_dbg(&dev->pdev->dev, "wait while busy\n");
		/* Check that the bus is free, or wait until some timeout */
		ret = stu300_wait_while_busy(dev);
		if (ret != 0)
			goto exit_disable;

		if (attempts)
			dev_dbg(&dev->pdev->dev, "re-int hw\n");
		/*
		 * According to ST, there is no problem if the clock is
		 * changed between 13 and 26 MHz during a transfer.
		 */
		ret = stu300_init_hw(dev);
		if (ret)
			goto exit_disable;

		/* Send a start condition */
		cr = I2C_CR_PERIPHERAL_ENABLE;
		/* Setting the START bit puts the block in master mode */
		if (!(msg->flags & I2C_M_NOSTART))
			cr |= I2C_CR_START_ENABLE;
		if ((msg->flags & I2C_M_RD) && (msg->len > 1))
			/* On read more than 1 byte, we need ack. */
			cr |= I2C_CR_ACK_ENABLE;
		/* Check that it gets through */
		if (!(msg->flags & I2C_M_NOSTART)) {
			if (attempts)
				dev_dbg(&dev->pdev->dev, "send start event\n");
			ret = stu300_start_and_await_event(dev, cr,
							   STU300_EVENT_5);
		}

		if (attempts)
			dev_dbg(&dev->pdev->dev, "send address\n");

		if (ret == 0)
			/* Send address */
			ret = stu300_send_address(dev, msg, attempts != 0);

		if (ret != 0) {
			attempts++;
			dev_dbg(&dev->pdev->dev, "failed sending address, "
				"retrying. Attempt: %d msg_index: %d/%d\n",
				attempts, dev->msg_index, dev->msg_len);
		}

	} while (ret != 0 && attempts < NUM_ADDR_RESEND_ATTEMPTS);

	if (attempts < NUM_ADDR_RESEND_ATTEMPTS && attempts > 0) {
		dev_dbg(&dev->pdev->dev, "managed to get address "
			"through after %d attempts\n", attempts);
	} else if (attempts == NUM_ADDR_RESEND_ATTEMPTS) {
		dev_dbg(&dev->pdev->dev, "I give up, tried %d times "
			"to resend address.\n",
			NUM_ADDR_RESEND_ATTEMPTS);
		goto exit_disable;
	}

	if (msg->flags & I2C_M_RD) {
		/* READ: we read the actual bytes one at a time */
		for (i = 0; i < msg->len; i++) {
			if (i == msg->len-1) {
				/*
				 * Disable ACK and set STOP condition before
				 * reading last byte
				 */
				val = I2C_CR_PERIPHERAL_ENABLE;

				if (stop)
					val |= I2C_CR_STOP_ENABLE;

				stu300_wr8(val,
					   dev->virtbase + I2C_CR);
			}
			/* Wait for this byte... */
			ret = stu300_await_event(dev, STU300_EVENT_7);
			if (ret != 0)
				goto exit_disable;
			/* This clears event 7 */
			msg->buf[i] = (u8) stu300_r8(dev->virtbase + I2C_DR);
		}
	} else {
		/* WRITE: we send the actual bytes one at a time */
		for (i = 0; i < msg->len; i++) {
			/* Write the byte */
			stu300_wr8(msg->buf[i],
				   dev->virtbase + I2C_DR);
			/* Check status */
			ret = stu300_await_event(dev, STU300_EVENT_8);
			/* Next write to DR will clear event 8 */
			if (ret != 0) {
				dev_err(&dev->pdev->dev, "error awaiting "
					"event 8 (%d)\n", ret);
				goto exit_disable;
			}
		}
		/* Check NAK */
		if (!(msg->flags & I2C_M_IGNORE_NAK)) {
			if (stu300_r8(dev->virtbase + I2C_SR2) &
			    I2C_SR2_AF_IND) {
				dev_err(&dev->pdev->dev, "I2C payload "
					"send returned NAK!\n");
				ret = -EIO;
				goto exit_disable;
			}
		}
		if (stop) {
			/* Send stop condition */
			val = I2C_CR_PERIPHERAL_ENABLE;
			val |= I2C_CR_STOP_ENABLE;
			stu300_wr8(val, dev->virtbase + I2C_CR);
		}
	}

	/* Check that the bus is free, or wait until some timeout occurs */
	ret = stu300_wait_while_busy(dev);
	if (ret != 0) {
		dev_err(&dev->pdev->dev, "timout waiting for transfer "
			"to commence.\n");
		goto exit_disable;
	}

	/* Dummy read status registers */
	val = stu300_r8(dev->virtbase + I2C_SR2);
	val = stu300_r8(dev->virtbase + I2C_SR1);
	ret = 0;

 exit_disable:
	/* Disable controller */
	stu300_wr8(0x00, dev->virtbase + I2C_CR);
	clk_disable(dev->clk);
	return ret;
}
801
802static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
803 int num)
804{
805 int ret = -1;
806 int i;
807 struct stu300_dev *dev = i2c_get_adapdata(adap);
808 dev->msg_len = num;
809 for (i = 0; i < num; i++) {
810 /*
811 * Another driver appears to send stop for each message,
812 * here we only do that for the last message. Possibly some
813 * peripherals require this behaviour, then their drivers
814 * have to send single messages in order to get "stop" for
815 * each message.
816 */
817 dev->msg_index = i;
818
819 ret = stu300_xfer_msg(adap, &msgs[i], (i == (num - 1)));
820 if (ret != 0) {
821 num = ret;
822 break;
823 }
824 }
825
826 return num;
827}
828
829static u32 stu300_func(struct i2c_adapter *adap)
830{
831 /* This is the simplest thing you can think of... */
832 return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
833}
834
/* Algorithm hooks registered with the I2C core */
static const struct i2c_algorithm stu300_algo = {
	.master_xfer	= stu300_xfer,
	.functionality	= stu300_func,
};
839
840static int __init
841stu300_probe(struct platform_device *pdev)
842{
843 struct stu300_dev *dev;
844 struct i2c_adapter *adap;
845 struct resource *res;
846 int bus_nr;
847 int ret = 0;
848
849 dev = kzalloc(sizeof(struct stu300_dev), GFP_KERNEL);
850 if (!dev) {
851 dev_err(&pdev->dev, "could not allocate device struct\n");
852 ret = -ENOMEM;
853 goto err_no_devmem;
854 }
855
856 bus_nr = pdev->id;
857 dev->clk = clk_get(&pdev->dev, NULL);
858 if (IS_ERR(dev->clk)) {
859 ret = PTR_ERR(dev->clk);
860 dev_err(&pdev->dev, "could not retrieve i2c bus clock\n");
861 goto err_no_clk;
862 }
863
864 dev->pdev = pdev;
865 platform_set_drvdata(pdev, dev);
866
867 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
868 if (!res) {
869 ret = -ENOENT;
870 goto err_no_resource;
871 }
872
873 dev->phybase = res->start;
874 dev->physize = resource_size(res);
875
876 if (request_mem_region(dev->phybase, dev->physize,
877 NAME " I/O Area") == NULL) {
878 ret = -EBUSY;
879 goto err_no_ioregion;
880 }
881
882 dev->virtbase = ioremap(dev->phybase, dev->physize);
883 dev_dbg(&pdev->dev, "initialize bus device I2C%d on virtual "
884 "base %p\n", bus_nr, dev->virtbase);
885 if (!dev->virtbase) {
886 ret = -ENOMEM;
887 goto err_no_ioremap;
888 }
889
890 dev->irq = platform_get_irq(pdev, 0);
891 if (request_irq(dev->irq, stu300_irh, IRQF_DISABLED,
892 NAME, dev)) {
893 ret = -EIO;
894 goto err_no_irq;
895 }
896
897 dev->speed = scl_frequency;
898
899 clk_enable(dev->clk);
900 ret = stu300_init_hw(dev);
901 clk_disable(dev->clk);
902
903 if (ret != 0) {
904 dev_err(&dev->pdev->dev, "error initializing hardware.\n");
905 goto err_init_hw;
906 }
907
908 /* IRQ event handling initialization */
909 spin_lock_init(&dev->cmd_issue_lock);
910 dev->cmd_event = STU300_EVENT_NONE;
911 dev->cmd_err = STU300_ERROR_NONE;
912
913 adap = &dev->adapter;
914 adap->owner = THIS_MODULE;
915 /* DDC class but actually often used for more generic I2C */
916 adap->class = I2C_CLASS_DDC;
917 strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
918 sizeof(adap->name));
919 adap->nr = bus_nr;
920 adap->algo = &stu300_algo;
921 adap->dev.parent = &pdev->dev;
922 i2c_set_adapdata(adap, dev);
923
924 /* i2c device drivers may be active on return from add_adapter() */
925 ret = i2c_add_numbered_adapter(adap);
926 if (ret) {
927 dev_err(&dev->pdev->dev, "failure adding ST Micro DDC "
928 "I2C adapter\n");
929 goto err_add_adapter;
930 }
931 return 0;
932
933 err_add_adapter:
934 err_init_hw:
935 free_irq(dev->irq, dev);
936 err_no_irq:
937 iounmap(dev->virtbase);
938 err_no_ioremap:
939 release_mem_region(dev->phybase, dev->physize);
940 err_no_ioregion:
941 platform_set_drvdata(pdev, NULL);
942 err_no_resource:
943 clk_put(dev->clk);
944 err_no_clk:
945 kfree(dev);
946 err_no_devmem:
947 dev_err(&pdev->dev, "failed to add " NAME " adapter: %d\n",
948 pdev->id);
949 return ret;
950}
951
952#ifdef CONFIG_PM
/*
 * Power-management suspend hook: simply disables the controller.
 * NOTE(review): the block clock is normally gated outside of transfers
 * (it is enabled/disabled around each xfer) — confirm that writing
 * I2C_CR with the clock gated is safe on this hardware.
 */
static int stu300_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct stu300_dev *dev = platform_get_drvdata(pdev);

	/* Turn off everything */
	stu300_wr8(0x00, dev->virtbase + I2C_CR);
	return 0;
}
961
/*
 * Power-management resume hook: re-initializes the controller from
 * scratch (clock dividers, enable, status flush) with the block clock
 * temporarily enabled.
 */
static int stu300_resume(struct platform_device *pdev)
{
	int ret = 0;
	struct stu300_dev *dev = platform_get_drvdata(pdev);

	clk_enable(dev->clk);
	ret = stu300_init_hw(dev);
	clk_disable(dev->clk);

	if (ret != 0)
		dev_err(&pdev->dev, "error re-initializing hardware.\n");
	return ret;
}
975#else
976#define stu300_suspend NULL
977#define stu300_resume NULL
978#endif
979
/*
 * stu300_remove() - unbind the driver, releasing resources in reverse
 * order of stu300_probe(): adapter first (so no new transfers start),
 * then controller off, IRQ, mappings, memory region, clock, state.
 */
static int __exit
stu300_remove(struct platform_device *pdev)
{
	struct stu300_dev *dev = platform_get_drvdata(pdev);

	i2c_del_adapter(&dev->adapter);
	/* Turn off everything */
	stu300_wr8(0x00, dev->virtbase + I2C_CR);
	free_irq(dev->irq, dev);
	iounmap(dev->virtbase);
	release_mem_region(dev->phybase, dev->physize);
	clk_put(dev->clk);
	platform_set_drvdata(pdev, NULL);
	kfree(dev);
	return 0;
}
996
/*
 * No .probe here: the driver is registered via platform_driver_probe()
 * in stu300_init(), which is why stu300_probe() can be __init.
 */
static struct platform_driver stu300_i2c_driver = {
	.driver = {
		.name	= NAME,
		.owner	= THIS_MODULE,
	},
	.remove		= __exit_p(stu300_remove),
	.suspend        = stu300_suspend,
	.resume         = stu300_resume,

};
1007
/* Module init: bind to any already-registered matching platform device */
static int __init stu300_init(void)
{
	return platform_driver_probe(&stu300_i2c_driver, stu300_probe);
}
1012
/* Module exit: unregister the platform driver */
static void __exit stu300_exit(void)
{
	platform_driver_unregister(&stu300_i2c_driver);
}
1017
/*
 * The systems using this bus often have very basic devices such
 * as regulators on the I2C bus, so this needs to be loaded early.
 * Therefore it is registered at the subsys_initcall() level.
 */
subsys_initcall(stu300_init);
module_exit(stu300_exit);

MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("ST Micro DDC I2C adapter (" NAME ")");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" NAME);
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index fede619ba227..70de82163463 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -76,7 +76,7 @@ static int i2c_versatile_probe(struct platform_device *dev)
76 goto err_out; 76 goto err_out;
77 } 77 }
78 78
79 if (!request_mem_region(r->start, r->end - r->start + 1, "versatile-i2c")) { 79 if (!request_mem_region(r->start, resource_size(r), "versatile-i2c")) {
80 ret = -EBUSY; 80 ret = -EBUSY;
81 goto err_out; 81 goto err_out;
82 } 82 }
@@ -87,7 +87,7 @@ static int i2c_versatile_probe(struct platform_device *dev)
87 goto err_release; 87 goto err_release;
88 } 88 }
89 89
90 i2c->base = ioremap(r->start, r->end - r->start + 1); 90 i2c->base = ioremap(r->start, resource_size(r));
91 if (!i2c->base) { 91 if (!i2c->base) {
92 ret = -ENOMEM; 92 ret = -ENOMEM;
93 goto err_free; 93 goto err_free;
@@ -118,7 +118,7 @@ static int i2c_versatile_probe(struct platform_device *dev)
118 err_free: 118 err_free:
119 kfree(i2c); 119 kfree(i2c);
120 err_release: 120 err_release:
121 release_mem_region(r->start, r->end - r->start + 1); 121 release_mem_region(r->start, resource_size(r));
122 err_out: 122 err_out:
123 return ret; 123 return ret;
124} 124}
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index ba1488bd8430..c14ca144cffe 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -3,7 +3,8 @@
3 3
4int generic_ide_suspend(struct device *dev, pm_message_t mesg) 4int generic_ide_suspend(struct device *dev, pm_message_t mesg)
5{ 5{
6 ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive); 6 ide_drive_t *drive = dev_get_drvdata(dev);
7 ide_drive_t *pair = ide_get_pair_dev(drive);
7 ide_hwif_t *hwif = drive->hwif; 8 ide_hwif_t *hwif = drive->hwif;
8 struct request *rq; 9 struct request *rq;
9 struct request_pm_state rqpm; 10 struct request_pm_state rqpm;
@@ -34,7 +35,8 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
34 35
35int generic_ide_resume(struct device *dev) 36int generic_ide_resume(struct device *dev)
36{ 37{
37 ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive); 38 ide_drive_t *drive = dev_get_drvdata(dev);
39 ide_drive_t *pair = ide_get_pair_dev(drive);
38 ide_hwif_t *hwif = drive->hwif; 40 ide_hwif_t *hwif = drive->hwif;
39 struct request *rq; 41 struct request *rq;
40 struct request_pm_state rqpm; 42 struct request_pm_state rqpm;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index f371b0de314f..79e0af3fd158 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -535,7 +535,7 @@ static int ide_register_port(ide_hwif_t *hwif)
535 535
536 /* register with global device tree */ 536 /* register with global device tree */
537 dev_set_name(&hwif->gendev, hwif->name); 537 dev_set_name(&hwif->gendev, hwif->name);
538 hwif->gendev.driver_data = hwif; 538 dev_set_drvdata(&hwif->gendev, hwif);
539 if (hwif->gendev.parent == NULL) 539 if (hwif->gendev.parent == NULL)
540 hwif->gendev.parent = hwif->dev; 540 hwif->gendev.parent = hwif->dev;
541 hwif->gendev.release = hwif_release_dev; 541 hwif->gendev.release = hwif_release_dev;
@@ -987,9 +987,9 @@ static void hwif_register_devices(ide_hwif_t *hwif)
987 int ret; 987 int ret;
988 988
989 dev_set_name(dev, "%u.%u", hwif->index, i); 989 dev_set_name(dev, "%u.%u", hwif->index, i);
990 dev_set_drvdata(dev, drive);
990 dev->parent = &hwif->gendev; 991 dev->parent = &hwif->gendev;
991 dev->bus = &ide_bus_type; 992 dev->bus = &ide_bus_type;
992 dev->driver_data = drive;
993 dev->release = drive_release_dev; 993 dev->release = drive_release_dev;
994 994
995 ret = device_register(dev); 995 ret = device_register(dev);
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index ee9b55ecc62b..b579fbe88370 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -112,7 +112,7 @@ out:
112 112
113static int __devexit plat_ide_remove(struct platform_device *pdev) 113static int __devexit plat_ide_remove(struct platform_device *pdev)
114{ 114{
115 struct ide_host *host = pdev->dev.driver_data; 115 struct ide_host *host = dev_get_drvdata(&pdev->dev);
116 116
117 ide_host_remove(host); 117 ide_host_remove(host);
118 118
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index a6dfeb0b3372..e76cac64c533 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -35,6 +35,7 @@
35 35
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/kernel.h> 37#include <linux/kernel.h>
38#include <linux/kmemcheck.h>
38#include <linux/string.h> 39#include <linux/string.h>
39#include <asm/bug.h> 40#include <asm/bug.h>
40#include <asm/byteorder.h> 41#include <asm/byteorder.h>
@@ -387,6 +388,7 @@ csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
387 if (!kv) 388 if (!kv)
388 return NULL; 389 return NULL;
389 390
391 kmemcheck_annotate_variable(kv->value.leaf.data[0]);
390 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype); 392 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
391 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id); 393 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
392 394
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 4ca103577c0a..f5c586c2bba6 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -361,7 +361,7 @@ static int eth1394_new_node(struct eth1394_host_info *hi,
361 node_info->pdg.sz = 0; 361 node_info->pdg.sz = 0;
362 node_info->fifo = CSR1212_INVALID_ADDR_SPACE; 362 node_info->fifo = CSR1212_INVALID_ADDR_SPACE;
363 363
364 ud->device.driver_data = node_info; 364 dev_set_drvdata(&ud->device, node_info);
365 new_node->ud = ud; 365 new_node->ud = ud;
366 366
367 priv = netdev_priv(hi->dev); 367 priv = netdev_priv(hi->dev);
@@ -406,7 +406,7 @@ static int eth1394_remove(struct device *dev)
406 list_del(&old_node->list); 406 list_del(&old_node->list);
407 kfree(old_node); 407 kfree(old_node);
408 408
409 node_info = (struct eth1394_node_info*)ud->device.driver_data; 409 node_info = dev_get_drvdata(&ud->device);
410 410
411 spin_lock_irqsave(&node_info->pdg.lock, flags); 411 spin_lock_irqsave(&node_info->pdg.lock, flags);
412 /* The partial datagram list should be empty, but we'll just 412 /* The partial datagram list should be empty, but we'll just
@@ -416,7 +416,7 @@ static int eth1394_remove(struct device *dev)
416 spin_unlock_irqrestore(&node_info->pdg.lock, flags); 416 spin_unlock_irqrestore(&node_info->pdg.lock, flags);
417 417
418 kfree(node_info); 418 kfree(node_info);
419 ud->device.driver_data = NULL; 419 dev_set_drvdata(&ud->device, NULL);
420 return 0; 420 return 0;
421} 421}
422 422
@@ -688,7 +688,7 @@ static void ether1394_host_reset(struct hpsb_host *host)
688 ether1394_reset_priv(dev, 0); 688 ether1394_reset_priv(dev, 0);
689 689
690 list_for_each_entry(node, &priv->ip_node_list, list) { 690 list_for_each_entry(node, &priv->ip_node_list, list) {
691 node_info = node->ud->device.driver_data; 691 node_info = dev_get_drvdata(&node->ud->device);
692 692
693 spin_lock_irqsave(&node_info->pdg.lock, flags); 693 spin_lock_irqsave(&node_info->pdg.lock, flags);
694 694
@@ -872,8 +872,7 @@ static __be16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev,
872 if (!node) 872 if (!node)
873 return cpu_to_be16(0); 873 return cpu_to_be16(0);
874 874
875 node_info = 875 node_info = dev_get_drvdata(&node->ud->device);
876 (struct eth1394_node_info *)node->ud->device.driver_data;
877 876
878 /* Update our speed/payload/fifo_offset table */ 877 /* Update our speed/payload/fifo_offset table */
879 node_info->maxpayload = maxpayload; 878 node_info->maxpayload = maxpayload;
@@ -1080,7 +1079,7 @@ static int ether1394_data_handler(struct net_device *dev, int srcid, int destid,
1080 priv->ud_list[NODEID_TO_NODE(srcid)] = ud; 1079 priv->ud_list[NODEID_TO_NODE(srcid)] = ud;
1081 } 1080 }
1082 1081
1083 node_info = (struct eth1394_node_info *)ud->device.driver_data; 1082 node_info = dev_get_drvdata(&ud->device);
1084 1083
1085 /* First, did we receive a fragmented or unfragmented datagram? */ 1084 /* First, did we receive a fragmented or unfragmented datagram? */
1086 hdr->words.word1 = ntohs(hdr->words.word1); 1085 hdr->words.word1 = ntohs(hdr->words.word1);
@@ -1617,8 +1616,7 @@ static int ether1394_tx(struct sk_buff *skb, struct net_device *dev)
1617 if (!node) 1616 if (!node)
1618 goto fail; 1617 goto fail;
1619 1618
1620 node_info = 1619 node_info = dev_get_drvdata(&node->ud->device);
1621 (struct eth1394_node_info *)node->ud->device.driver_data;
1622 if (node_info->fifo == CSR1212_INVALID_ADDR_SPACE) 1620 if (node_info->fifo == CSR1212_INVALID_ADDR_SPACE)
1623 goto fail; 1621 goto fail;
1624 1622
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index a6d55bebe61a..5122b5a8aa2d 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/bitmap.h> 11#include <linux/bitmap.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/kmemcheck.h>
13#include <linux/list.h> 14#include <linux/list.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/delay.h> 16#include <linux/delay.h>
@@ -39,7 +40,10 @@ struct nodemgr_csr_info {
39 struct hpsb_host *host; 40 struct hpsb_host *host;
40 nodeid_t nodeid; 41 nodeid_t nodeid;
41 unsigned int generation; 42 unsigned int generation;
43
44 kmemcheck_bitfield_begin(flags);
42 unsigned int speed_unverified:1; 45 unsigned int speed_unverified:1;
46 kmemcheck_bitfield_end(flags);
43}; 47};
44 48
45 49
@@ -1293,6 +1297,7 @@ static void nodemgr_node_scan_one(struct hpsb_host *host,
1293 u8 *speed; 1297 u8 *speed;
1294 1298
1295 ci = kmalloc(sizeof(*ci), GFP_KERNEL); 1299 ci = kmalloc(sizeof(*ci), GFP_KERNEL);
1300 kmemcheck_annotate_bitfield(ci, flags);
1296 if (!ci) 1301 if (!ci)
1297 return; 1302 return;
1298 1303
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index a51ab233342d..83b734aec923 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -718,7 +718,7 @@ static int sbp2_remove(struct device *dev)
718 struct scsi_device *sdev; 718 struct scsi_device *sdev;
719 719
720 ud = container_of(dev, struct unit_directory, device); 720 ud = container_of(dev, struct unit_directory, device);
721 lu = ud->device.driver_data; 721 lu = dev_get_drvdata(&ud->device);
722 if (!lu) 722 if (!lu)
723 return 0; 723 return 0;
724 724
@@ -746,7 +746,7 @@ static int sbp2_remove(struct device *dev)
746 746
747static int sbp2_update(struct unit_directory *ud) 747static int sbp2_update(struct unit_directory *ud)
748{ 748{
749 struct sbp2_lu *lu = ud->device.driver_data; 749 struct sbp2_lu *lu = dev_get_drvdata(&ud->device);
750 750
751 if (sbp2_reconnect_device(lu) != 0) { 751 if (sbp2_reconnect_device(lu) != 0) {
752 /* 752 /*
@@ -815,7 +815,7 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
815 atomic_set(&lu->state, SBP2LU_STATE_RUNNING); 815 atomic_set(&lu->state, SBP2LU_STATE_RUNNING);
816 INIT_WORK(&lu->protocol_work, NULL); 816 INIT_WORK(&lu->protocol_work, NULL);
817 817
818 ud->device.driver_data = lu; 818 dev_set_drvdata(&ud->device, lu);
819 819
820 hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host); 820 hi = hpsb_get_hostinfo(&sbp2_highlevel, ud->ne->host);
821 if (!hi) { 821 if (!hi) {
@@ -1051,7 +1051,7 @@ static void sbp2_remove_device(struct sbp2_lu *lu)
1051 hpsb_unregister_addrspace(&sbp2_highlevel, hi->host, 1051 hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
1052 lu->status_fifo_addr); 1052 lu->status_fifo_addr);
1053 1053
1054 lu->ud->device.driver_data = NULL; 1054 dev_set_drvdata(&lu->ud->device, NULL);
1055 1055
1056 module_put(hi->host->driver->owner); 1056 module_put(hi->host->driver->owner);
1057no_hi: 1057no_hi:
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 5c04cfb54cb9..158a214da2f7 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -760,9 +760,9 @@ int ib_device_register_sysfs(struct ib_device *device)
760 int i; 760 int i;
761 761
762 class_dev->class = &ib_class; 762 class_dev->class = &ib_class;
763 class_dev->driver_data = device;
764 class_dev->parent = device->dma_device; 763 class_dev->parent = device->dma_device;
765 dev_set_name(class_dev, device->name); 764 dev_set_name(class_dev, device->name);
765 dev_set_drvdata(class_dev, device);
766 766
767 INIT_LIST_HEAD(&device->port_list); 767 INIT_LIST_HEAD(&device->port_list);
768 768
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 85905ab9391f..ce4e6eff4792 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -636,7 +636,7 @@ static ssize_t ehca_show_##name(struct device *dev, \
636 struct hipz_query_hca *rblock; \ 636 struct hipz_query_hca *rblock; \
637 int data; \ 637 int data; \
638 \ 638 \
639 shca = dev->driver_data; \ 639 shca = dev_get_drvdata(dev); \
640 \ 640 \
641 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \ 641 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \
642 if (!rblock) { \ 642 if (!rblock) { \
@@ -680,7 +680,7 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
680 struct device_attribute *attr, 680 struct device_attribute *attr,
681 char *buf) 681 char *buf)
682{ 682{
683 struct ehca_shca *shca = dev->driver_data; 683 struct ehca_shca *shca = dev_get_drvdata(dev);
684 684
685 return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle); 685 return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
686 686
@@ -749,7 +749,7 @@ static int __devinit ehca_probe(struct of_device *dev,
749 749
750 shca->ofdev = dev; 750 shca->ofdev = dev;
751 shca->ipz_hca_handle.handle = *handle; 751 shca->ipz_hca_handle.handle = *handle;
752 dev->dev.driver_data = shca; 752 dev_set_drvdata(&dev->dev, shca);
753 753
754 ret = ehca_sense_attributes(shca); 754 ret = ehca_sense_attributes(shca);
755 if (ret < 0) { 755 if (ret < 0) {
@@ -878,7 +878,7 @@ probe1:
878 878
879static int __devexit ehca_remove(struct of_device *dev) 879static int __devexit ehca_remove(struct of_device *dev)
880{ 880{
881 struct ehca_shca *shca = dev->dev.driver_data; 881 struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
882 unsigned long flags; 882 unsigned long flags;
883 int ret; 883 int ret;
884 884
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 5d445f48789b..7c237e6ac711 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1265,8 +1265,14 @@ static struct device_type input_dev_type = {
1265 .uevent = input_dev_uevent, 1265 .uevent = input_dev_uevent,
1266}; 1266};
1267 1267
1268static char *input_nodename(struct device *dev)
1269{
1270 return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
1271}
1272
1268struct class input_class = { 1273struct class input_class = {
1269 .name = "input", 1274 .name = "input",
1275 .nodename = input_nodename,
1270}; 1276};
1271EXPORT_SYMBOL_GPL(input_class); 1277EXPORT_SYMBOL_GPL(input_class);
1272 1278
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 356b3a25efa2..1c0b529c06aa 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -35,7 +35,7 @@
35#include <linux/input.h> 35#include <linux/input.h>
36#include <linux/gameport.h> 36#include <linux/gameport.h>
37#include <linux/jiffies.h> 37#include <linux/jiffies.h>
38#include <asm/timex.h> 38#include <linux/timex.h>
39 39
40#define DRIVER_DESC "Analog joystick and gamepad driver" 40#define DRIVER_DESC "Analog joystick and gamepad driver"
41 41
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index d6a30cee7bc7..6d67af5387ad 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/input.h> 18#include <linux/input.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/timex.h>
20#include <asm/io.h> 21#include <asm/io.h>
21 22
22MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); 23MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 69af8385ab14..2957d48e0045 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -569,7 +569,7 @@ static int wm97xx_probe(struct device *dev)
569 mutex_init(&wm->codec_mutex); 569 mutex_init(&wm->codec_mutex);
570 570
571 wm->dev = dev; 571 wm->dev = dev;
572 dev->driver_data = wm; 572 dev_set_drvdata(dev, wm);
573 wm->ac97 = to_ac97_t(dev); 573 wm->ac97 = to_ac97_t(dev);
574 574
575 /* check that we have a supported codec */ 575 /* check that we have a supported codec */
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index 928d2ed8865f..b115726dc088 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -114,7 +114,7 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
114 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); 114 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
115 return -ENOMEM; 115 return -ENOMEM;
116 } 116 }
117 dev->dev.driver_data = info; 117 dev_set_drvdata(&dev->dev, info);
118 info->xbdev = dev; 118 info->xbdev = dev;
119 info->irq = -1; 119 info->irq = -1;
120 snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename); 120 snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
@@ -186,7 +186,7 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev,
186 186
187static int xenkbd_resume(struct xenbus_device *dev) 187static int xenkbd_resume(struct xenbus_device *dev)
188{ 188{
189 struct xenkbd_info *info = dev->dev.driver_data; 189 struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
190 190
191 xenkbd_disconnect_backend(info); 191 xenkbd_disconnect_backend(info);
192 memset(info->page, 0, PAGE_SIZE); 192 memset(info->page, 0, PAGE_SIZE);
@@ -195,7 +195,7 @@ static int xenkbd_resume(struct xenbus_device *dev)
195 195
196static int xenkbd_remove(struct xenbus_device *dev) 196static int xenkbd_remove(struct xenbus_device *dev)
197{ 197{
198 struct xenkbd_info *info = dev->dev.driver_data; 198 struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
199 199
200 xenkbd_disconnect_backend(info); 200 xenkbd_disconnect_backend(info);
201 if (info->kbd) 201 if (info->kbd)
@@ -266,7 +266,7 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *info)
266static void xenkbd_backend_changed(struct xenbus_device *dev, 266static void xenkbd_backend_changed(struct xenbus_device *dev,
267 enum xenbus_state backend_state) 267 enum xenbus_state backend_state)
268{ 268{
269 struct xenkbd_info *info = dev->dev.driver_data; 269 struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
270 int ret, val; 270 int ret, val;
271 271
272 switch (backend_state) { 272 switch (backend_state) {
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 823ceba6efa8..1128d3fba797 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1513,6 +1513,7 @@ static const struct file_operations _ctl_fops = {
1513static struct miscdevice _dm_misc = { 1513static struct miscdevice _dm_misc = {
1514 .minor = MISC_DYNAMIC_MINOR, 1514 .minor = MISC_DYNAMIC_MINOR,
1515 .name = DM_NAME, 1515 .name = DM_NAME,
1516 .devnode = "mapper/control",
1516 .fops = &_ctl_fops 1517 .fops = &_ctl_fops
1517}; 1518};
1518 1519
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 223c36ede5ae..ba69beeb0e21 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -2,8 +2,14 @@
2# Multimedia device configuration 2# Multimedia device configuration
3# 3#
4 4
5menu "Multimedia devices" 5menuconfig MEDIA_SUPPORT
6 tristate "Multimedia support"
6 depends on HAS_IOMEM 7 depends on HAS_IOMEM
8 help
9 If you want to use Video for Linux, DVB for Linux, or DAB adapters,
10 enable this option and other options below.
11
12if MEDIA_SUPPORT
7 13
8comment "Multimedia core support" 14comment "Multimedia core support"
9 15
@@ -136,4 +142,4 @@ config USB_DABUSB
136 module will be called dabusb. 142 module will be called dabusb.
137endif # DAB 143endif # DAB
138 144
139endmenu 145endif # MEDIA_SUPPORT
diff --git a/drivers/media/common/tuners/tuner-simple.c b/drivers/media/common/tuners/tuner-simple.c
index 78412c9c424a..149d54cdf7b9 100644
--- a/drivers/media/common/tuners/tuner-simple.c
+++ b/drivers/media/common/tuners/tuner-simple.c
@@ -416,6 +416,24 @@ static int simple_std_setup(struct dvb_frontend *fe,
416 return 0; 416 return 0;
417} 417}
418 418
419static int simple_set_aux_byte(struct dvb_frontend *fe, u8 config, u8 aux)
420{
421 struct tuner_simple_priv *priv = fe->tuner_priv;
422 int rc;
423 u8 buffer[2];
424
425 buffer[0] = (config & ~0x38) | 0x18;
426 buffer[1] = aux;
427
428 tuner_dbg("setting aux byte: 0x%02x 0x%02x\n", buffer[0], buffer[1]);
429
430 rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 2);
431 if (2 != rc)
432 tuner_warn("i2c i/o error: rc == %d (should be 2)\n", rc);
433
434 return rc == 2 ? 0 : rc;
435}
436
419static int simple_post_tune(struct dvb_frontend *fe, u8 *buffer, 437static int simple_post_tune(struct dvb_frontend *fe, u8 *buffer,
420 u16 div, u8 config, u8 cb) 438 u16 div, u8 config, u8 cb)
421{ 439{
@@ -424,17 +442,10 @@ static int simple_post_tune(struct dvb_frontend *fe, u8 *buffer,
424 442
425 switch (priv->type) { 443 switch (priv->type) {
426 case TUNER_LG_TDVS_H06XF: 444 case TUNER_LG_TDVS_H06XF:
427 /* Set the Auxiliary Byte. */ 445 simple_set_aux_byte(fe, config, 0x20);
428 buffer[0] = buffer[2]; 446 break;
429 buffer[0] &= ~0x20; 447 case TUNER_PHILIPS_FQ1216LME_MK3:
430 buffer[0] |= 0x18; 448 simple_set_aux_byte(fe, config, 0x60); /* External AGC */
431 buffer[1] = 0x20;
432 tuner_dbg("tv 0x%02x 0x%02x\n", buffer[0], buffer[1]);
433
434 rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 2);
435 if (2 != rc)
436 tuner_warn("i2c i/o error: rc == %d "
437 "(should be 2)\n", rc);
438 break; 449 break;
439 case TUNER_MICROTUNE_4042FI5: 450 case TUNER_MICROTUNE_4042FI5:
440 { 451 {
@@ -506,6 +517,11 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
506 case TUNER_THOMSON_DTT761X: 517 case TUNER_THOMSON_DTT761X:
507 buffer[3] = 0x39; 518 buffer[3] = 0x39;
508 break; 519 break;
520 case TUNER_PHILIPS_FQ1216LME_MK3:
521 tuner_err("This tuner doesn't have FM\n");
522 /* Set the low band for sanity, since it covers 88-108 MHz */
523 buffer[3] = 0x01;
524 break;
509 case TUNER_MICROTUNE_4049FM5: 525 case TUNER_MICROTUNE_4049FM5:
510 default: 526 default:
511 buffer[3] = 0xa4; 527 buffer[3] = 0xa4;
@@ -678,12 +694,12 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
678 return 0; 694 return 0;
679 } 695 }
680 696
681 /* Bandswitch byte */
682 simple_radio_bandswitch(fe, &buffer[0]);
683
684 buffer[2] = (t_params->ranges[0].config & ~TUNER_RATIO_MASK) | 697 buffer[2] = (t_params->ranges[0].config & ~TUNER_RATIO_MASK) |
685 TUNER_RATIO_SELECT_50; /* 50 kHz step */ 698 TUNER_RATIO_SELECT_50; /* 50 kHz step */
686 699
700 /* Bandswitch byte */
701 simple_radio_bandswitch(fe, &buffer[0]);
702
687 /* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps 703 /* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps
688 freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) = 704 freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) =
689 freq * (1/800) */ 705 freq * (1/800) */
diff --git a/drivers/media/common/tuners/tuner-types.c b/drivers/media/common/tuners/tuner-types.c
index 7c0bc064c008..6a7f1a417c27 100644
--- a/drivers/media/common/tuners/tuner-types.c
+++ b/drivers/media/common/tuners/tuner-types.c
@@ -578,6 +578,31 @@ static struct tuner_params tuner_fm1216me_mk3_params[] = {
578 }, 578 },
579}; 579};
580 580
581/* ------------ TUNER_PHILIPS_FM1216MK5 - Philips PAL ------------ */
582
583static struct tuner_range tuner_fm1216mk5_pal_ranges[] = {
584 { 16 * 158.00 /*MHz*/, 0xce, 0x01, },
585 { 16 * 441.00 /*MHz*/, 0xce, 0x02, },
586 { 16 * 864.00 , 0xce, 0x04, },
587};
588
589static struct tuner_params tuner_fm1216mk5_params[] = {
590 {
591 .type = TUNER_PARAM_TYPE_PAL,
592 .ranges = tuner_fm1216mk5_pal_ranges,
593 .count = ARRAY_SIZE(tuner_fm1216mk5_pal_ranges),
594 .cb_first_if_lower_freq = 1,
595 .has_tda9887 = 1,
596 .port1_active = 1,
597 .port2_active = 1,
598 .port2_invert_for_secam_lc = 1,
599 .port1_fm_high_sensitivity = 1,
600 .default_top_mid = -2,
601 .default_top_secam_mid = -2,
602 .default_top_secam_high = -2,
603 },
604};
605
581/* ------------ TUNER_LG_NTSC_NEW_TAPC - LGINNOTEK NTSC ------------ */ 606/* ------------ TUNER_LG_NTSC_NEW_TAPC - LGINNOTEK NTSC ------------ */
582 607
583static struct tuner_params tuner_lg_ntsc_new_tapc_params[] = { 608static struct tuner_params tuner_lg_ntsc_new_tapc_params[] = {
@@ -1254,6 +1279,28 @@ static struct tuner_params tuner_tcl_mf02gip_5n_params[] = {
1254 }, 1279 },
1255}; 1280};
1256 1281
1282/* 80-89 */
1283/* --------- TUNER_PHILIPS_FQ1216LME_MK3 -- active loopthrough, no FM ------- */
1284
1285static struct tuner_params tuner_fq1216lme_mk3_params[] = {
1286 {
1287 .type = TUNER_PARAM_TYPE_PAL,
1288 .ranges = tuner_fm1216me_mk3_pal_ranges,
1289 .count = ARRAY_SIZE(tuner_fm1216me_mk3_pal_ranges),
1290 .cb_first_if_lower_freq = 1, /* not specified, but safe to do */
1291 .has_tda9887 = 1, /* TDA9886 */
1292 .port1_active = 1,
1293 .port2_active = 1,
1294 .port2_invert_for_secam_lc = 1,
1295 .default_top_low = 4,
1296 .default_top_mid = 4,
1297 .default_top_high = 4,
1298 .default_top_secam_low = 4,
1299 .default_top_secam_mid = 4,
1300 .default_top_secam_high = 4,
1301 },
1302};
1303
1257/* --------------------------------------------------------------------- */ 1304/* --------------------------------------------------------------------- */
1258 1305
1259struct tunertype tuners[] = { 1306struct tunertype tuners[] = {
@@ -1694,6 +1741,18 @@ struct tunertype tuners[] = {
1694 .initdata = tua603x_agc112, 1741 .initdata = tua603x_agc112,
1695 .sleepdata = (u8[]){ 4, 0x9c, 0x60, 0x85, 0x54 }, 1742 .sleepdata = (u8[]){ 4, 0x9c, 0x60, 0x85, 0x54 },
1696 }, 1743 },
1744 [TUNER_PHILIPS_FM1216MK5] = { /* Philips PAL */
1745 .name = "Philips PAL/SECAM multi (FM1216 MK5)",
1746 .params = tuner_fm1216mk5_params,
1747 .count = ARRAY_SIZE(tuner_fm1216mk5_params),
1748 },
1749
1750 /* 80-89 */
1751 [TUNER_PHILIPS_FQ1216LME_MK3] = { /* PAL/SECAM, Loop-thru, no FM */
1752 .name = "Philips FQ1216LME MK3 PAL/SECAM w/active loopthrough",
1753 .params = tuner_fq1216lme_mk3_params,
1754 .count = ARRAY_SIZE(tuner_fq1216lme_mk3_params),
1755 },
1697}; 1756};
1698EXPORT_SYMBOL(tuners); 1757EXPORT_SYMBOL(tuners);
1699 1758
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index 1adce9ff52ce..b6da9c3873fe 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -30,7 +30,7 @@ MODULE_PARM_DESC(debug, "enable verbose debug messages");
30 30
31static int no_poweroff; 31static int no_poweroff;
32module_param(no_poweroff, int, 0644); 32module_param(no_poweroff, int, 0644);
33MODULE_PARM_DESC(debug, "0 (default) powers device off when not used.\n" 33MODULE_PARM_DESC(no_poweroff, "0 (default) powers device off when not used.\n"
34 "1 keep device energized and with tuner ready all the times.\n" 34 "1 keep device energized and with tuner ready all the times.\n"
35 " Faster, but consumes more power and keeps the device hotter\n"); 35 " Faster, but consumes more power and keeps the device hotter\n");
36 36
@@ -48,7 +48,7 @@ MODULE_PARM_DESC(audio_std,
48 "NICAM/A\n" 48 "NICAM/A\n"
49 "NICAM/B\n"); 49 "NICAM/B\n");
50 50
51static char firmware_name[FIRMWARE_NAME_MAX]; 51static char firmware_name[30];
52module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0); 52module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
53MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the " 53MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
54 "default firmware name\n"); 54 "default firmware name\n");
@@ -272,7 +272,7 @@ static int load_all_firmwares(struct dvb_frontend *fe)
272 fname = firmware_name; 272 fname = firmware_name;
273 273
274 tuner_dbg("Reading firmware %s\n", fname); 274 tuner_dbg("Reading firmware %s\n", fname);
275 rc = request_firmware(&fw, fname, &priv->i2c_props.adap->dev); 275 rc = request_firmware(&fw, fname, priv->i2c_props.adap->dev.parent);
276 if (rc < 0) { 276 if (rc < 0) {
277 if (rc == -ENOENT) 277 if (rc == -ENOENT)
278 tuner_err("Error: firmware %s not found.\n", 278 tuner_err("Error: firmware %s not found.\n",
@@ -917,22 +917,29 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
917 * that xc2028 will be in a safe state. 917 * that xc2028 will be in a safe state.
918 * Maybe this might also be needed for DTV. 918 * Maybe this might also be needed for DTV.
919 */ 919 */
920 if (new_mode == T_ANALOG_TV) { 920 if (new_mode == T_ANALOG_TV)
921 rc = send_seq(priv, {0x00, 0x00}); 921 rc = send_seq(priv, {0x00, 0x00});
922 } else if (priv->cur_fw.type & ATSC) { 922
923 offset = 1750000; 923 /*
924 } else { 924 * Digital modes require an offset to adjust to the
925 offset = 2750000; 925 * proper frequency.
926 * Analog modes require offset = 0
927 */
928 if (new_mode == T_DIGITAL_TV) {
929 /* Sets the offset according with firmware */
930 if (priv->cur_fw.type & DTV6)
931 offset = 1750000;
932 else if (priv->cur_fw.type & DTV7)
933 offset = 2250000;
934 else /* DTV8 or DTV78 */
935 offset = 2750000;
936
926 /* 937 /*
927 * We must adjust the offset by 500kHz in two cases in order 938 * We must adjust the offset by 500kHz when
928 * to correctly center the IF output: 939 * tuning a 7MHz VHF channel with DTV78 firmware
929 * 1) When the ZARLINK456 or DIBCOM52 tables were explicitly 940 * (used in Australia, Italy and Germany)
930 * selected and a 7MHz channel is tuned;
931 * 2) When tuning a VHF channel with DTV78 firmware.
932 */ 941 */
933 if (((priv->cur_fw.type & DTV7) && 942 if ((priv->cur_fw.type & DTV78) && freq < 470000000)
934 (priv->cur_fw.scode_table & (ZARLINK456 | DIBCOM52))) ||
935 ((priv->cur_fw.type & DTV78) && freq < 470000000))
936 offset -= 500000; 943 offset -= 500000;
937 } 944 }
938 945
@@ -991,7 +998,7 @@ static int xc2028_set_analog_freq(struct dvb_frontend *fe,
991 if (priv->ctrl.input1) 998 if (priv->ctrl.input1)
992 type |= INPUT1; 999 type |= INPUT1;
993 return generic_set_freq(fe, (625l * p->frequency) / 10, 1000 return generic_set_freq(fe, (625l * p->frequency) / 10,
994 T_ANALOG_TV, type, 0, 0); 1001 T_RADIO, type, 0, 0);
995 } 1002 }
996 1003
997 /* if std is not defined, choose one */ 1004 /* if std is not defined, choose one */
@@ -1022,21 +1029,20 @@ static int xc2028_set_params(struct dvb_frontend *fe,
1022 switch(fe->ops.info.type) { 1029 switch(fe->ops.info.type) {
1023 case FE_OFDM: 1030 case FE_OFDM:
1024 bw = p->u.ofdm.bandwidth; 1031 bw = p->u.ofdm.bandwidth;
1025 break; 1032 /*
1026 case FE_QAM: 1033 * The only countries with 6MHz seem to be Taiwan/Uruguay.
1027 tuner_info("WARN: There are some reports that " 1034 * Both seem to require QAM firmware for OFDM decoding
1028 "QAM 6 MHz doesn't work.\n" 1035 * Tested in Taiwan by Terry Wu <terrywu2009@gmail.com>
1029 "If this works for you, please report by " 1036 */
1030 "e-mail to: v4l-dvb-maintainer@linuxtv.org\n"); 1037 if (bw == BANDWIDTH_6_MHZ)
1031 bw = BANDWIDTH_6_MHZ; 1038 type |= QAM;
1032 type |= QAM;
1033 break; 1039 break;
1034 case FE_ATSC: 1040 case FE_ATSC:
1035 bw = BANDWIDTH_6_MHZ; 1041 bw = BANDWIDTH_6_MHZ;
1036 /* The only ATSC firmware (at least on v2.7) is D2633 */ 1042 /* The only ATSC firmware (at least on v2.7) is D2633 */
1037 type |= ATSC | D2633; 1043 type |= ATSC | D2633;
1038 break; 1044 break;
1039 /* DVB-S is not supported */ 1045 /* DVB-S and pure QAM (FE_QAM) are not supported */
1040 default: 1046 default:
1041 return -EINVAL; 1047 return -EINVAL;
1042 } 1048 }
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index b54598550dc4..f4ffcdc9b848 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (c) 2007 Xceive Corporation 4 * Copyright (c) 2007 Xceive Corporation
5 * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org> 5 * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
6 * Copyright (c) 2009 Devin Heitmueller <dheitmueller@kernellabs.com>
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -36,14 +37,20 @@ static int debug;
36module_param(debug, int, 0644); 37module_param(debug, int, 0644);
37MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); 38MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
38 39
40static int no_poweroff;
41module_param(no_poweroff, int, 0644);
42MODULE_PARM_DESC(no_poweroff, "0 (default) powers device off when not used.\n"
43 "\t\t1 keep device energized and with tuner ready all the times.\n"
44 "\t\tFaster, but consumes more power and keeps the device hotter");
45
39static DEFINE_MUTEX(xc5000_list_mutex); 46static DEFINE_MUTEX(xc5000_list_mutex);
40static LIST_HEAD(hybrid_tuner_instance_list); 47static LIST_HEAD(hybrid_tuner_instance_list);
41 48
42#define dprintk(level, fmt, arg...) if (debug >= level) \ 49#define dprintk(level, fmt, arg...) if (debug >= level) \
43 printk(KERN_INFO "%s: " fmt, "xc5000", ## arg) 50 printk(KERN_INFO "%s: " fmt, "xc5000", ## arg)
44 51
45#define XC5000_DEFAULT_FIRMWARE "dvb-fe-xc5000-1.1.fw" 52#define XC5000_DEFAULT_FIRMWARE "dvb-fe-xc5000-1.6.114.fw"
46#define XC5000_DEFAULT_FIRMWARE_SIZE 12332 53#define XC5000_DEFAULT_FIRMWARE_SIZE 12401
47 54
48struct xc5000_priv { 55struct xc5000_priv {
49 struct tuner_i2c_props i2c_props; 56 struct tuner_i2c_props i2c_props;
@@ -83,11 +90,11 @@ struct xc5000_priv {
83#define XREG_D_CODE 0x04 90#define XREG_D_CODE 0x04
84#define XREG_IF_OUT 0x05 91#define XREG_IF_OUT 0x05
85#define XREG_SEEK_MODE 0x07 92#define XREG_SEEK_MODE 0x07
86#define XREG_POWER_DOWN 0x0A 93#define XREG_POWER_DOWN 0x0A /* Obsolete */
87#define XREG_SIGNALSOURCE 0x0D /* 0=Air, 1=Cable */ 94#define XREG_SIGNALSOURCE 0x0D /* 0=Air, 1=Cable */
88#define XREG_SMOOTHEDCVBS 0x0E 95#define XREG_SMOOTHEDCVBS 0x0E
89#define XREG_XTALFREQ 0x0F 96#define XREG_XTALFREQ 0x0F
90#define XREG_FINERFFREQ 0x10 97#define XREG_FINERFREQ 0x10
91#define XREG_DDIMODE 0x11 98#define XREG_DDIMODE 0x11
92 99
93#define XREG_ADC_ENV 0x00 100#define XREG_ADC_ENV 0x00
@@ -100,6 +107,7 @@ struct xc5000_priv {
100#define XREG_VERSION 0x07 107#define XREG_VERSION 0x07
101#define XREG_PRODUCT_ID 0x08 108#define XREG_PRODUCT_ID 0x08
102#define XREG_BUSY 0x09 109#define XREG_BUSY 0x09
110#define XREG_BUILD 0x0D
103 111
104/* 112/*
105 Basic firmware description. This will remain with 113 Basic firmware description. This will remain with
@@ -191,27 +199,36 @@ static struct XC_TV_STANDARD XC5000_Standard[MAX_TV_STANDARD] = {
191 {"FM Radio-INPUT1", 0x0208, 0x9002} 199 {"FM Radio-INPUT1", 0x0208, 0x9002}
192}; 200};
193 201
194static int xc5000_is_firmware_loaded(struct dvb_frontend *fe); 202static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe);
195static int xc5000_writeregs(struct xc5000_priv *priv, u8 *buf, u8 len); 203static int xc5000_is_firmware_loaded(struct dvb_frontend *fe);
196static int xc5000_readregs(struct xc5000_priv *priv, u8 *buf, u8 len); 204static int xc5000_readreg(struct xc5000_priv *priv, u16 reg, u16 *val);
197static void xc5000_TunerReset(struct dvb_frontend *fe); 205static int xc5000_TunerReset(struct dvb_frontend *fe);
198 206
199static int xc_send_i2c_data(struct xc5000_priv *priv, u8 *buf, int len) 207static int xc_send_i2c_data(struct xc5000_priv *priv, u8 *buf, int len)
200{ 208{
201 return xc5000_writeregs(priv, buf, len) 209 struct i2c_msg msg = { .addr = priv->i2c_props.addr,
202 ? XC_RESULT_I2C_WRITE_FAILURE : XC_RESULT_SUCCESS; 210 .flags = 0, .buf = buf, .len = len };
211
212 if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) {
213 printk(KERN_ERR "xc5000: I2C write failed (len=%i)\n", len);
214 return XC_RESULT_I2C_WRITE_FAILURE;
215 }
216 return XC_RESULT_SUCCESS;
203} 217}
204 218
219/* This routine is never used because the only time we read data from the
220 i2c bus is when we read registers, and we want that to be an atomic i2c
221 transaction in case we are on a multi-master bus */
205static int xc_read_i2c_data(struct xc5000_priv *priv, u8 *buf, int len) 222static int xc_read_i2c_data(struct xc5000_priv *priv, u8 *buf, int len)
206{ 223{
207 return xc5000_readregs(priv, buf, len) 224 struct i2c_msg msg = { .addr = priv->i2c_props.addr,
208 ? XC_RESULT_I2C_READ_FAILURE : XC_RESULT_SUCCESS; 225 .flags = I2C_M_RD, .buf = buf, .len = len };
209}
210 226
211static int xc_reset(struct dvb_frontend *fe) 227 if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) {
212{ 228 printk(KERN_ERR "xc5000 I2C read failed (len=%i)\n", len);
213 xc5000_TunerReset(fe); 229 return -EREMOTEIO;
214 return XC_RESULT_SUCCESS; 230 }
231 return 0;
215} 232}
216 233
217static void xc_wait(int wait_ms) 234static void xc_wait(int wait_ms)
@@ -219,7 +236,7 @@ static void xc_wait(int wait_ms)
219 msleep(wait_ms); 236 msleep(wait_ms);
220} 237}
221 238
222static void xc5000_TunerReset(struct dvb_frontend *fe) 239static int xc5000_TunerReset(struct dvb_frontend *fe)
223{ 240{
224 struct xc5000_priv *priv = fe->tuner_priv; 241 struct xc5000_priv *priv = fe->tuner_priv;
225 int ret; 242 int ret;
@@ -232,16 +249,21 @@ static void xc5000_TunerReset(struct dvb_frontend *fe)
232 priv->i2c_props.adap->algo_data, 249 priv->i2c_props.adap->algo_data,
233 DVB_FRONTEND_COMPONENT_TUNER, 250 DVB_FRONTEND_COMPONENT_TUNER,
234 XC5000_TUNER_RESET, 0); 251 XC5000_TUNER_RESET, 0);
235 if (ret) 252 if (ret) {
236 printk(KERN_ERR "xc5000: reset failed\n"); 253 printk(KERN_ERR "xc5000: reset failed\n");
237 } else 254 return XC_RESULT_RESET_FAILURE;
255 }
256 } else {
238 printk(KERN_ERR "xc5000: no tuner reset callback function, fatal\n"); 257 printk(KERN_ERR "xc5000: no tuner reset callback function, fatal\n");
258 return XC_RESULT_RESET_FAILURE;
259 }
260 return XC_RESULT_SUCCESS;
239} 261}
240 262
241static int xc_write_reg(struct xc5000_priv *priv, u16 regAddr, u16 i2cData) 263static int xc_write_reg(struct xc5000_priv *priv, u16 regAddr, u16 i2cData)
242{ 264{
243 u8 buf[4]; 265 u8 buf[4];
244 int WatchDogTimer = 5; 266 int WatchDogTimer = 100;
245 int result; 267 int result;
246 268
247 buf[0] = (regAddr >> 8) & 0xFF; 269 buf[0] = (regAddr >> 8) & 0xFF;
@@ -263,7 +285,7 @@ static int xc_write_reg(struct xc5000_priv *priv, u16 regAddr, u16 i2cData)
263 /* busy flag cleared */ 285 /* busy flag cleared */
264 break; 286 break;
265 } else { 287 } else {
266 xc_wait(100); /* wait 5 ms */ 288 xc_wait(5); /* wait 5 ms */
267 WatchDogTimer--; 289 WatchDogTimer--;
268 } 290 }
269 } 291 }
@@ -276,25 +298,6 @@ static int xc_write_reg(struct xc5000_priv *priv, u16 regAddr, u16 i2cData)
276 return result; 298 return result;
277} 299}
278 300
279static int xc_read_reg(struct xc5000_priv *priv, u16 regAddr, u16 *i2cData)
280{
281 u8 buf[2];
282 int result;
283
284 buf[0] = (regAddr >> 8) & 0xFF;
285 buf[1] = regAddr & 0xFF;
286 result = xc_send_i2c_data(priv, buf, 2);
287 if (result != XC_RESULT_SUCCESS)
288 return result;
289
290 result = xc_read_i2c_data(priv, buf, 2);
291 if (result != XC_RESULT_SUCCESS)
292 return result;
293
294 *i2cData = buf[0] * 256 + buf[1];
295 return result;
296}
297
298static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence) 301static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence)
299{ 302{
300 struct xc5000_priv *priv = fe->tuner_priv; 303 struct xc5000_priv *priv = fe->tuner_priv;
@@ -309,7 +312,7 @@ static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence)
309 len = i2c_sequence[index] * 256 + i2c_sequence[index+1]; 312 len = i2c_sequence[index] * 256 + i2c_sequence[index+1];
310 if (len == 0x0000) { 313 if (len == 0x0000) {
311 /* RESET command */ 314 /* RESET command */
312 result = xc_reset(fe); 315 result = xc5000_TunerReset(fe);
313 index += 2; 316 index += 2;
314 if (result != XC_RESULT_SUCCESS) 317 if (result != XC_RESULT_SUCCESS)
315 return result; 318 return result;
@@ -371,15 +374,6 @@ static int xc_SetTVStandard(struct xc5000_priv *priv,
371 return ret; 374 return ret;
372} 375}
373 376
374static int xc_shutdown(struct xc5000_priv *priv)
375{
376 return XC_RESULT_SUCCESS;
377 /* Fixme: cannot bring tuner back alive once shutdown
378 * without reloading the driver modules.
379 * return xc_write_reg(priv, XREG_POWER_DOWN, 0);
380 */
381}
382
383static int xc_SetSignalSource(struct xc5000_priv *priv, u16 rf_mode) 377static int xc_SetSignalSource(struct xc5000_priv *priv, u16 rf_mode)
384{ 378{
385 dprintk(1, "%s(%d) Source = %s\n", __func__, rf_mode, 379 dprintk(1, "%s(%d) Source = %s\n", __func__, rf_mode,
@@ -408,7 +402,10 @@ static int xc_set_RF_frequency(struct xc5000_priv *priv, u32 freq_hz)
408 402
409 freq_code = (u16)(freq_hz / 15625); 403 freq_code = (u16)(freq_hz / 15625);
410 404
411 return xc_write_reg(priv, XREG_RF_FREQ, freq_code); 405 /* Starting in firmware version 1.1.44, Xceive recommends using the
406 FINERFREQ for all normal tuning (the doc indicates reg 0x03 should
407 only be used for fast scanning for channel lock) */
408 return xc_write_reg(priv, XREG_FINERFREQ, freq_code);
412} 409}
413 410
414 411
@@ -424,7 +421,7 @@ static int xc_set_IF_frequency(struct xc5000_priv *priv, u32 freq_khz)
424 421
425static int xc_get_ADC_Envelope(struct xc5000_priv *priv, u16 *adc_envelope) 422static int xc_get_ADC_Envelope(struct xc5000_priv *priv, u16 *adc_envelope)
426{ 423{
427 return xc_read_reg(priv, XREG_ADC_ENV, adc_envelope); 424 return xc5000_readreg(priv, XREG_ADC_ENV, adc_envelope);
428} 425}
429 426
430static int xc_get_frequency_error(struct xc5000_priv *priv, u32 *freq_error_hz) 427static int xc_get_frequency_error(struct xc5000_priv *priv, u32 *freq_error_hz)
@@ -433,8 +430,8 @@ static int xc_get_frequency_error(struct xc5000_priv *priv, u32 *freq_error_hz)
433 u16 regData; 430 u16 regData;
434 u32 tmp; 431 u32 tmp;
435 432
436 result = xc_read_reg(priv, XREG_FREQ_ERROR, &regData); 433 result = xc5000_readreg(priv, XREG_FREQ_ERROR, &regData);
437 if (result) 434 if (result != XC_RESULT_SUCCESS)
438 return result; 435 return result;
439 436
440 tmp = (u32)regData; 437 tmp = (u32)regData;
@@ -444,7 +441,7 @@ static int xc_get_frequency_error(struct xc5000_priv *priv, u32 *freq_error_hz)
444 441
445static int xc_get_lock_status(struct xc5000_priv *priv, u16 *lock_status) 442static int xc_get_lock_status(struct xc5000_priv *priv, u16 *lock_status)
446{ 443{
447 return xc_read_reg(priv, XREG_LOCK, lock_status); 444 return xc5000_readreg(priv, XREG_LOCK, lock_status);
448} 445}
449 446
450static int xc_get_version(struct xc5000_priv *priv, 447static int xc_get_version(struct xc5000_priv *priv,
@@ -454,8 +451,8 @@ static int xc_get_version(struct xc5000_priv *priv,
454 u16 data; 451 u16 data;
455 int result; 452 int result;
456 453
457 result = xc_read_reg(priv, XREG_VERSION, &data); 454 result = xc5000_readreg(priv, XREG_VERSION, &data);
458 if (result) 455 if (result != XC_RESULT_SUCCESS)
459 return result; 456 return result;
460 457
461 (*hw_majorversion) = (data >> 12) & 0x0F; 458 (*hw_majorversion) = (data >> 12) & 0x0F;
@@ -466,13 +463,18 @@ static int xc_get_version(struct xc5000_priv *priv,
466 return 0; 463 return 0;
467} 464}
468 465
466static int xc_get_buildversion(struct xc5000_priv *priv, u16 *buildrev)
467{
468 return xc5000_readreg(priv, XREG_BUILD, buildrev);
469}
470
469static int xc_get_hsync_freq(struct xc5000_priv *priv, u32 *hsync_freq_hz) 471static int xc_get_hsync_freq(struct xc5000_priv *priv, u32 *hsync_freq_hz)
470{ 472{
471 u16 regData; 473 u16 regData;
472 int result; 474 int result;
473 475
474 result = xc_read_reg(priv, XREG_HSYNC_FREQ, &regData); 476 result = xc5000_readreg(priv, XREG_HSYNC_FREQ, &regData);
475 if (result) 477 if (result != XC_RESULT_SUCCESS)
476 return result; 478 return result;
477 479
478 (*hsync_freq_hz) = ((regData & 0x0fff) * 763)/100; 480 (*hsync_freq_hz) = ((regData & 0x0fff) * 763)/100;
@@ -481,12 +483,12 @@ static int xc_get_hsync_freq(struct xc5000_priv *priv, u32 *hsync_freq_hz)
481 483
482static int xc_get_frame_lines(struct xc5000_priv *priv, u16 *frame_lines) 484static int xc_get_frame_lines(struct xc5000_priv *priv, u16 *frame_lines)
483{ 485{
484 return xc_read_reg(priv, XREG_FRAME_LINES, frame_lines); 486 return xc5000_readreg(priv, XREG_FRAME_LINES, frame_lines);
485} 487}
486 488
487static int xc_get_quality(struct xc5000_priv *priv, u16 *quality) 489static int xc_get_quality(struct xc5000_priv *priv, u16 *quality)
488{ 490{
489 return xc_read_reg(priv, XREG_QUALITY, quality); 491 return xc5000_readreg(priv, XREG_QUALITY, quality);
490} 492}
491 493
492static u16 WaitForLock(struct xc5000_priv *priv) 494static u16 WaitForLock(struct xc5000_priv *priv)
@@ -504,7 +506,9 @@ static u16 WaitForLock(struct xc5000_priv *priv)
504 return lockState; 506 return lockState;
505} 507}
506 508
507static int xc_tune_channel(struct xc5000_priv *priv, u32 freq_hz) 509#define XC_TUNE_ANALOG 0
510#define XC_TUNE_DIGITAL 1
511static int xc_tune_channel(struct xc5000_priv *priv, u32 freq_hz, int mode)
508{ 512{
509 int found = 0; 513 int found = 0;
510 514
@@ -513,8 +517,10 @@ static int xc_tune_channel(struct xc5000_priv *priv, u32 freq_hz)
513 if (xc_set_RF_frequency(priv, freq_hz) != XC_RESULT_SUCCESS) 517 if (xc_set_RF_frequency(priv, freq_hz) != XC_RESULT_SUCCESS)
514 return 0; 518 return 0;
515 519
516 if (WaitForLock(priv) == 1) 520 if (mode == XC_TUNE_ANALOG) {
517 found = 1; 521 if (WaitForLock(priv) == 1)
522 found = 1;
523 }
518 524
519 return found; 525 return found;
520} 526}
@@ -536,32 +542,7 @@ static int xc5000_readreg(struct xc5000_priv *priv, u16 reg, u16 *val)
536 } 542 }
537 543
538 *val = (bval[0] << 8) | bval[1]; 544 *val = (bval[0] << 8) | bval[1];
539 return 0; 545 return XC_RESULT_SUCCESS;
540}
541
542static int xc5000_writeregs(struct xc5000_priv *priv, u8 *buf, u8 len)
543{
544 struct i2c_msg msg = { .addr = priv->i2c_props.addr,
545 .flags = 0, .buf = buf, .len = len };
546
547 if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) {
548 printk(KERN_ERR "xc5000: I2C write failed (len=%i)\n",
549 (int)len);
550 return -EREMOTEIO;
551 }
552 return 0;
553}
554
555static int xc5000_readregs(struct xc5000_priv *priv, u8 *buf, u8 len)
556{
557 struct i2c_msg msg = { .addr = priv->i2c_props.addr,
558 .flags = I2C_M_RD, .buf = buf, .len = len };
559
560 if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) {
561 printk(KERN_ERR "xc5000 I2C read failed (len=%i)\n", (int)len);
562 return -EREMOTEIO;
563 }
564 return 0;
565} 546}
566 547
567static int xc5000_fwupload(struct dvb_frontend *fe) 548static int xc5000_fwupload(struct dvb_frontend *fe)
@@ -575,13 +556,13 @@ static int xc5000_fwupload(struct dvb_frontend *fe)
575 XC5000_DEFAULT_FIRMWARE); 556 XC5000_DEFAULT_FIRMWARE);
576 557
577 ret = request_firmware(&fw, XC5000_DEFAULT_FIRMWARE, 558 ret = request_firmware(&fw, XC5000_DEFAULT_FIRMWARE,
578 &priv->i2c_props.adap->dev); 559 priv->i2c_props.adap->dev.parent);
579 if (ret) { 560 if (ret) {
580 printk(KERN_ERR "xc5000: Upload failed. (file not found?)\n"); 561 printk(KERN_ERR "xc5000: Upload failed. (file not found?)\n");
581 ret = XC_RESULT_RESET_FAILURE; 562 ret = XC_RESULT_RESET_FAILURE;
582 goto out; 563 goto out;
583 } else { 564 } else {
584 printk(KERN_INFO "xc5000: firmware read %Zu bytes.\n", 565 printk(KERN_DEBUG "xc5000: firmware read %Zu bytes.\n",
585 fw->size); 566 fw->size);
586 ret = XC_RESULT_SUCCESS; 567 ret = XC_RESULT_SUCCESS;
587 } 568 }
@@ -590,8 +571,9 @@ static int xc5000_fwupload(struct dvb_frontend *fe)
590 printk(KERN_ERR "xc5000: firmware incorrect size\n"); 571 printk(KERN_ERR "xc5000: firmware incorrect size\n");
591 ret = XC_RESULT_RESET_FAILURE; 572 ret = XC_RESULT_RESET_FAILURE;
592 } else { 573 } else {
593 printk(KERN_INFO "xc5000: firmware upload\n"); 574 printk(KERN_INFO "xc5000: firmware uploading...\n");
594 ret = xc_load_i2c_sequence(fe, fw->data); 575 ret = xc_load_i2c_sequence(fe, fw->data);
576 printk(KERN_INFO "xc5000: firmware upload complete...\n");
595 } 577 }
596 578
597out: 579out:
@@ -609,6 +591,7 @@ static void xc_debug_dump(struct xc5000_priv *priv)
609 u16 quality; 591 u16 quality;
610 u8 hw_majorversion = 0, hw_minorversion = 0; 592 u8 hw_majorversion = 0, hw_minorversion = 0;
611 u8 fw_majorversion = 0, fw_minorversion = 0; 593 u8 fw_majorversion = 0, fw_minorversion = 0;
594 u16 fw_buildversion = 0;
612 595
613 /* Wait for stats to stabilize. 596 /* Wait for stats to stabilize.
614 * Frame Lines needs two frame times after initial lock 597 * Frame Lines needs two frame times after initial lock
@@ -628,9 +611,10 @@ static void xc_debug_dump(struct xc5000_priv *priv)
628 611
629 xc_get_version(priv, &hw_majorversion, &hw_minorversion, 612 xc_get_version(priv, &hw_majorversion, &hw_minorversion,
630 &fw_majorversion, &fw_minorversion); 613 &fw_majorversion, &fw_minorversion);
631 dprintk(1, "*** HW: V%02x.%02x, FW: V%02x.%02x\n", 614 xc_get_buildversion(priv, &fw_buildversion);
615 dprintk(1, "*** HW: V%02x.%02x, FW: V%02x.%02x.%04x\n",
632 hw_majorversion, hw_minorversion, 616 hw_majorversion, hw_minorversion,
633 fw_majorversion, fw_minorversion); 617 fw_majorversion, fw_minorversion, fw_buildversion);
634 618
635 xc_get_hsync_freq(priv, &hsync_freq_hz); 619 xc_get_hsync_freq(priv, &hsync_freq_hz);
636 dprintk(1, "*** Horizontal sync frequency = %d Hz\n", hsync_freq_hz); 620 dprintk(1, "*** Horizontal sync frequency = %d Hz\n", hsync_freq_hz);
@@ -648,27 +632,57 @@ static int xc5000_set_params(struct dvb_frontend *fe,
648 struct xc5000_priv *priv = fe->tuner_priv; 632 struct xc5000_priv *priv = fe->tuner_priv;
649 int ret; 633 int ret;
650 634
635 if (xc5000_is_firmware_loaded(fe) != XC_RESULT_SUCCESS)
636 xc_load_fw_and_init_tuner(fe);
637
651 dprintk(1, "%s() frequency=%d (Hz)\n", __func__, params->frequency); 638 dprintk(1, "%s() frequency=%d (Hz)\n", __func__, params->frequency);
652 639
653 switch (params->u.vsb.modulation) { 640 if (fe->ops.info.type == FE_ATSC) {
654 case VSB_8: 641 dprintk(1, "%s() ATSC\n", __func__);
655 case VSB_16: 642 switch (params->u.vsb.modulation) {
656 dprintk(1, "%s() VSB modulation\n", __func__); 643 case VSB_8:
644 case VSB_16:
645 dprintk(1, "%s() VSB modulation\n", __func__);
646 priv->rf_mode = XC_RF_MODE_AIR;
647 priv->freq_hz = params->frequency - 1750000;
648 priv->bandwidth = BANDWIDTH_6_MHZ;
649 priv->video_standard = DTV6;
650 break;
651 case QAM_64:
652 case QAM_256:
653 case QAM_AUTO:
654 dprintk(1, "%s() QAM modulation\n", __func__);
655 priv->rf_mode = XC_RF_MODE_CABLE;
656 priv->freq_hz = params->frequency - 1750000;
657 priv->bandwidth = BANDWIDTH_6_MHZ;
658 priv->video_standard = DTV6;
659 break;
660 default:
661 return -EINVAL;
662 }
663 } else if (fe->ops.info.type == FE_OFDM) {
664 dprintk(1, "%s() OFDM\n", __func__);
665 switch (params->u.ofdm.bandwidth) {
666 case BANDWIDTH_6_MHZ:
667 priv->bandwidth = BANDWIDTH_6_MHZ;
668 priv->video_standard = DTV6;
669 priv->freq_hz = params->frequency - 1750000;
670 break;
671 case BANDWIDTH_7_MHZ:
672 printk(KERN_ERR "xc5000 bandwidth 7MHz not supported\n");
673 return -EINVAL;
674 case BANDWIDTH_8_MHZ:
675 priv->bandwidth = BANDWIDTH_8_MHZ;
676 priv->video_standard = DTV8;
677 priv->freq_hz = params->frequency - 2750000;
678 break;
679 default:
680 printk(KERN_ERR "xc5000 bandwidth not set!\n");
681 return -EINVAL;
682 }
657 priv->rf_mode = XC_RF_MODE_AIR; 683 priv->rf_mode = XC_RF_MODE_AIR;
658 priv->freq_hz = params->frequency - 1750000; 684 } else {
659 priv->bandwidth = BANDWIDTH_6_MHZ; 685 printk(KERN_ERR "xc5000 modulation type not supported!\n");
660 priv->video_standard = DTV6;
661 break;
662 case QAM_64:
663 case QAM_256:
664 case QAM_AUTO:
665 dprintk(1, "%s() QAM modulation\n", __func__);
666 priv->rf_mode = XC_RF_MODE_CABLE;
667 priv->freq_hz = params->frequency - 1750000;
668 priv->bandwidth = BANDWIDTH_6_MHZ;
669 priv->video_standard = DTV6;
670 break;
671 default:
672 return -EINVAL; 686 return -EINVAL;
673 } 687 }
674 688
@@ -698,7 +712,7 @@ static int xc5000_set_params(struct dvb_frontend *fe,
698 return -EIO; 712 return -EIO;
699 } 713 }
700 714
701 xc_tune_channel(priv, priv->freq_hz); 715 xc_tune_channel(priv, priv->freq_hz, XC_TUNE_DIGITAL);
702 716
703 if (debug) 717 if (debug)
704 xc_debug_dump(priv); 718 xc_debug_dump(priv);
@@ -725,8 +739,6 @@ static int xc5000_is_firmware_loaded(struct dvb_frontend *fe)
725 return ret; 739 return ret;
726} 740}
727 741
728static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe);
729
730static int xc5000_set_analog_params(struct dvb_frontend *fe, 742static int xc5000_set_analog_params(struct dvb_frontend *fe,
731 struct analog_parameters *params) 743 struct analog_parameters *params)
732{ 744{
@@ -807,7 +819,7 @@ tune_channel:
807 return -EREMOTEIO; 819 return -EREMOTEIO;
808 } 820 }
809 821
810 xc_tune_channel(priv, priv->freq_hz); 822 xc_tune_channel(priv, priv->freq_hz, XC_TUNE_ANALOG);
811 823
812 if (debug) 824 if (debug)
813 xc_debug_dump(priv); 825 xc_debug_dump(priv);
@@ -875,18 +887,18 @@ static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe)
875 887
876static int xc5000_sleep(struct dvb_frontend *fe) 888static int xc5000_sleep(struct dvb_frontend *fe)
877{ 889{
878 struct xc5000_priv *priv = fe->tuner_priv;
879 int ret; 890 int ret;
880 891
881 dprintk(1, "%s()\n", __func__); 892 dprintk(1, "%s()\n", __func__);
882 893
883 /* On Pinnacle PCTV HD 800i, the tuner cannot be reinitialized 894 /* Avoid firmware reload on slow devices */
884 * once shutdown without reloading the driver. Maybe I am not 895 if (no_poweroff)
885 * doing something right. 896 return 0;
886 *
887 */
888 897
889 ret = xc_shutdown(priv); 898 /* According to Xceive technical support, the "powerdown" register
899 was removed in newer versions of the firmware. The "supported"
900 way to sleep the tuner is to pull the reset pin low for 10ms */
901 ret = xc5000_TunerReset(fe);
890 if (ret != XC_RESULT_SUCCESS) { 902 if (ret != XC_RESULT_SUCCESS) {
891 printk(KERN_ERR 903 printk(KERN_ERR
892 "xc5000: %s() unable to shutdown tuner\n", 904 "xc5000: %s() unable to shutdown tuner\n",
@@ -991,7 +1003,7 @@ struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
991 /* Check if firmware has been loaded. It is possible that another 1003 /* Check if firmware has been loaded. It is possible that another
992 instance of the driver has loaded the firmware. 1004 instance of the driver has loaded the firmware.
993 */ 1005 */
994 if (xc5000_readreg(priv, XREG_PRODUCT_ID, &id) != 0) 1006 if (xc5000_readreg(priv, XREG_PRODUCT_ID, &id) != XC_RESULT_SUCCESS)
995 goto fail; 1007 goto fail;
996 1008
997 switch (id) { 1009 switch (id) {
diff --git a/drivers/media/dvb/b2c2/flexcop-common.h b/drivers/media/dvb/b2c2/flexcop-common.h
index 3e1c472092ab..9e2148a19967 100644
--- a/drivers/media/dvb/b2c2/flexcop-common.h
+++ b/drivers/media/dvb/b2c2/flexcop-common.h
@@ -1,9 +1,7 @@
1/* 1/*
2 * This file is part of linux driver the digital TV devices equipped with B2C2 FlexcopII(b)/III 2 * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
3 * 3 * flexcop-common.h - common header file for device-specific source files
4 * flexcop-common.h - common header file for device-specific source files also. 4 * see flexcop.c for copyright information
5 *
6 * see flexcop.c for copyright information.
7 */ 5 */
8#ifndef __FLEXCOP_COMMON_H__ 6#ifndef __FLEXCOP_COMMON_H__
9#define __FLEXCOP_COMMON_H__ 7#define __FLEXCOP_COMMON_H__
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index f7afab5944cf..efb4a6c2b57a 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -1,34 +1,27 @@
1/* 1/*
2 * This file is part of linux driver the digital TV devices equipped with B2C2 FlexcopII(b)/III 2 * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
3 * 3 * flexcop-fe-tuner.c - methods for frontend attachment and DiSEqC controlling
4 * flexcop-fe-tuner.c - methods for attaching a frontend and controlling DiSEqC. 4 * see flexcop.c for copyright information
5 *
6 * see flexcop.c for copyright information.
7 */ 5 */
8#include <media/tuner.h> 6#include <media/tuner.h>
9
10#include "flexcop.h" 7#include "flexcop.h"
11
12#include "stv0299.h"
13#include "mt352.h"
14#include "nxt200x.h"
15#include "bcm3510.h"
16#include "stv0297.h"
17#include "mt312.h" 8#include "mt312.h"
18#include "lgdt330x.h" 9#include "stv0299.h"
19#include "dvb-pll.h"
20#include "tuner-simple.h"
21
22#include "s5h1420.h" 10#include "s5h1420.h"
23#include "itd1000.h" 11#include "itd1000.h"
24
25#include "cx24123.h"
26#include "cx24113.h" 12#include "cx24113.h"
27 13#include "cx24123.h"
28#include "isl6421.h" 14#include "isl6421.h"
15#include "mt352.h"
16#include "bcm3510.h"
17#include "nxt200x.h"
18#include "dvb-pll.h"
19#include "lgdt330x.h"
20#include "tuner-simple.h"
21#include "stv0297.h"
29 22
30/* lnb control */ 23/* lnb control */
31 24#if defined(CONFIG_DVB_MT312_MODULE) || defined(CONFIG_DVB_STV0299_MODULE)
32static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) 25static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
33{ 26{
34 struct flexcop_device *fc = fe->dvb->priv; 27 struct flexcop_device *fc = fe->dvb->priv;
@@ -37,65 +30,62 @@ static int flexcop_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage
37 30
38 v = fc->read_ibi_reg(fc, misc_204); 31 v = fc->read_ibi_reg(fc, misc_204);
39 switch (voltage) { 32 switch (voltage) {
40 case SEC_VOLTAGE_OFF: 33 case SEC_VOLTAGE_OFF:
41 v.misc_204.ACPI1_sig = 1; 34 v.misc_204.ACPI1_sig = 1;
42 break; 35 break;
43 case SEC_VOLTAGE_13: 36 case SEC_VOLTAGE_13:
44 v.misc_204.ACPI1_sig = 0; 37 v.misc_204.ACPI1_sig = 0;
45 v.misc_204.LNB_L_H_sig = 0; 38 v.misc_204.LNB_L_H_sig = 0;
46 break; 39 break;
47 case SEC_VOLTAGE_18: 40 case SEC_VOLTAGE_18:
48 v.misc_204.ACPI1_sig = 0; 41 v.misc_204.ACPI1_sig = 0;
49 v.misc_204.LNB_L_H_sig = 1; 42 v.misc_204.LNB_L_H_sig = 1;
50 break; 43 break;
51 default: 44 default:
52 err("unknown SEC_VOLTAGE value"); 45 err("unknown SEC_VOLTAGE value");
53 return -EINVAL; 46 return -EINVAL;
54 } 47 }
55 return fc->write_ibi_reg(fc, misc_204, v); 48 return fc->write_ibi_reg(fc, misc_204, v);
56} 49}
50#endif
57 51
52#if defined(CONFIG_DVB_S5H1420_MODULE) || defined(CONFIG_DVB_STV0299_MODULE) \
53 || defined(CONFIG_DVB_MT312_MODULE)
58static int flexcop_sleep(struct dvb_frontend* fe) 54static int flexcop_sleep(struct dvb_frontend* fe)
59{ 55{
60 struct flexcop_device *fc = fe->dvb->priv; 56 struct flexcop_device *fc = fe->dvb->priv;
61/* flexcop_ibi_value v = fc->read_ibi_reg(fc,misc_204); */
62
63 if (fc->fe_sleep) 57 if (fc->fe_sleep)
64 return fc->fe_sleep(fe); 58 return fc->fe_sleep(fe);
65
66/* v.misc_204.ACPI3_sig = 1;
67 fc->write_ibi_reg(fc,misc_204,v);*/
68
69 return 0; 59 return 0;
70} 60}
61#endif
71 62
63/* SkyStar2 DVB-S rev 2.3 */
64#if defined(CONFIG_DVB_MT312_MODULE)
72static int flexcop_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) 65static int flexcop_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
73{ 66{
74 /* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */ 67/* u16 wz_half_period_for_45_mhz[] = { 0x01ff, 0x0154, 0x00ff, 0x00cc }; */
75 struct flexcop_device *fc = fe->dvb->priv; 68 struct flexcop_device *fc = fe->dvb->priv;
76 flexcop_ibi_value v; 69 flexcop_ibi_value v;
77 u16 ax; 70 u16 ax;
78 v.raw = 0; 71 v.raw = 0;
79
80 deb_tuner("tone = %u\n",tone); 72 deb_tuner("tone = %u\n",tone);
81 73
82 switch (tone) { 74 switch (tone) {
83 case SEC_TONE_ON: 75 case SEC_TONE_ON:
84 ax = 0x01ff; 76 ax = 0x01ff;
85 break; 77 break;
86 case SEC_TONE_OFF: 78 case SEC_TONE_OFF:
87 ax = 0; 79 ax = 0;
88 break; 80 break;
89 default: 81 default:
90 err("unknown SEC_TONE value"); 82 err("unknown SEC_TONE value");
91 return -EINVAL; 83 return -EINVAL;
92 } 84 }
93 85
94 v.lnb_switch_freq_200.LNB_CTLPrescaler_sig = 1; /* divide by 2 */ 86 v.lnb_switch_freq_200.LNB_CTLPrescaler_sig = 1; /* divide by 2 */
95
96 v.lnb_switch_freq_200.LNB_CTLHighCount_sig = ax; 87 v.lnb_switch_freq_200.LNB_CTLHighCount_sig = ax;
97 v.lnb_switch_freq_200.LNB_CTLLowCount_sig = ax == 0 ? 0x1ff : ax; 88 v.lnb_switch_freq_200.LNB_CTLLowCount_sig = ax == 0 ? 0x1ff : ax;
98
99 return fc->write_ibi_reg(fc,lnb_switch_freq_200,v); 89 return fc->write_ibi_reg(fc,lnb_switch_freq_200,v);
100} 90}
101 91
@@ -110,17 +100,16 @@ static void flexcop_diseqc_send_bit(struct dvb_frontend* fe, int data)
110static void flexcop_diseqc_send_byte(struct dvb_frontend* fe, int data) 100static void flexcop_diseqc_send_byte(struct dvb_frontend* fe, int data)
111{ 101{
112 int i, par = 1, d; 102 int i, par = 1, d;
113
114 for (i = 7; i >= 0; i--) { 103 for (i = 7; i >= 0; i--) {
115 d = (data >> i) & 1; 104 d = (data >> i) & 1;
116 par ^= d; 105 par ^= d;
117 flexcop_diseqc_send_bit(fe, d); 106 flexcop_diseqc_send_bit(fe, d);
118 } 107 }
119
120 flexcop_diseqc_send_bit(fe, par); 108 flexcop_diseqc_send_bit(fe, par);
121} 109}
122 110
123static int flexcop_send_diseqc_msg(struct dvb_frontend* fe, int len, u8 *msg, unsigned long burst) 111static int flexcop_send_diseqc_msg(struct dvb_frontend *fe,
112 int len, u8 *msg, unsigned long burst)
124{ 113{
125 int i; 114 int i;
126 115
@@ -129,7 +118,6 @@ static int flexcop_send_diseqc_msg(struct dvb_frontend* fe, int len, u8 *msg, un
129 118
130 for (i = 0; i < len; i++) 119 for (i = 0; i < len; i++)
131 flexcop_diseqc_send_byte(fe,msg[i]); 120 flexcop_diseqc_send_byte(fe,msg[i]);
132
133 mdelay(16); 121 mdelay(16);
134 122
135 if (burst != -1) { 123 if (burst != -1) {
@@ -146,50 +134,110 @@ static int flexcop_send_diseqc_msg(struct dvb_frontend* fe, int len, u8 *msg, un
146 return 0; 134 return 0;
147} 135}
148 136
149static int flexcop_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd) 137static int flexcop_diseqc_send_master_cmd(struct dvb_frontend *fe,
138 struct dvb_diseqc_master_cmd *cmd)
150{ 139{
151 return flexcop_send_diseqc_msg(fe, cmd->msg_len, cmd->msg, 0); 140 return flexcop_send_diseqc_msg(fe, cmd->msg_len, cmd->msg, 0);
152} 141}
153 142
154static int flexcop_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_t minicmd) 143static int flexcop_diseqc_send_burst(struct dvb_frontend *fe,
144 fe_sec_mini_cmd_t minicmd)
155{ 145{
156 return flexcop_send_diseqc_msg(fe, 0, NULL, minicmd); 146 return flexcop_send_diseqc_msg(fe, 0, NULL, minicmd);
157} 147}
158 148
159/* dvb-s stv0299 */ 149static struct mt312_config skystar23_samsung_tbdu18132_config = {
160static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend* fe, u32 srate, u32 ratio) 150 .demod_address = 0x0e,
151};
152
153static int skystar23_samsung_tbdu18132_tuner_set_params(struct dvb_frontend *fe,
154 struct dvb_frontend_parameters *params)
155{
156 u8 buf[4];
157 u32 div;
158 struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf,
159 .len = sizeof(buf) };
160 struct flexcop_device *fc = fe->dvb->priv;
161 div = (params->frequency + (125/2)) / 125;
162
163 buf[0] = (div >> 8) & 0x7f;
164 buf[1] = (div >> 0) & 0xff;
165 buf[2] = 0x84 | ((div >> 10) & 0x60);
166 buf[3] = 0x80;
167
168 if (params->frequency < 1550000)
169 buf[3] |= 0x02;
170
171 if (fe->ops.i2c_gate_ctrl)
172 fe->ops.i2c_gate_ctrl(fe, 1);
173 if (i2c_transfer(&fc->fc_i2c_adap[0].i2c_adap, &msg, 1) != 1)
174 return -EIO;
175 return 0;
176}
177
178static int skystar2_rev23_attach(struct flexcop_device *fc,
179 struct i2c_adapter *i2c)
180{
181 fc->fe = dvb_attach(mt312_attach, &skystar23_samsung_tbdu18132_config, i2c);
182 if (fc->fe != NULL) {
183 struct dvb_frontend_ops *ops = &fc->fe->ops;
184 ops->tuner_ops.set_params =
185 skystar23_samsung_tbdu18132_tuner_set_params;
186 ops->diseqc_send_master_cmd = flexcop_diseqc_send_master_cmd;
187 ops->diseqc_send_burst = flexcop_diseqc_send_burst;
188 ops->set_tone = flexcop_set_tone;
189 ops->set_voltage = flexcop_set_voltage;
190 fc->fe_sleep = ops->sleep;
191 ops->sleep = flexcop_sleep;
192 return 1;
193 }
194 return 0;
195}
196#endif
197
198/* SkyStar2 DVB-S rev 2.6 */
199#if defined(CONFIG_DVB_STV0299_MODULE)
200static int samsung_tbmu24112_set_symbol_rate(struct dvb_frontend *fe,
201 u32 srate, u32 ratio)
161{ 202{
162 u8 aclk = 0; 203 u8 aclk = 0;
163 u8 bclk = 0; 204 u8 bclk = 0;
164 205
165 if (srate < 1500000) { aclk = 0xb7; bclk = 0x47; } 206 if (srate < 1500000) {
166 else if (srate < 3000000) { aclk = 0xb7; bclk = 0x4b; } 207 aclk = 0xb7; bclk = 0x47;
167 else if (srate < 7000000) { aclk = 0xb7; bclk = 0x4f; } 208 } else if (srate < 3000000) {
168 else if (srate < 14000000) { aclk = 0xb7; bclk = 0x53; } 209 aclk = 0xb7; bclk = 0x4b;
169 else if (srate < 30000000) { aclk = 0xb6; bclk = 0x53; } 210 } else if (srate < 7000000) {
170 else if (srate < 45000000) { aclk = 0xb4; bclk = 0x51; } 211 aclk = 0xb7; bclk = 0x4f;
171 212 } else if (srate < 14000000) {
172 stv0299_writereg (fe, 0x13, aclk); 213 aclk = 0xb7; bclk = 0x53;
173 stv0299_writereg (fe, 0x14, bclk); 214 } else if (srate < 30000000) {
174 stv0299_writereg (fe, 0x1f, (ratio >> 16) & 0xff); 215 aclk = 0xb6; bclk = 0x53;
175 stv0299_writereg (fe, 0x20, (ratio >> 8) & 0xff); 216 } else if (srate < 45000000) {
176 stv0299_writereg (fe, 0x21, (ratio ) & 0xf0); 217 aclk = 0xb4; bclk = 0x51;
218 }
177 219
220 stv0299_writereg(fe, 0x13, aclk);
221 stv0299_writereg(fe, 0x14, bclk);
222 stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
223 stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
224 stv0299_writereg(fe, 0x21, ratio & 0xf0);
178 return 0; 225 return 0;
179} 226}
180 227
181static int samsung_tbmu24112_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params) 228static int samsung_tbmu24112_tuner_set_params(struct dvb_frontend *fe,
229 struct dvb_frontend_parameters *params)
182{ 230{
183 u8 buf[4]; 231 u8 buf[4];
184 u32 div; 232 u32 div;
185 struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) }; 233 struct i2c_msg msg = {
234 .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
186 struct flexcop_device *fc = fe->dvb->priv; 235 struct flexcop_device *fc = fe->dvb->priv;
187
188 div = params->frequency / 125; 236 div = params->frequency / 125;
189 237
190 buf[0] = (div >> 8) & 0x7f; 238 buf[0] = (div >> 8) & 0x7f;
191 buf[1] = div & 0xff; 239 buf[1] = div & 0xff;
192 buf[2] = 0x84; /* 0xC4 */ 240 buf[2] = 0x84; /* 0xC4 */
193 buf[3] = 0x08; 241 buf[3] = 0x08;
194 242
195 if (params->frequency < 1500000) 243 if (params->frequency < 1500000)
@@ -203,48 +251,48 @@ static int samsung_tbmu24112_tuner_set_params(struct dvb_frontend* fe, struct dv
203} 251}
204 252
205static u8 samsung_tbmu24112_inittab[] = { 253static u8 samsung_tbmu24112_inittab[] = {
206 0x01, 0x15, 254 0x01, 0x15,
207 0x02, 0x30, 255 0x02, 0x30,
208 0x03, 0x00, 256 0x03, 0x00,
209 0x04, 0x7D, 257 0x04, 0x7D,
210 0x05, 0x35, 258 0x05, 0x35,
211 0x06, 0x02, 259 0x06, 0x02,
212 0x07, 0x00, 260 0x07, 0x00,
213 0x08, 0xC3, 261 0x08, 0xC3,
214 0x0C, 0x00, 262 0x0C, 0x00,
215 0x0D, 0x81, 263 0x0D, 0x81,
216 0x0E, 0x23, 264 0x0E, 0x23,
217 0x0F, 0x12, 265 0x0F, 0x12,
218 0x10, 0x7E, 266 0x10, 0x7E,
219 0x11, 0x84, 267 0x11, 0x84,
220 0x12, 0xB9, 268 0x12, 0xB9,
221 0x13, 0x88, 269 0x13, 0x88,
222 0x14, 0x89, 270 0x14, 0x89,
223 0x15, 0xC9, 271 0x15, 0xC9,
224 0x16, 0x00, 272 0x16, 0x00,
225 0x17, 0x5C, 273 0x17, 0x5C,
226 0x18, 0x00, 274 0x18, 0x00,
227 0x19, 0x00, 275 0x19, 0x00,
228 0x1A, 0x00, 276 0x1A, 0x00,
229 0x1C, 0x00, 277 0x1C, 0x00,
230 0x1D, 0x00, 278 0x1D, 0x00,
231 0x1E, 0x00, 279 0x1E, 0x00,
232 0x1F, 0x3A, 280 0x1F, 0x3A,
233 0x20, 0x2E, 281 0x20, 0x2E,
234 0x21, 0x80, 282 0x21, 0x80,
235 0x22, 0xFF, 283 0x22, 0xFF,
236 0x23, 0xC1, 284 0x23, 0xC1,
237 0x28, 0x00, 285 0x28, 0x00,
238 0x29, 0x1E, 286 0x29, 0x1E,
239 0x2A, 0x14, 287 0x2A, 0x14,
240 0x2B, 0x0F, 288 0x2B, 0x0F,
241 0x2C, 0x09, 289 0x2C, 0x09,
242 0x2D, 0x05, 290 0x2D, 0x05,
243 0x31, 0x1F, 291 0x31, 0x1F,
244 0x32, 0x19, 292 0x32, 0x19,
245 0x33, 0xFE, 293 0x33, 0xFE,
246 0x34, 0x93, 294 0x34, 0x93,
247 0xff, 0xff, 295 0xff, 0xff,
248}; 296};
249 297
250static struct stv0299_config samsung_tbmu24112_config = { 298static struct stv0299_config samsung_tbmu24112_config = {
@@ -259,27 +307,155 @@ static struct stv0299_config samsung_tbmu24112_config = {
259 .set_symbol_rate = samsung_tbmu24112_set_symbol_rate, 307 .set_symbol_rate = samsung_tbmu24112_set_symbol_rate,
260}; 308};
261 309
262/* dvb-t mt352 */ 310static int skystar2_rev26_attach(struct flexcop_device *fc,
263static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend* fe) 311 struct i2c_adapter *i2c)
312{
313 fc->fe = dvb_attach(stv0299_attach, &samsung_tbmu24112_config, i2c);
314 if (fc->fe != NULL) {
315 struct dvb_frontend_ops *ops = &fc->fe->ops;
316 ops->tuner_ops.set_params = samsung_tbmu24112_tuner_set_params;
317 ops->set_voltage = flexcop_set_voltage;
318 fc->fe_sleep = ops->sleep;
319 ops->sleep = flexcop_sleep;
320 return 1;
321 }
322 return 0;
323}
324#endif
325
326/* SkyStar2 DVB-S rev 2.7 */
327#if defined(CONFIG_DVB_S5H1420_MODULE)
328static struct s5h1420_config skystar2_rev2_7_s5h1420_config = {
329 .demod_address = 0x53,
330 .invert = 1,
331 .repeated_start_workaround = 1,
332 .serial_mpeg = 1,
333};
334
335static struct itd1000_config skystar2_rev2_7_itd1000_config = {
336 .i2c_address = 0x61,
337};
338
339static int skystar2_rev27_attach(struct flexcop_device *fc,
340 struct i2c_adapter *i2c)
341{
342 flexcop_ibi_value r108;
343 struct i2c_adapter *i2c_tuner;
344
345 /* enable no_base_addr - no repeated start when reading */
346 fc->fc_i2c_adap[0].no_base_addr = 1;
347 fc->fe = dvb_attach(s5h1420_attach, &skystar2_rev2_7_s5h1420_config,
348 i2c);
349 if (!fc->fe)
350 goto fail;
351
352 i2c_tuner = s5h1420_get_tuner_i2c_adapter(fc->fe);
353 if (!i2c_tuner)
354 goto fail;
355
356 fc->fe_sleep = fc->fe->ops.sleep;
357 fc->fe->ops.sleep = flexcop_sleep;
358
359 /* enable no_base_addr - no repeated start when reading */
360 fc->fc_i2c_adap[2].no_base_addr = 1;
361 if (!dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap,
362 0x08, 1, 1)) {
363 err("ISL6421 could NOT be attached");
364 goto fail_isl;
365 }
366 info("ISL6421 successfully attached");
367
368 /* the ITD1000 requires a lower i2c clock - is it a problem ? */
369 r108.raw = 0x00000506;
370 fc->write_ibi_reg(fc, tw_sm_c_108, r108);
371 if (!dvb_attach(itd1000_attach, fc->fe, i2c_tuner,
372 &skystar2_rev2_7_itd1000_config)) {
373 err("ITD1000 could NOT be attached");
374 /* Should i2c clock be restored? */
375 goto fail_isl;
376 }
377 info("ITD1000 successfully attached");
378
379 return 1;
380
381fail_isl:
382 fc->fc_i2c_adap[2].no_base_addr = 0;
383fail:
384 /* for the next devices we need it again */
385 fc->fc_i2c_adap[0].no_base_addr = 0;
386 return 0;
387}
388#endif
389
390/* SkyStar2 rev 2.8 */
391#if defined(CONFIG_DVB_CX24123_MODULE)
392static struct cx24123_config skystar2_rev2_8_cx24123_config = {
393 .demod_address = 0x55,
394 .dont_use_pll = 1,
395 .agc_callback = cx24113_agc_callback,
396};
397
398static const struct cx24113_config skystar2_rev2_8_cx24113_config = {
399 .i2c_addr = 0x54,
400 .xtal_khz = 10111,
401};
402
403static int skystar2_rev28_attach(struct flexcop_device *fc,
404 struct i2c_adapter *i2c)
405{
406 struct i2c_adapter *i2c_tuner;
407
408 fc->fe = dvb_attach(cx24123_attach, &skystar2_rev2_8_cx24123_config,
409 i2c);
410 if (!fc->fe)
411 return 0;
412
413 i2c_tuner = cx24123_get_tuner_i2c_adapter(fc->fe);;
414 if (!i2c_tuner)
415 return 0;
416
417 if (!dvb_attach(cx24113_attach, fc->fe, &skystar2_rev2_8_cx24113_config,
418 i2c_tuner)) {
419 err("CX24113 could NOT be attached");
420 return 0;
421 }
422 info("CX24113 successfully attached");
423
424 fc->fc_i2c_adap[2].no_base_addr = 1;
425 if (!dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap,
426 0x08, 0, 0)) {
427 err("ISL6421 could NOT be attached");
428 fc->fc_i2c_adap[2].no_base_addr = 0;
429 return 0;
430 }
431 info("ISL6421 successfully attached");
432 /* TODO on i2c_adap[1] addr 0x11 (EEPROM) there seems to be an
433 * IR-receiver (PIC16F818) - but the card has no input for that ??? */
434 return 1;
435}
436#endif
437
438/* AirStar DVB-T */
439#if defined(CONFIG_DVB_MT352_MODULE)
440static int samsung_tdtc9251dh0_demod_init(struct dvb_frontend *fe)
264{ 441{
265 static u8 mt352_clock_config [] = { 0x89, 0x18, 0x2d }; 442 static u8 mt352_clock_config[] = { 0x89, 0x18, 0x2d };
266 static u8 mt352_reset [] = { 0x50, 0x80 }; 443 static u8 mt352_reset[] = { 0x50, 0x80 };
267 static u8 mt352_adc_ctl_1_cfg [] = { 0x8E, 0x40 }; 444 static u8 mt352_adc_ctl_1_cfg[] = { 0x8E, 0x40 };
268 static u8 mt352_agc_cfg [] = { 0x67, 0x28, 0xa1 }; 445 static u8 mt352_agc_cfg[] = { 0x67, 0x28, 0xa1 };
269 static u8 mt352_capt_range_cfg[] = { 0x75, 0x32 }; 446 static u8 mt352_capt_range_cfg[] = { 0x75, 0x32 };
270 447
271 mt352_write(fe, mt352_clock_config, sizeof(mt352_clock_config)); 448 mt352_write(fe, mt352_clock_config, sizeof(mt352_clock_config));
272 udelay(2000); 449 udelay(2000);
273 mt352_write(fe, mt352_reset, sizeof(mt352_reset)); 450 mt352_write(fe, mt352_reset, sizeof(mt352_reset));
274 mt352_write(fe, mt352_adc_ctl_1_cfg, sizeof(mt352_adc_ctl_1_cfg)); 451 mt352_write(fe, mt352_adc_ctl_1_cfg, sizeof(mt352_adc_ctl_1_cfg));
275
276 mt352_write(fe, mt352_agc_cfg, sizeof(mt352_agc_cfg)); 452 mt352_write(fe, mt352_agc_cfg, sizeof(mt352_agc_cfg));
277 mt352_write(fe, mt352_capt_range_cfg, sizeof(mt352_capt_range_cfg)); 453 mt352_write(fe, mt352_capt_range_cfg, sizeof(mt352_capt_range_cfg));
278
279 return 0; 454 return 0;
280} 455}
281 456
282static int samsung_tdtc9251dh0_calc_regs(struct dvb_frontend* fe, struct dvb_frontend_parameters *params, u8* pllbuf, int buf_len) 457static int samsung_tdtc9251dh0_calc_regs(struct dvb_frontend *fe,
458 struct dvb_frontend_parameters *params, u8* pllbuf, int buf_len)
283{ 459{
284 u32 div; 460 u32 div;
285 unsigned char bs = 0; 461 unsigned char bs = 0;
@@ -287,19 +463,20 @@ static int samsung_tdtc9251dh0_calc_regs(struct dvb_frontend* fe, struct dvb_fro
287 if (buf_len < 5) 463 if (buf_len < 5)
288 return -EINVAL; 464 return -EINVAL;
289 465
290 #define IF_FREQUENCYx6 217 /* 6 * 36.16666666667MHz */ 466#define IF_FREQUENCYx6 217 /* 6 * 36.16666666667MHz */
291 div = (((params->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6; 467 div = (((params->frequency + 83333) * 3) / 500000) + IF_FREQUENCYx6;
292 468 if (params->frequency >= 48000000 && params->frequency <= 154000000) \
293 if (params->frequency >= 48000000 && params->frequency <= 154000000) bs = 0x09; 469 bs = 0x09;
294 if (params->frequency >= 161000000 && params->frequency <= 439000000) bs = 0x0a; 470 if (params->frequency >= 161000000 && params->frequency <= 439000000) \
295 if (params->frequency >= 447000000 && params->frequency <= 863000000) bs = 0x08; 471 bs = 0x0a;
472 if (params->frequency >= 447000000 && params->frequency <= 863000000) \
473 bs = 0x08;
296 474
297 pllbuf[0] = 0x61; 475 pllbuf[0] = 0x61;
298 pllbuf[1] = div >> 8; 476 pllbuf[1] = div >> 8;
299 pllbuf[2] = div & 0xff; 477 pllbuf[2] = div & 0xff;
300 pllbuf[3] = 0xcc; 478 pllbuf[3] = 0xcc;
301 pllbuf[4] = bs; 479 pllbuf[4] = bs;
302
303 return 5; 480 return 5;
304} 481}
305 482
@@ -308,70 +485,95 @@ static struct mt352_config samsung_tdtc9251dh0_config = {
308 .demod_init = samsung_tdtc9251dh0_demod_init, 485 .demod_init = samsung_tdtc9251dh0_demod_init,
309}; 486};
310 487
311static int flexcop_fe_request_firmware(struct dvb_frontend* fe, const struct firmware **fw, char* name) 488static int airstar_dvbt_attach(struct flexcop_device *fc,
489 struct i2c_adapter *i2c)
490{
491 fc->fe = dvb_attach(mt352_attach, &samsung_tdtc9251dh0_config, i2c);
492 if (fc->fe != NULL) {
493 fc->fe->ops.tuner_ops.calc_regs = samsung_tdtc9251dh0_calc_regs;
494 return 1;
495 }
496 return 0;
497}
498#endif
499
500/* AirStar ATSC 1st generation */
501#if defined(CONFIG_DVB_BCM3510_MODULE)
502static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
503 const struct firmware **fw, char* name)
312{ 504{
313 struct flexcop_device *fc = fe->dvb->priv; 505 struct flexcop_device *fc = fe->dvb->priv;
314 return request_firmware(fw, name, fc->dev); 506 return request_firmware(fw, name, fc->dev);
315} 507}
316 508
317static struct lgdt330x_config air2pc_atsc_hd5000_config = {
318 .demod_address = 0x59,
319 .demod_chip = LGDT3303,
320 .serial_mpeg = 0x04,
321 .clock_polarity_flip = 1,
322};
323
324static struct nxt200x_config samsung_tbmv_config = {
325 .demod_address = 0x0a,
326};
327
328static struct bcm3510_config air2pc_atsc_first_gen_config = { 509static struct bcm3510_config air2pc_atsc_first_gen_config = {
329 .demod_address = 0x0f, 510 .demod_address = 0x0f,
330 .request_firmware = flexcop_fe_request_firmware, 511 .request_firmware = flexcop_fe_request_firmware,
331}; 512};
332 513
333static int skystar23_samsung_tbdu18132_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params) 514static int airstar_atsc1_attach(struct flexcop_device *fc,
515 struct i2c_adapter *i2c)
334{ 516{
335 u8 buf[4]; 517 fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c);
336 u32 div; 518 return fc->fe != NULL;
337 struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) }; 519}
338 struct flexcop_device *fc = fe->dvb->priv; 520#endif
339
340 div = (params->frequency + (125/2)) / 125;
341 521
342 buf[0] = (div >> 8) & 0x7f; 522/* AirStar ATSC 2nd generation */
343 buf[1] = (div >> 0) & 0xff; 523#if defined(CONFIG_DVB_NXT200X_MODULE)
344 buf[2] = 0x84 | ((div >> 10) & 0x60); 524static struct nxt200x_config samsung_tbmv_config = {
345 buf[3] = 0x80; 525 .demod_address = 0x0a,
526};
346 527
347 if (params->frequency < 1550000) 528static int airstar_atsc2_attach(struct flexcop_device *fc,
348 buf[3] |= 0x02; 529 struct i2c_adapter *i2c)
530{
531 fc->fe = dvb_attach(nxt200x_attach, &samsung_tbmv_config, i2c);
532 if (!fc->fe)
533 return 0;
349 534
350 if (fe->ops.i2c_gate_ctrl) 535 return !!dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL,
351 fe->ops.i2c_gate_ctrl(fe, 1); 536 DVB_PLL_SAMSUNG_TBMV);
352 if (i2c_transfer(&fc->fc_i2c_adap[0].i2c_adap, &msg, 1) != 1)
353 return -EIO;
354 return 0;
355} 537}
538#endif
356 539
357static struct mt312_config skystar23_samsung_tbdu18132_config = { 540/* AirStar ATSC 3rd generation */
358 541#if defined(CONFIG_DVB_LGDT330X_MODULE)
359 .demod_address = 0x0e, 542static struct lgdt330x_config air2pc_atsc_hd5000_config = {
543 .demod_address = 0x59,
544 .demod_chip = LGDT3303,
545 .serial_mpeg = 0x04,
546 .clock_polarity_flip = 1,
360}; 547};
361 548
549static int airstar_atsc3_attach(struct flexcop_device *fc,
550 struct i2c_adapter *i2c)
551{
552 fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, i2c);
553 if (!fc->fe)
554 return 0;
555
556 return !!dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61,
557 TUNER_LG_TDVS_H06XF);
558}
559#endif
560
561/* CableStar2 DVB-C */
562#if defined(CONFIG_DVB_STV0297_MODULE)
362static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe, 563static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe,
363 struct dvb_frontend_parameters *fep) 564 struct dvb_frontend_parameters *fep)
364{ 565{
365 struct flexcop_device *fc = fe->dvb->priv; 566 struct flexcop_device *fc = fe->dvb->priv;
366 u8 buf[4]; 567 u8 buf[4];
367 u16 div; 568 u16 div;
368 int ret; 569 int ret;
369 570
370/* 62.5 kHz * 10 */ 571/* 62.5 kHz * 10 */
371#define REF_FREQ 625 572#define REF_FREQ 625
372#define FREQ_OFFSET 36125 573#define FREQ_OFFSET 36125
373 574
374 div = ((fep->frequency/1000 + FREQ_OFFSET ) * 10) / REF_FREQ; // 4 MHz = 4000 KHz 575 div = ((fep->frequency/1000 + FREQ_OFFSET) * 10) / REF_FREQ;
576/* 4 MHz = 4000 KHz */
375 577
376 buf[0] = (u8)( div >> 8) & 0x7f; 578 buf[0] = (u8)( div >> 8) & 0x7f;
377 buf[1] = (u8) div & 0xff; 579 buf[1] = (u8) div & 0xff;
@@ -384,11 +586,11 @@ static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe,
384 * AGD = 1, R3 R2 R1 R0 = 0 1 0 1 => byte 4 = 1**10101 = 0x95 */ 586 * AGD = 1, R3 R2 R1 R0 = 0 1 0 1 => byte 4 = 1**10101 = 0x95 */
385 buf[2] = 0x95; 587 buf[2] = 0x95;
386 588
387// Range(MHz) C1 * RE RTS BS4 BS3 BS2 BS1 Byte 5 589/* Range(MHz) C1 * RE RTS BS4 BS3 BS2 BS1 Byte 5
388// 47 - 153 0 * 0 0 0 0 0 1 0x01 590 * 47 - 153 0 * 0 0 0 0 0 1 0x01
389// 153 - 430 0 * 0 0 0 0 1 0 0x02 591 * 153 - 430 0 * 0 0 0 0 1 0 0x02
390// 430 - 822 0 * 0 0 1 0 0 0 0x08 592 * 430 - 822 0 * 0 0 1 0 0 0 0x08
391// 822 - 862 1 * 0 0 1 0 0 0 0x88 593 * 822 - 862 1 * 0 0 1 0 0 0 0x88 */
392 594
393 if (fep->frequency <= 153000000) buf[3] = 0x01; 595 if (fep->frequency <= 153000000) buf[3] = 0x01;
394 else if (fep->frequency <= 430000000) buf[3] = 0x02; 596 else if (fep->frequency <= 430000000) buf[3] = 0x02;
@@ -397,11 +599,11 @@ static int alps_tdee4_stv0297_tuner_set_params(struct dvb_frontend* fe,
397 599
398 if (fe->ops.i2c_gate_ctrl) 600 if (fe->ops.i2c_gate_ctrl)
399 fe->ops.i2c_gate_ctrl(fe, 0); 601 fe->ops.i2c_gate_ctrl(fe, 0);
400 deb_tuner("tuner buffer for %d Hz: %x %x %x %x\n",fep->frequency, buf[0],buf[1],buf[2],buf[3]); 602 deb_tuner("tuner buffer for %d Hz: %x %x %x %x\n", fep->frequency,
603 buf[0], buf[1], buf[2], buf[3]);
401 ret = fc->i2c_request(&fc->fc_i2c_adap[2], 604 ret = fc->i2c_request(&fc->fc_i2c_adap[2],
402 FC_WRITE, 0x61, buf[0], &buf[1], 3); 605 FC_WRITE, 0x61, buf[0], &buf[1], 3);
403 deb_tuner("tuner write returned: %d\n",ret); 606 deb_tuner("tuner write returned: %d\n",ret);
404
405 return ret; 607 return ret;
406} 608}
407 609
@@ -481,182 +683,73 @@ static u8 alps_tdee4_stv0297_inittab[] = {
481static struct stv0297_config alps_tdee4_stv0297_config = { 683static struct stv0297_config alps_tdee4_stv0297_config = {
482 .demod_address = 0x1c, 684 .demod_address = 0x1c,
483 .inittab = alps_tdee4_stv0297_inittab, 685 .inittab = alps_tdee4_stv0297_inittab,
484// .invert = 1,
485// .pll_set = alps_tdee4_stv0297_pll_set,
486};
487
488
489/* SkyStar2 rev2.7 (a/u) */
490static struct s5h1420_config skystar2_rev2_7_s5h1420_config = {
491 .demod_address = 0x53,
492 .invert = 1,
493 .repeated_start_workaround = 1,
494 .serial_mpeg = 1,
495};
496
497static struct itd1000_config skystar2_rev2_7_itd1000_config = {
498 .i2c_address = 0x61,
499}; 686};
500 687
501/* SkyStar2 rev2.8 */ 688static int cablestar2_attach(struct flexcop_device *fc,
502static struct cx24123_config skystar2_rev2_8_cx24123_config = { 689 struct i2c_adapter *i2c)
503 .demod_address = 0x55,
504 .dont_use_pll = 1,
505 .agc_callback = cx24113_agc_callback,
506};
507
508static const struct cx24113_config skystar2_rev2_8_cx24113_config = {
509 .i2c_addr = 0x54,
510 .xtal_khz = 10111,
511};
512
513/* try to figure out the frontend, each card/box can have on of the following list */
514int flexcop_frontend_init(struct flexcop_device *fc)
515{ 690{
516 struct dvb_frontend_ops *ops;
517 struct i2c_adapter *i2c = &fc->fc_i2c_adap[0].i2c_adap;
518 struct i2c_adapter *i2c_tuner;
519
520 /* enable no_base_addr - no repeated start when reading */
521 fc->fc_i2c_adap[0].no_base_addr = 1;
522 fc->fe = dvb_attach(s5h1420_attach, &skystar2_rev2_7_s5h1420_config, i2c);
523 if (fc->fe != NULL) {
524 flexcop_ibi_value r108;
525 i2c_tuner = s5h1420_get_tuner_i2c_adapter(fc->fe);
526 ops = &fc->fe->ops;
527
528 fc->fe_sleep = ops->sleep;
529 ops->sleep = flexcop_sleep;
530
531 fc->dev_type = FC_SKY_REV27;
532
533 /* enable no_base_addr - no repeated start when reading */
534 fc->fc_i2c_adap[2].no_base_addr = 1;
535 if (dvb_attach(isl6421_attach, fc->fe, &fc->fc_i2c_adap[2].i2c_adap, 0x08, 1, 1) == NULL)
536 err("ISL6421 could NOT be attached");
537 else
538 info("ISL6421 successfully attached");
539
540 /* the ITD1000 requires a lower i2c clock - it slows down the stuff for everyone - but is it a problem ? */
541 r108.raw = 0x00000506;
542 fc->write_ibi_reg(fc, tw_sm_c_108, r108);
543 if (i2c_tuner) {
544 if (dvb_attach(itd1000_attach, fc->fe, i2c_tuner, &skystar2_rev2_7_itd1000_config) == NULL)
545 err("ITD1000 could NOT be attached");
546 else
547 info("ITD1000 successfully attached");
548 }
549 goto fe_found;
550 }
551 fc->fc_i2c_adap[0].no_base_addr = 0; /* for the next devices we need it again */
552
553 /* try the sky v2.8 (cx24123, isl6421) */
554 fc->fe = dvb_attach(cx24123_attach,
555 &skystar2_rev2_8_cx24123_config, i2c);
556 if (fc->fe != NULL) {
557 i2c_tuner = cx24123_get_tuner_i2c_adapter(fc->fe);
558 if (i2c_tuner != NULL) {
559 if (dvb_attach(cx24113_attach, fc->fe,
560 &skystar2_rev2_8_cx24113_config,
561 i2c_tuner) == NULL)
562 err("CX24113 could NOT be attached");
563 else
564 info("CX24113 successfully attached");
565 }
566
567 fc->dev_type = FC_SKY_REV28;
568
569 fc->fc_i2c_adap[2].no_base_addr = 1;
570 if (dvb_attach(isl6421_attach, fc->fe,
571 &fc->fc_i2c_adap[2].i2c_adap, 0x08, 0, 0) == NULL)
572 err("ISL6421 could NOT be attached");
573 else
574 info("ISL6421 successfully attached");
575
576 /* TODO on i2c_adap[1] addr 0x11 (EEPROM) there seems to be an
577 * IR-receiver (PIC16F818) - but the card has no input for
578 * that ??? */
579
580 goto fe_found;
581 }
582
583 /* try the sky v2.6 (stv0299/Samsung tbmu24112(sl1935)) */
584 fc->fe = dvb_attach(stv0299_attach, &samsung_tbmu24112_config, i2c);
585 if (fc->fe != NULL) {
586 ops = &fc->fe->ops;
587
588 ops->tuner_ops.set_params = samsung_tbmu24112_tuner_set_params;
589
590 ops->set_voltage = flexcop_set_voltage;
591
592 fc->fe_sleep = ops->sleep;
593 ops->sleep = flexcop_sleep;
594
595 fc->dev_type = FC_SKY_REV26;
596 goto fe_found;
597 }
598
599 /* try the air dvb-t (mt352/Samsung tdtc9251dh0(??)) */
600 fc->fe = dvb_attach(mt352_attach, &samsung_tdtc9251dh0_config, i2c);
601 if (fc->fe != NULL) {
602 fc->dev_type = FC_AIR_DVBT;
603 fc->fe->ops.tuner_ops.calc_regs = samsung_tdtc9251dh0_calc_regs;
604 goto fe_found;
605 }
606
607 /* try the air atsc 2nd generation (nxt2002) */
608 fc->fe = dvb_attach(nxt200x_attach, &samsung_tbmv_config, i2c);
609 if (fc->fe != NULL) {
610 fc->dev_type = FC_AIR_ATSC2;
611 dvb_attach(dvb_pll_attach, fc->fe, 0x61, NULL, DVB_PLL_SAMSUNG_TBMV);
612 goto fe_found;
613 }
614
615 fc->fe = dvb_attach(lgdt330x_attach, &air2pc_atsc_hd5000_config, i2c);
616 if (fc->fe != NULL) {
617 fc->dev_type = FC_AIR_ATSC3;
618 dvb_attach(simple_tuner_attach, fc->fe, i2c, 0x61,
619 TUNER_LG_TDVS_H06XF);
620 goto fe_found;
621 }
622
623 /* try the air atsc 1nd generation (bcm3510)/panasonic ct10s */
624 fc->fe = dvb_attach(bcm3510_attach, &air2pc_atsc_first_gen_config, i2c);
625 if (fc->fe != NULL) {
626 fc->dev_type = FC_AIR_ATSC1;
627 goto fe_found;
628 }
629
630 /* try the cable dvb (stv0297) */
631 fc->fc_i2c_adap[0].no_base_addr = 1; 691 fc->fc_i2c_adap[0].no_base_addr = 1;
632 fc->fe = dvb_attach(stv0297_attach, &alps_tdee4_stv0297_config, i2c); 692 fc->fe = dvb_attach(stv0297_attach, &alps_tdee4_stv0297_config, i2c);
633 if (fc->fe != NULL) { 693 if (!fc->fe) {
634 fc->dev_type = FC_CABLE; 694 /* Reset for next frontend to try */
635 fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params; 695 fc->fc_i2c_adap[0].no_base_addr = 0;
636 goto fe_found; 696 return 0;
637 } 697 }
638 fc->fc_i2c_adap[0].no_base_addr = 0; 698 fc->fe->ops.tuner_ops.set_params = alps_tdee4_stv0297_tuner_set_params;
639 699 return 1;
640 /* try the sky v2.3 (vp310/Samsung tbdu18132(tsa5059)) */ 700}
641 fc->fe = dvb_attach(mt312_attach, 701#endif
642 &skystar23_samsung_tbdu18132_config, i2c); 702
643 if (fc->fe != NULL) { 703static struct {
644 ops = &fc->fe->ops; 704 flexcop_device_type_t type;
645 705 int (*attach)(struct flexcop_device *, struct i2c_adapter *);
646 ops->tuner_ops.set_params = skystar23_samsung_tbdu18132_tuner_set_params; 706} flexcop_frontends[] = {
647 707#if defined(CONFIG_DVB_S5H1420_MODULE)
648 ops->diseqc_send_master_cmd = flexcop_diseqc_send_master_cmd; 708 { FC_SKY_REV27, skystar2_rev27_attach },
649 ops->diseqc_send_burst = flexcop_diseqc_send_burst; 709#endif
650 ops->set_tone = flexcop_set_tone; 710#if defined(CONFIG_DVB_CX24123_MODULE)
651 ops->set_voltage = flexcop_set_voltage; 711 { FC_SKY_REV28, skystar2_rev28_attach },
652 712#endif
653 fc->fe_sleep = ops->sleep; 713#if defined(CONFIG_DVB_STV0299_MODULE)
654 ops->sleep = flexcop_sleep; 714 { FC_SKY_REV26, skystar2_rev26_attach },
715#endif
716#if defined(CONFIG_DVB_MT352_MODULE)
717 { FC_AIR_DVBT, airstar_dvbt_attach },
718#endif
719#if defined(CONFIG_DVB_NXT200X_MODULE)
720 { FC_AIR_ATSC2, airstar_atsc2_attach },
721#endif
722#if defined(CONFIG_DVB_LGDT330X_MODULE)
723 { FC_AIR_ATSC3, airstar_atsc3_attach },
724#endif
725#if defined(CONFIG_DVB_BCM3510_MODULE)
726 { FC_AIR_ATSC1, airstar_atsc1_attach },
727#endif
728#if defined(CONFIG_DVB_STV0297_MODULE)
729 { FC_CABLE, cablestar2_attach },
730#endif
731#if defined(CONFIG_DVB_MT312_MODULE)
732 { FC_SKY_REV23, skystar2_rev23_attach },
733#endif
734};
655 735
656 fc->dev_type = FC_SKY_REV23; 736/* try to figure out the frontend */
657 goto fe_found; 737int flexcop_frontend_init(struct flexcop_device *fc)
738{
739 int i;
740 for (i = 0; i < ARRAY_SIZE(flexcop_frontends); i++) {
741 /* type needs to be set before, because of some workarounds
742 * done based on the probed card type */
743 fc->dev_type = flexcop_frontends[i].type;
744 if (flexcop_frontends[i].attach(fc, &fc->fc_i2c_adap[0].i2c_adap))
745 goto fe_found;
746 /* Clean up partially attached frontend */
747 if (fc->fe) {
748 dvb_frontend_detach(fc->fe);
749 fc->fe = NULL;
750 }
658 } 751 }
659 752 fc->dev_type = FC_UNK;
660 err("no frontend driver found for this B2C2/FlexCop adapter"); 753 err("no frontend driver found for this B2C2/FlexCop adapter");
661 return -ENODEV; 754 return -ENODEV;
662 755
@@ -664,9 +757,7 @@ fe_found:
664 info("found '%s' .", fc->fe->ops.info.name); 757 info("found '%s' .", fc->fe->ops.info.name);
665 if (dvb_register_frontend(&fc->dvb_adapter, fc->fe)) { 758 if (dvb_register_frontend(&fc->dvb_adapter, fc->fe)) {
666 err("frontend registration failed!"); 759 err("frontend registration failed!");
667 ops = &fc->fe->ops; 760 dvb_frontend_detach(fc->fe);
668 if (ops->release != NULL)
669 ops->release(fc->fe);
670 fc->fe = NULL; 761 fc->fe = NULL;
671 return -EINVAL; 762 return -EINVAL;
672 } 763 }
@@ -680,6 +771,5 @@ void flexcop_frontend_exit(struct flexcop_device *fc)
680 dvb_unregister_frontend(fc->fe); 771 dvb_unregister_frontend(fc->fe);
681 dvb_frontend_detach(fc->fe); 772 dvb_frontend_detach(fc->fe);
682 } 773 }
683
684 fc->init_state &= ~FC_STATE_FE_INIT; 774 fc->init_state &= ~FC_STATE_FE_INIT;
685} 775}
diff --git a/drivers/media/dvb/b2c2/flexcop-i2c.c b/drivers/media/dvb/b2c2/flexcop-i2c.c
index e2bed5076485..fd1df2352764 100644
--- a/drivers/media/dvb/b2c2/flexcop-i2c.c
+++ b/drivers/media/dvb/b2c2/flexcop-i2c.c
@@ -200,7 +200,7 @@ static int flexcop_master_xfer(struct i2c_adapter *i2c_adap,
200 msgs[i].buf[0], &msgs[i].buf[1], 200 msgs[i].buf[0], &msgs[i].buf[1],
201 msgs[i].len - 1); 201 msgs[i].len - 1);
202 if (ret < 0) { 202 if (ret < 0) {
203 err("i2c master_xfer failed"); 203 deb_i2c("i2c master_xfer failed");
204 break; 204 break;
205 } 205 }
206 } 206 }
diff --git a/drivers/media/dvb/b2c2/flexcop-misc.c b/drivers/media/dvb/b2c2/flexcop-misc.c
index e56627d2f0f4..f06f3a9070f5 100644
--- a/drivers/media/dvb/b2c2/flexcop-misc.c
+++ b/drivers/media/dvb/b2c2/flexcop-misc.c
@@ -46,16 +46,16 @@ static const char *flexcop_revision_names[] = {
46}; 46};
47 47
48static const char *flexcop_device_names[] = { 48static const char *flexcop_device_names[] = {
49 "Unknown device", 49 [FC_UNK] = "Unknown device",
50 "Air2PC/AirStar 2 DVB-T", 50 [FC_CABLE] = "Cable2PC/CableStar 2 DVB-C",
51 "Air2PC/AirStar 2 ATSC 1st generation", 51 [FC_AIR_DVBT] = "Air2PC/AirStar 2 DVB-T",
52 "Air2PC/AirStar 2 ATSC 2nd generation", 52 [FC_AIR_ATSC1] = "Air2PC/AirStar 2 ATSC 1st generation",
53 "Sky2PC/SkyStar 2 DVB-S", 53 [FC_AIR_ATSC2] = "Air2PC/AirStar 2 ATSC 2nd generation",
54 "Sky2PC/SkyStar 2 DVB-S (old version)", 54 [FC_AIR_ATSC3] = "Air2PC/AirStar 2 ATSC 3rd generation (HD5000)",
55 "Cable2PC/CableStar 2 DVB-C", 55 [FC_SKY_REV23] = "Sky2PC/SkyStar 2 DVB-S rev 2.3 (old version)",
56 "Air2PC/AirStar 2 ATSC 3rd generation (HD5000)", 56 [FC_SKY_REV26] = "Sky2PC/SkyStar 2 DVB-S rev 2.6",
57 "Sky2PC/SkyStar 2 DVB-S rev 2.7a/u", 57 [FC_SKY_REV27] = "Sky2PC/SkyStar 2 DVB-S rev 2.7a/u",
58 "Sky2PC/SkyStar 2 DVB-S rev 2.8", 58 [FC_SKY_REV28] = "Sky2PC/SkyStar 2 DVB-S rev 2.8",
59}; 59};
60 60
61static const char *flexcop_bus_names[] = { 61static const char *flexcop_bus_names[] = {
diff --git a/drivers/media/dvb/bt8xx/bt878.c b/drivers/media/dvb/bt8xx/bt878.c
index 56d8fab688bb..a24c125331f0 100644
--- a/drivers/media/dvb/bt8xx/bt878.c
+++ b/drivers/media/dvb/bt8xx/bt878.c
@@ -508,12 +508,6 @@ static int __devinit bt878_probe(struct pci_dev *dev,
508 pci_set_master(dev); 508 pci_set_master(dev);
509 pci_set_drvdata(dev, bt); 509 pci_set_drvdata(dev, bt);
510 510
511/* if(init_bt878(btv) < 0) {
512 bt878_remove(dev);
513 return -EIO;
514 }
515*/
516
517 if ((result = bt878_mem_alloc(bt))) { 511 if ((result = bt878_mem_alloc(bt))) {
518 printk(KERN_ERR "bt878: failed to allocate memory!\n"); 512 printk(KERN_ERR "bt878: failed to allocate memory!\n");
519 goto fail2; 513 goto fail2;
@@ -579,7 +573,7 @@ static struct pci_driver bt878_pci_driver = {
579 .name = "bt878", 573 .name = "bt878",
580 .id_table = bt878_pci_tbl, 574 .id_table = bt878_pci_tbl,
581 .probe = bt878_probe, 575 .probe = bt878_probe,
582 .remove = bt878_remove, 576 .remove = __devexit_p(bt878_remove),
583}; 577};
584 578
585static int bt878_pci_driver_registered; 579static int bt878_pci_driver_registered;
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index 971a8b18f6dd..4dbd7d4185af 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -51,6 +51,9 @@
51#ifndef PCI_VENDOR_ID_TRIGEM 51#ifndef PCI_VENDOR_ID_TRIGEM
52#define PCI_VENDOR_ID_TRIGEM 0x109f 52#define PCI_VENDOR_ID_TRIGEM 0x109f
53#endif 53#endif
54#ifndef PCI_VENDOR_ID_AXESS
55#define PCI_VENDOR_ID_AXESS 0x195d
56#endif
54#ifndef PCI_DEVICE_ID_DM1105 57#ifndef PCI_DEVICE_ID_DM1105
55#define PCI_DEVICE_ID_DM1105 0x036f 58#define PCI_DEVICE_ID_DM1105 0x036f
56#endif 59#endif
@@ -60,6 +63,9 @@
60#ifndef PCI_DEVICE_ID_DW2004 63#ifndef PCI_DEVICE_ID_DW2004
61#define PCI_DEVICE_ID_DW2004 0x2004 64#define PCI_DEVICE_ID_DW2004 0x2004
62#endif 65#endif
66#ifndef PCI_DEVICE_ID_DM05
67#define PCI_DEVICE_ID_DM05 0x1105
68#endif
63/* ----------------------------------------------- */ 69/* ----------------------------------------------- */
64/* sdmc dm1105 registers */ 70/* sdmc dm1105 registers */
65 71
@@ -150,6 +156,11 @@
150#define DM1105_LNB_13V 0x00010100 156#define DM1105_LNB_13V 0x00010100
151#define DM1105_LNB_18V 0x00000100 157#define DM1105_LNB_18V 0x00000100
152 158
159/* GPIO's for LNB power control for Axess DM05 */
160#define DM05_LNB_MASK 0x00000000
161#define DM05_LNB_13V 0x00020000
162#define DM05_LNB_18V 0x00030000
163
153static int ir_debug; 164static int ir_debug;
154module_param(ir_debug, int, 0644); 165module_param(ir_debug, int, 0644);
155MODULE_PARM_DESC(ir_debug, "enable debugging information for IR decoding"); 166MODULE_PARM_DESC(ir_debug, "enable debugging information for IR decoding");
@@ -188,6 +199,8 @@ struct dm1105dvb {
188 199
189 /* irq */ 200 /* irq */
190 struct work_struct work; 201 struct work_struct work;
202 struct workqueue_struct *wq;
203 char wqn[16];
191 204
192 /* dma */ 205 /* dma */
193 dma_addr_t dma_addr; 206 dma_addr_t dma_addr;
@@ -313,15 +326,25 @@ static inline struct dm1105dvb *frontend_to_dm1105dvb(struct dvb_frontend *fe)
313static int dm1105dvb_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) 326static int dm1105dvb_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
314{ 327{
315 struct dm1105dvb *dm1105dvb = frontend_to_dm1105dvb(fe); 328 struct dm1105dvb *dm1105dvb = frontend_to_dm1105dvb(fe);
329 u32 lnb_mask, lnb_13v, lnb_18v;
316 330
317 if (voltage == SEC_VOLTAGE_18) { 331 switch (dm1105dvb->pdev->subsystem_device) {
318 outl(DM1105_LNB_MASK, dm_io_mem(DM1105_GPIOCTR)); 332 case PCI_DEVICE_ID_DM05:
319 outl(DM1105_LNB_18V, dm_io_mem(DM1105_GPIOVAL)); 333 lnb_mask = DM05_LNB_MASK;
320 } else { 334 lnb_13v = DM05_LNB_13V;
321 /*LNB ON-13V by default!*/ 335 lnb_18v = DM05_LNB_18V;
322 outl(DM1105_LNB_MASK, dm_io_mem(DM1105_GPIOCTR)); 336 break;
323 outl(DM1105_LNB_13V, dm_io_mem(DM1105_GPIOVAL)); 337 default:
324 } 338 lnb_mask = DM1105_LNB_MASK;
339 lnb_13v = DM1105_LNB_13V;
340 lnb_18v = DM1105_LNB_18V;
341 }
342
343 outl(lnb_mask, dm_io_mem(DM1105_GPIOCTR));
344 if (voltage == SEC_VOLTAGE_18)
345 outl(lnb_18v , dm_io_mem(DM1105_GPIOVAL));
346 else
347 outl(lnb_13v, dm_io_mem(DM1105_GPIOVAL));
325 348
326 return 0; 349 return 0;
327} 350}
@@ -440,7 +463,7 @@ static irqreturn_t dm1105dvb_irq(int irq, void *dev_id)
440 case (INTSTS_TSIRQ | INTSTS_IR): 463 case (INTSTS_TSIRQ | INTSTS_IR):
441 dm1105dvb->nextwrp = inl(dm_io_mem(DM1105_WRP)) - 464 dm1105dvb->nextwrp = inl(dm_io_mem(DM1105_WRP)) -
442 inl(dm_io_mem(DM1105_STADR)); 465 inl(dm_io_mem(DM1105_STADR));
443 schedule_work(&dm1105dvb->work); 466 queue_work(dm1105dvb->wq, &dm1105dvb->work);
444 break; 467 break;
445 case INTSTS_IR: 468 case INTSTS_IR:
446 dm1105dvb->ir.ir_command = inl(dm_io_mem(DM1105_IRCODE)); 469 dm1105dvb->ir.ir_command = inl(dm_io_mem(DM1105_IRCODE));
@@ -567,46 +590,44 @@ static int __devinit frontend_init(struct dm1105dvb *dm1105dvb)
567 int ret; 590 int ret;
568 591
569 switch (dm1105dvb->pdev->subsystem_device) { 592 switch (dm1105dvb->pdev->subsystem_device) {
570 case PCI_DEVICE_ID_DW2002: 593 case PCI_DEVICE_ID_DW2004:
571 dm1105dvb->fe = dvb_attach( 594 dm1105dvb->fe = dvb_attach(
572 stv0299_attach, &sharp_z0194a_config, 595 cx24116_attach, &serit_sp2633_config,
573 &dm1105dvb->i2c_adap); 596 &dm1105dvb->i2c_adap);
597 if (dm1105dvb->fe)
598 dm1105dvb->fe->ops.set_voltage = dm1105dvb_set_voltage;
574 599
600 break;
601 default:
602 dm1105dvb->fe = dvb_attach(
603 stv0299_attach, &sharp_z0194a_config,
604 &dm1105dvb->i2c_adap);
575 if (dm1105dvb->fe) { 605 if (dm1105dvb->fe) {
576 dm1105dvb->fe->ops.set_voltage = 606 dm1105dvb->fe->ops.set_voltage =
577 dm1105dvb_set_voltage; 607 dm1105dvb_set_voltage;
578 dvb_attach(dvb_pll_attach, dm1105dvb->fe, 0x60, 608 dvb_attach(dvb_pll_attach, dm1105dvb->fe, 0x60,
579 &dm1105dvb->i2c_adap, DVB_PLL_OPERA1); 609 &dm1105dvb->i2c_adap, DVB_PLL_OPERA1);
610 break;
580 } 611 }
581 612
582 if (!dm1105dvb->fe) { 613 dm1105dvb->fe = dvb_attach(
583 dm1105dvb->fe = dvb_attach( 614 stv0288_attach, &earda_config,
584 stv0288_attach, &earda_config, 615 &dm1105dvb->i2c_adap);
585 &dm1105dvb->i2c_adap); 616 if (dm1105dvb->fe) {
586 if (dm1105dvb->fe) { 617 dm1105dvb->fe->ops.set_voltage =
587 dm1105dvb->fe->ops.set_voltage = 618 dm1105dvb_set_voltage;
588 dm1105dvb_set_voltage; 619 dvb_attach(stb6000_attach, dm1105dvb->fe, 0x61,
589 dvb_attach(stb6000_attach, dm1105dvb->fe, 0x61, 620 &dm1105dvb->i2c_adap);
590 &dm1105dvb->i2c_adap); 621 break;
591 }
592 } 622 }
593 623
594 if (!dm1105dvb->fe) {
595 dm1105dvb->fe = dvb_attach(
596 si21xx_attach, &serit_config,
597 &dm1105dvb->i2c_adap);
598 if (dm1105dvb->fe)
599 dm1105dvb->fe->ops.set_voltage =
600 dm1105dvb_set_voltage;
601 }
602 break;
603 case PCI_DEVICE_ID_DW2004:
604 dm1105dvb->fe = dvb_attach( 624 dm1105dvb->fe = dvb_attach(
605 cx24116_attach, &serit_sp2633_config, 625 si21xx_attach, &serit_config,
606 &dm1105dvb->i2c_adap); 626 &dm1105dvb->i2c_adap);
607 if (dm1105dvb->fe) 627 if (dm1105dvb->fe)
608 dm1105dvb->fe->ops.set_voltage = dm1105dvb_set_voltage; 628 dm1105dvb->fe->ops.set_voltage =
609 break; 629 dm1105dvb_set_voltage;
630
610 } 631 }
611 632
612 if (!dm1105dvb->fe) { 633 if (!dm1105dvb->fe) {
@@ -630,10 +651,17 @@ static void __devinit dm1105dvb_read_mac(struct dm1105dvb *dm1105dvb, u8 *mac)
630 static u8 command[1] = { 0x28 }; 651 static u8 command[1] = { 0x28 };
631 652
632 struct i2c_msg msg[] = { 653 struct i2c_msg msg[] = {
633 { .addr = IIC_24C01_addr >> 1, .flags = 0, 654 {
634 .buf = command, .len = 1 }, 655 .addr = IIC_24C01_addr >> 1,
635 { .addr = IIC_24C01_addr >> 1, .flags = I2C_M_RD, 656 .flags = 0,
636 .buf = mac, .len = 6 }, 657 .buf = command,
658 .len = 1
659 }, {
660 .addr = IIC_24C01_addr >> 1,
661 .flags = I2C_M_RD,
662 .buf = mac,
663 .len = 6
664 },
637 }; 665 };
638 666
639 dm1105_i2c_xfer(&dm1105dvb->i2c_adap, msg , 2); 667 dm1105_i2c_xfer(&dm1105dvb->i2c_adap, msg , 2);
@@ -752,14 +780,22 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
752 dm1105_ir_init(dm1105dvb); 780 dm1105_ir_init(dm1105dvb);
753 781
754 INIT_WORK(&dm1105dvb->work, dm1105_dmx_buffer); 782 INIT_WORK(&dm1105dvb->work, dm1105_dmx_buffer);
783 sprintf(dm1105dvb->wqn, "%s/%d", dvb_adapter->name, dvb_adapter->num);
784 dm1105dvb->wq = create_singlethread_workqueue(dm1105dvb->wqn);
785 if (!dm1105dvb->wq)
786 goto err_dvb_net;
755 787
756 ret = request_irq(pdev->irq, dm1105dvb_irq, IRQF_SHARED, 788 ret = request_irq(pdev->irq, dm1105dvb_irq, IRQF_SHARED,
757 DRIVER_NAME, dm1105dvb); 789 DRIVER_NAME, dm1105dvb);
758 if (ret < 0) 790 if (ret < 0)
759 goto err_free_irq; 791 goto err_workqueue;
760 792
761 return 0; 793 return 0;
762 794
795err_workqueue:
796 destroy_workqueue(dm1105dvb->wq);
797err_dvb_net:
798 dvb_net_release(&dm1105dvb->dvbnet);
763err_disconnect_frontend: 799err_disconnect_frontend:
764 dmx->disconnect_frontend(dmx); 800 dmx->disconnect_frontend(dmx);
765err_remove_mem_frontend: 801err_remove_mem_frontend:
@@ -776,8 +812,6 @@ err_i2c_del_adapter:
776 i2c_del_adapter(&dm1105dvb->i2c_adap); 812 i2c_del_adapter(&dm1105dvb->i2c_adap);
777err_dm1105dvb_hw_exit: 813err_dm1105dvb_hw_exit:
778 dm1105dvb_hw_exit(dm1105dvb); 814 dm1105dvb_hw_exit(dm1105dvb);
779err_free_irq:
780 free_irq(pdev->irq, dm1105dvb);
781err_pci_iounmap: 815err_pci_iounmap:
782 pci_iounmap(pdev, dm1105dvb->io_mem); 816 pci_iounmap(pdev, dm1105dvb->io_mem);
783err_pci_release_regions: 817err_pci_release_regions:
@@ -834,6 +868,11 @@ static struct pci_device_id dm1105_id_table[] __devinitdata = {
834 .subvendor = PCI_ANY_ID, 868 .subvendor = PCI_ANY_ID,
835 .subdevice = PCI_DEVICE_ID_DW2004, 869 .subdevice = PCI_DEVICE_ID_DW2004,
836 }, { 870 }, {
871 .vendor = PCI_VENDOR_ID_AXESS,
872 .device = PCI_DEVICE_ID_DM05,
873 .subvendor = PCI_VENDOR_ID_AXESS,
874 .subdevice = PCI_DEVICE_ID_DM05,
875 }, {
837 /* empty */ 876 /* empty */
838 }, 877 },
839}; 878};
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index c35fbb8d8f4a..6d6121eb5d59 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -244,19 +244,13 @@ static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count,
244{ 244{
245 struct dvb_device *dvbdev = file->private_data; 245 struct dvb_device *dvbdev = file->private_data;
246 struct dmxdev *dmxdev = dvbdev->priv; 246 struct dmxdev *dmxdev = dvbdev->priv;
247 int ret;
248 247
249 if (dmxdev->exit) { 248 if (dmxdev->exit)
250 mutex_unlock(&dmxdev->mutex);
251 return -ENODEV; 249 return -ENODEV;
252 }
253 250
254 //mutex_lock(&dmxdev->mutex); 251 return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer,
255 ret = dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer, 252 file->f_flags & O_NONBLOCK,
256 file->f_flags & O_NONBLOCK, 253 buf, count, ppos);
257 buf, count, ppos);
258 //mutex_unlock(&dmxdev->mutex);
259 return ret;
260} 254}
261 255
262static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev, 256static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index e2eca0b1fe7c..cfe2768d24af 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -38,6 +38,16 @@
38*/ 38*/
39// #define DVB_DEMUX_SECTION_LOSS_LOG 39// #define DVB_DEMUX_SECTION_LOSS_LOG
40 40
41static int dvb_demux_tscheck;
42module_param(dvb_demux_tscheck, int, 0644);
43MODULE_PARM_DESC(dvb_demux_tscheck,
44 "enable transport stream continuity and TEI check");
45
46#define dprintk_tscheck(x...) do { \
47 if (dvb_demux_tscheck && printk_ratelimit()) \
48 printk(x); \
49 } while (0)
50
41/****************************************************************************** 51/******************************************************************************
42 * static inlined helper functions 52 * static inlined helper functions
43 ******************************************************************************/ 53 ******************************************************************************/
@@ -376,6 +386,36 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
376 u16 pid = ts_pid(buf); 386 u16 pid = ts_pid(buf);
377 int dvr_done = 0; 387 int dvr_done = 0;
378 388
389 if (dvb_demux_tscheck) {
390 if (!demux->cnt_storage)
391 demux->cnt_storage = vmalloc(MAX_PID + 1);
392
393 if (!demux->cnt_storage) {
394 printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
395 dvb_demux_tscheck = 0;
396 goto no_dvb_demux_tscheck;
397 }
398
399 /* check pkt counter */
400 if (pid < MAX_PID) {
401 if (buf[1] & 0x80)
402 dprintk_tscheck("TEI detected. "
403 "PID=0x%x data1=0x%x\n",
404 pid, buf[1]);
405
406 if ((buf[3] & 0xf) != demux->cnt_storage[pid])
407 dprintk_tscheck("TS packet counter mismatch. "
408 "PID=0x%x expected 0x%x "
409 "got 0x%x\n",
410 pid, demux->cnt_storage[pid],
411 buf[3] & 0xf);
412
413 demux->cnt_storage[pid] = ((buf[3] & 0xf) + 1)&0xf;
414 };
415 /* end check */
416 };
417no_dvb_demux_tscheck:
418
379 list_for_each_entry(feed, &demux->feed_list, list_head) { 419 list_for_each_entry(feed, &demux->feed_list, list_head) {
380 if ((feed->pid != pid) && (feed->pid != 0x2000)) 420 if ((feed->pid != pid) && (feed->pid != 0x2000))
381 continue; 421 continue;
@@ -1160,6 +1200,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
1160 int i; 1200 int i;
1161 struct dmx_demux *dmx = &dvbdemux->dmx; 1201 struct dmx_demux *dmx = &dvbdemux->dmx;
1162 1202
1203 dvbdemux->cnt_storage = NULL;
1163 dvbdemux->users = 0; 1204 dvbdemux->users = 0;
1164 dvbdemux->filter = vmalloc(dvbdemux->filternum * sizeof(struct dvb_demux_filter)); 1205 dvbdemux->filter = vmalloc(dvbdemux->filternum * sizeof(struct dvb_demux_filter));
1165 1206
@@ -1226,6 +1267,7 @@ EXPORT_SYMBOL(dvb_dmx_init);
1226 1267
1227void dvb_dmx_release(struct dvb_demux *dvbdemux) 1268void dvb_dmx_release(struct dvb_demux *dvbdemux)
1228{ 1269{
1270 vfree(dvbdemux->cnt_storage);
1229 vfree(dvbdemux->filter); 1271 vfree(dvbdemux->filter);
1230 vfree(dvbdemux->feed); 1272 vfree(dvbdemux->feed);
1231} 1273}
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index 2c5f915329ca..2fe05d03240d 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -42,6 +42,8 @@
42 42
43#define DVB_DEMUX_MASK_MAX 18 43#define DVB_DEMUX_MASK_MAX 18
44 44
45#define MAX_PID 0x1fff
46
45struct dvb_demux_filter { 47struct dvb_demux_filter {
46 struct dmx_section_filter filter; 48 struct dmx_section_filter filter;
47 u8 maskandmode[DMX_MAX_FILTER_SIZE]; 49 u8 maskandmode[DMX_MAX_FILTER_SIZE];
@@ -127,6 +129,8 @@ struct dvb_demux {
127 129
128 struct mutex mutex; 130 struct mutex mutex;
129 spinlock_t lock; 131 spinlock_t lock;
132
133 uint8_t *cnt_storage; /* for TS continuity check */
130}; 134};
131 135
132int dvb_dmx_init(struct dvb_demux *dvbdemux); 136int dvb_dmx_init(struct dvb_demux *dvbdemux);
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index ebc78157b9b8..f50ca7292a7d 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -543,6 +543,7 @@ restart:
543 543
544 if (kthread_should_stop() || dvb_frontend_is_exiting(fe)) { 544 if (kthread_should_stop() || dvb_frontend_is_exiting(fe)) {
545 /* got signal or quitting */ 545 /* got signal or quitting */
546 fepriv->exit = 1;
546 break; 547 break;
547 } 548 }
548 549
@@ -656,6 +657,7 @@ restart:
656 } 657 }
657 658
658 fepriv->thread = NULL; 659 fepriv->thread = NULL;
660 fepriv->exit = 0;
659 mb(); 661 mb();
660 662
661 dvb_frontend_wakeup(fe); 663 dvb_frontend_wakeup(fe);
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index a454ee8f1e43..479dd05762a5 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -447,6 +447,15 @@ static int dvb_uevent(struct device *dev, struct kobj_uevent_env *env)
447 return 0; 447 return 0;
448} 448}
449 449
450static char *dvb_nodename(struct device *dev)
451{
452 struct dvb_device *dvbdev = dev_get_drvdata(dev);
453
454 return kasprintf(GFP_KERNEL, "dvb/adapter%d/%s%d",
455 dvbdev->adapter->num, dnames[dvbdev->type], dvbdev->id);
456}
457
458
450static int __init init_dvbdev(void) 459static int __init init_dvbdev(void)
451{ 460{
452 int retval; 461 int retval;
@@ -469,6 +478,7 @@ static int __init init_dvbdev(void)
469 goto error; 478 goto error;
470 } 479 }
471 dvb_class->dev_uevent = dvb_uevent; 480 dvb_class->dev_uevent = dvb_uevent;
481 dvb_class->nodename = dvb_nodename;
472 return 0; 482 return 0;
473 483
474error: 484error:
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 1bb66e1ed5a7..496c1a37034c 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -261,6 +261,7 @@ config DVB_USB_DW2102
261 select DVB_STB6000 if !DVB_FE_CUSTOMISE 261 select DVB_STB6000 if !DVB_FE_CUSTOMISE
262 select DVB_CX24116 if !DVB_FE_CUSTOMISE 262 select DVB_CX24116 if !DVB_FE_CUSTOMISE
263 select DVB_SI21XX if !DVB_FE_CUSTOMISE 263 select DVB_SI21XX if !DVB_FE_CUSTOMISE
264 select DVB_TDA10021 if !DVB_FE_CUSTOMISE
264 help 265 help
265 Say Y here to support the DvbWorld DVB-S/S2 USB2.0 receivers 266 Say Y here to support the DvbWorld DVB-S/S2 USB2.0 receivers
266 and the TeVii S650. 267 and the TeVii S650.
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index 53bfc8e42fb9..4cb31e7c13c2 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -40,7 +40,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
40static DEFINE_MUTEX(af9015_usb_mutex); 40static DEFINE_MUTEX(af9015_usb_mutex);
41 41
42static struct af9015_config af9015_config; 42static struct af9015_config af9015_config;
43static struct dvb_usb_device_properties af9015_properties[2]; 43static struct dvb_usb_device_properties af9015_properties[3];
44static int af9015_properties_count = ARRAY_SIZE(af9015_properties); 44static int af9015_properties_count = ARRAY_SIZE(af9015_properties);
45 45
46static struct af9013_config af9015_af9013_config[] = { 46static struct af9013_config af9015_af9013_config[] = {
@@ -538,7 +538,7 @@ exit:
538/* dump eeprom */ 538/* dump eeprom */
539static int af9015_eeprom_dump(struct dvb_usb_device *d) 539static int af9015_eeprom_dump(struct dvb_usb_device *d)
540{ 540{
541 char buf[52], buf2[4]; 541 char buf[4+3*16+1], buf2[4];
542 u8 reg, val; 542 u8 reg, val;
543 543
544 for (reg = 0; ; reg++) { 544 for (reg = 0; ; reg++) {
@@ -1261,7 +1261,11 @@ static struct usb_device_id af9015_usb_table[] = {
1261 {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_2)}, 1261 {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_2)},
1262 {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_3)}, 1262 {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_395U_3)},
1263 {USB_DEVICE(USB_VID_AFATECH, USB_PID_TREKSTOR_DVBT)}, 1263 {USB_DEVICE(USB_VID_AFATECH, USB_PID_TREKSTOR_DVBT)},
1264 {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850)}, 1264/* 20 */{USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850)},
1265 {USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A805)},
1266 {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU)},
1267 {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810)},
1268 {USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03)},
1265 {0}, 1269 {0},
1266}; 1270};
1267MODULE_DEVICE_TABLE(usb, af9015_usb_table); 1271MODULE_DEVICE_TABLE(usb, af9015_usb_table);
@@ -1321,7 +1325,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
1321 1325
1322 .i2c_algo = &af9015_i2c_algo, 1326 .i2c_algo = &af9015_i2c_algo,
1323 1327
1324 .num_device_descs = 9, 1328 .num_device_descs = 9, /* max 9 */
1325 .devices = { 1329 .devices = {
1326 { 1330 {
1327 .name = "Afatech AF9015 DVB-T USB2.0 stick", 1331 .name = "Afatech AF9015 DVB-T USB2.0 stick",
@@ -1426,7 +1430,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
1426 1430
1427 .i2c_algo = &af9015_i2c_algo, 1431 .i2c_algo = &af9015_i2c_algo,
1428 1432
1429 .num_device_descs = 9, 1433 .num_device_descs = 9, /* max 9 */
1430 .devices = { 1434 .devices = {
1431 { 1435 {
1432 .name = "Xtensions XD-380", 1436 .name = "Xtensions XD-380",
@@ -1478,7 +1482,85 @@ static struct dvb_usb_device_properties af9015_properties[] = {
1478 .warm_ids = {NULL}, 1482 .warm_ids = {NULL},
1479 }, 1483 },
1480 } 1484 }
1481 } 1485 }, {
1486 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
1487
1488 .usb_ctrl = DEVICE_SPECIFIC,
1489 .download_firmware = af9015_download_firmware,
1490 .firmware = "dvb-usb-af9015.fw",
1491 .no_reconnect = 1,
1492
1493 .size_of_priv = sizeof(struct af9015_state), \
1494
1495 .num_adapters = 2,
1496 .adapter = {
1497 {
1498 .caps = DVB_USB_ADAP_HAS_PID_FILTER |
1499 DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
1500
1501 .pid_filter_count = 32,
1502 .pid_filter = af9015_pid_filter,
1503 .pid_filter_ctrl = af9015_pid_filter_ctrl,
1504
1505 .frontend_attach =
1506 af9015_af9013_frontend_attach,
1507 .tuner_attach = af9015_tuner_attach,
1508 .stream = {
1509 .type = USB_BULK,
1510 .count = 6,
1511 .endpoint = 0x84,
1512 },
1513 },
1514 {
1515 .frontend_attach =
1516 af9015_af9013_frontend_attach,
1517 .tuner_attach = af9015_tuner_attach,
1518 .stream = {
1519 .type = USB_BULK,
1520 .count = 6,
1521 .endpoint = 0x85,
1522 .u = {
1523 .bulk = {
1524 .buffersize =
1525 TS_USB20_MAX_PACKET_SIZE,
1526 }
1527 }
1528 },
1529 }
1530 },
1531
1532 .identify_state = af9015_identify_state,
1533
1534 .rc_query = af9015_rc_query,
1535 .rc_interval = 150,
1536
1537 .i2c_algo = &af9015_i2c_algo,
1538
1539 .num_device_descs = 4, /* max 9 */
1540 .devices = {
1541 {
1542 .name = "AverMedia AVerTV Volar GPS 805 (A805)",
1543 .cold_ids = {&af9015_usb_table[21], NULL},
1544 .warm_ids = {NULL},
1545 },
1546 {
1547 .name = "Conceptronic USB2.0 DVB-T CTVDIGRCU " \
1548 "V3.0",
1549 .cold_ids = {&af9015_usb_table[22], NULL},
1550 .warm_ids = {NULL},
1551 },
1552 {
1553 .name = "KWorld Digial MC-810",
1554 .cold_ids = {&af9015_usb_table[23], NULL},
1555 .warm_ids = {NULL},
1556 },
1557 {
1558 .name = "Genius TVGo DVB-T03",
1559 .cold_ids = {&af9015_usb_table[24], NULL},
1560 .warm_ids = {NULL},
1561 },
1562 }
1563 },
1482}; 1564};
1483 1565
1484static int af9015_usb_probe(struct usb_interface *intf, 1566static int af9015_usb_probe(struct usb_interface *intf,
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 8ddbadf62194..818b2ab584bf 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -1346,9 +1346,9 @@ static int dib0700_xc5000_tuner_callback(void *priv, int component,
1346 if (command == XC5000_TUNER_RESET) { 1346 if (command == XC5000_TUNER_RESET) {
1347 /* Reset the tuner */ 1347 /* Reset the tuner */
1348 dib0700_set_gpio(adap->dev, GPIO1, GPIO_OUT, 0); 1348 dib0700_set_gpio(adap->dev, GPIO1, GPIO_OUT, 0);
1349 msleep(330); /* from Windows USB trace */ 1349 msleep(10);
1350 dib0700_set_gpio(adap->dev, GPIO1, GPIO_OUT, 1); 1350 dib0700_set_gpio(adap->dev, GPIO1, GPIO_OUT, 1);
1351 msleep(330); /* from Windows USB trace */ 1351 msleep(10);
1352 } else { 1352 } else {
1353 err("xc5000: unknown tuner callback command: %d\n", command); 1353 err("xc5000: unknown tuner callback command: %d\n", command);
1354 return -EINVAL; 1354 return -EINVAL;
@@ -1493,6 +1493,10 @@ struct usb_device_id dib0700_usb_id_table[] = {
1493 { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_TIGER_ATSC_B210) }, 1493 { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_HAUPPAUGE_TIGER_ATSC_B210) },
1494 { USB_DEVICE(USB_VID_YUAN, USB_PID_YUAN_MC770) }, 1494 { USB_DEVICE(USB_VID_YUAN, USB_PID_YUAN_MC770) },
1495 { USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DTT) }, 1495 { USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DTT) },
1496/* 50 */{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DTT_Dlx) },
1497 { USB_DEVICE(USB_VID_LEADTEK, USB_PID_WINFAST_DTV_DONGLE_H) },
1498 { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_T3) },
1499 { USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_T5) },
1496 { 0 } /* Terminating entry */ 1500 { 0 } /* Terminating entry */
1497}; 1501};
1498MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table); 1502MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -1692,7 +1696,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
1692 }, 1696 },
1693 }, 1697 },
1694 1698
1695 .num_device_descs = 11, 1699 .num_device_descs = 12,
1696 .devices = { 1700 .devices = {
1697 { "DiBcom STK7070P reference design", 1701 { "DiBcom STK7070P reference design",
1698 { &dib0700_usb_id_table[15], NULL }, 1702 { &dib0700_usb_id_table[15], NULL },
@@ -1726,8 +1730,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
1726 { &dib0700_usb_id_table[30], NULL }, 1730 { &dib0700_usb_id_table[30], NULL },
1727 { NULL }, 1731 { NULL },
1728 }, 1732 },
1729 { "Terratec Cinergy T USB XXS", 1733 { "Terratec Cinergy T USB XXS/ T3",
1730 { &dib0700_usb_id_table[33], NULL }, 1734 { &dib0700_usb_id_table[33],
1735 &dib0700_usb_id_table[52], NULL },
1731 { NULL }, 1736 { NULL },
1732 }, 1737 },
1733 { "Elgato EyeTV DTT", 1738 { "Elgato EyeTV DTT",
@@ -1738,6 +1743,10 @@ struct dvb_usb_device_properties dib0700_devices[] = {
1738 { &dib0700_usb_id_table[45], NULL }, 1743 { &dib0700_usb_id_table[45], NULL },
1739 { NULL }, 1744 { NULL },
1740 }, 1745 },
1746 { "Elgato EyeTV Dtt Dlx PD378S",
1747 { &dib0700_usb_id_table[50], NULL },
1748 { NULL },
1749 },
1741 }, 1750 },
1742 1751
1743 .rc_interval = DEFAULT_RC_INTERVAL, 1752 .rc_interval = DEFAULT_RC_INTERVAL,
@@ -1784,8 +1793,9 @@ struct dvb_usb_device_properties dib0700_devices[] = {
1784 { &dib0700_usb_id_table[36], NULL }, 1793 { &dib0700_usb_id_table[36], NULL },
1785 { NULL }, 1794 { NULL },
1786 }, 1795 },
1787 { "Terratec Cinergy DT USB XS Diversity", 1796 { "Terratec Cinergy DT USB XS Diversity/ T5",
1788 { &dib0700_usb_id_table[43], NULL }, 1797 { &dib0700_usb_id_table[43],
1798 &dib0700_usb_id_table[53], NULL},
1789 { NULL }, 1799 { NULL },
1790 }, 1800 },
1791 { "Sony PlayTV", 1801 { "Sony PlayTV",
@@ -1812,7 +1822,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
1812 }, 1822 },
1813 }, 1823 },
1814 1824
1815 .num_device_descs = 7, 1825 .num_device_descs = 8,
1816 .devices = { 1826 .devices = {
1817 { "Terratec Cinergy HT USB XE", 1827 { "Terratec Cinergy HT USB XE",
1818 { &dib0700_usb_id_table[27], NULL }, 1828 { &dib0700_usb_id_table[27], NULL },
@@ -1842,6 +1852,11 @@ struct dvb_usb_device_properties dib0700_devices[] = {
1842 { &dib0700_usb_id_table[48], NULL }, 1852 { &dib0700_usb_id_table[48], NULL },
1843 { NULL }, 1853 { NULL },
1844 }, 1854 },
1855 { "Leadtek WinFast DTV Dongle H",
1856 { &dib0700_usb_id_table[51], NULL },
1857 { NULL },
1858 },
1859
1845 }, 1860 },
1846 .rc_interval = DEFAULT_RC_INTERVAL, 1861 .rc_interval = DEFAULT_RC_INTERVAL,
1847 .rc_key_map = dib0700_rc_keys, 1862 .rc_key_map = dib0700_rc_keys,
diff --git a/drivers/media/dvb/dvb-usb/dibusb-common.c b/drivers/media/dvb/dvb-usb/dibusb-common.c
index 8ee6cd4da9e7..8dbad1ec53c4 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-common.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-common.c
@@ -133,14 +133,17 @@ static int dibusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
133 133
134 for (i = 0; i < num; i++) { 134 for (i = 0; i < num; i++) {
135 /* write/read request */ 135 /* write/read request */
136 if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { 136 if (i+1 < num && (msg[i].flags & I2C_M_RD) == 0
137 && (msg[i+1].flags & I2C_M_RD)) {
137 if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len, 138 if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,
138 msg[i+1].buf,msg[i+1].len) < 0) 139 msg[i+1].buf,msg[i+1].len) < 0)
139 break; 140 break;
140 i++; 141 i++;
141 } else 142 } else if ((msg[i].flags & I2C_M_RD) == 0) {
142 if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,NULL,0) < 0) 143 if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,NULL,0) < 0)
143 break; 144 break;
145 } else
146 break;
144 } 147 }
145 148
146 mutex_unlock(&d->i2c_mutex); 149 mutex_unlock(&d->i2c_mutex);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index f506c74119f3..9593b7289994 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -80,6 +80,7 @@
80#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d 80#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d
81#define USB_PID_COMPRO_VIDEOMATE_U500 0x1e78 81#define USB_PID_COMPRO_VIDEOMATE_U500 0x1e78
82#define USB_PID_COMPRO_VIDEOMATE_U500_PC 0x1e80 82#define USB_PID_COMPRO_VIDEOMATE_U500_PC 0x1e80
83#define USB_PID_CONCEPTRONIC_CTVDIGRCU 0xe397
83#define USB_PID_CONEXANT_D680_DMB 0x86d6 84#define USB_PID_CONEXANT_D680_DMB 0x86d6
84#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064 85#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064
85#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065 86#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065
@@ -97,6 +98,7 @@
97#define USB_PID_DPOSH_M9206_COLD 0x9206 98#define USB_PID_DPOSH_M9206_COLD 0x9206
98#define USB_PID_DPOSH_M9206_WARM 0xa090 99#define USB_PID_DPOSH_M9206_WARM 0xa090
99#define USB_PID_UNIWILL_STK7700P 0x6003 100#define USB_PID_UNIWILL_STK7700P 0x6003
101#define USB_PID_GENIUS_TVGO_DVB_T03 0x4012
100#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0 102#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0
101#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1 103#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1
102#define USB_PID_INTEL_CE9500 0x9500 104#define USB_PID_INTEL_CE9500 0x9500
@@ -104,6 +106,7 @@
104#define USB_PID_KWORLD_395U 0xe396 106#define USB_PID_KWORLD_395U 0xe396
105#define USB_PID_KWORLD_395U_2 0xe39b 107#define USB_PID_KWORLD_395U_2 0xe39b
106#define USB_PID_KWORLD_395U_3 0xe395 108#define USB_PID_KWORLD_395U_3 0xe395
109#define USB_PID_KWORLD_MC810 0xc810
107#define USB_PID_KWORLD_PC160_2T 0xc160 110#define USB_PID_KWORLD_PC160_2T 0xc160
108#define USB_PID_KWORLD_VSTREAM_COLD 0x17de 111#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
109#define USB_PID_KWORLD_VSTREAM_WARM 0x17df 112#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
@@ -171,6 +174,7 @@
171#define USB_PID_AVERMEDIA_A309 0xa309 174#define USB_PID_AVERMEDIA_A309 0xa309
172#define USB_PID_AVERMEDIA_A310 0xa310 175#define USB_PID_AVERMEDIA_A310 0xa310
173#define USB_PID_AVERMEDIA_A850 0x850a 176#define USB_PID_AVERMEDIA_A850 0x850a
177#define USB_PID_AVERMEDIA_A805 0xa805
174#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006 178#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
175#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a 179#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a
176#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081 180#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081
@@ -178,6 +182,8 @@
178#define USB_PID_TERRATEC_CINERGY_HT_EXPRESS 0x0060 182#define USB_PID_TERRATEC_CINERGY_HT_EXPRESS 0x0060
179#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062 183#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062
180#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078 184#define USB_PID_TERRATEC_CINERGY_T_XXS 0x0078
185#define USB_PID_TERRATEC_T3 0x10a0
186#define USB_PID_TERRATEC_T5 0x10a1
181#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e 187#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
182#define USB_PID_PINNACLE_PCTV2000E 0x022c 188#define USB_PID_PINNACLE_PCTV2000E 0x022c
183#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228 189#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
@@ -222,6 +228,7 @@
222#define USB_PID_WINFAST_DTV_DONGLE_COLD 0x6025 228#define USB_PID_WINFAST_DTV_DONGLE_COLD 0x6025
223#define USB_PID_WINFAST_DTV_DONGLE_WARM 0x6026 229#define USB_PID_WINFAST_DTV_DONGLE_WARM 0x6026
224#define USB_PID_WINFAST_DTV_DONGLE_STK7700P 0x6f00 230#define USB_PID_WINFAST_DTV_DONGLE_STK7700P 0x6f00
231#define USB_PID_WINFAST_DTV_DONGLE_H 0x60f6
225#define USB_PID_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01 232#define USB_PID_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01
226#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029 233#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029
227#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200 234#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200
@@ -251,5 +258,6 @@
251#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807 258#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
252#define USB_PID_SONY_PLAYTV 0x0003 259#define USB_PID_SONY_PLAYTV 0x0003
253#define USB_PID_ELGATO_EYETV_DTT 0x0021 260#define USB_PID_ELGATO_EYETV_DTT 0x0021
261#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
254 262
255#endif 263#endif
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 2d5352e54dc0..e441d274e6c1 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -196,7 +196,7 @@ struct dvb_usb_device_properties {
196#define CYPRESS_FX2 3 196#define CYPRESS_FX2 3
197 int usb_ctrl; 197 int usb_ctrl;
198 int (*download_firmware) (struct usb_device *, const struct firmware *); 198 int (*download_firmware) (struct usb_device *, const struct firmware *);
199 const char firmware[FIRMWARE_NAME_MAX]; 199 const char *firmware;
200 int no_reconnect; 200 int no_reconnect;
201 201
202 int size_of_priv; 202 int size_of_priv;
@@ -223,7 +223,7 @@ struct dvb_usb_device_properties {
223 int generic_bulk_ctrl_endpoint; 223 int generic_bulk_ctrl_endpoint;
224 224
225 int num_device_descs; 225 int num_device_descs;
226 struct dvb_usb_device_description devices[11]; 226 struct dvb_usb_device_description devices[12];
227}; 227};
228 228
229/** 229/**
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index c65f273ff313..75de49c0d943 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -1,7 +1,7 @@
1/* DVB USB framework compliant Linux driver for the 1/* DVB USB framework compliant Linux driver for the
2* DVBWorld DVB-S 2101, 2102, DVB-S2 2104 Card 2* DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101,
3* 3* TeVii S600, S650 Cards
4* Copyright (C) 2008 Igor M. Liplianin (liplianin@me.by) 4* Copyright (C) 2008,2009 Igor M. Liplianin (liplianin@me.by)
5* 5*
6* This program is free software; you can redistribute it and/or modify it 6* This program is free software; you can redistribute it and/or modify it
7* under the terms of the GNU General Public License as published by the 7* under the terms of the GNU General Public License as published by the
@@ -17,6 +17,7 @@
17#include "stb6000.h" 17#include "stb6000.h"
18#include "eds1547.h" 18#include "eds1547.h"
19#include "cx24116.h" 19#include "cx24116.h"
20#include "tda1002x.h"
20 21
21#ifndef USB_PID_DW2102 22#ifndef USB_PID_DW2102
22#define USB_PID_DW2102 0x2102 23#define USB_PID_DW2102 0x2102
@@ -26,10 +27,18 @@
26#define USB_PID_DW2104 0x2104 27#define USB_PID_DW2104 0x2104
27#endif 28#endif
28 29
30#ifndef USB_PID_DW3101
31#define USB_PID_DW3101 0x3101
32#endif
33
29#ifndef USB_PID_CINERGY_S 34#ifndef USB_PID_CINERGY_S
30#define USB_PID_CINERGY_S 0x0064 35#define USB_PID_CINERGY_S 0x0064
31#endif 36#endif
32 37
38#ifndef USB_PID_TEVII_S650
39#define USB_PID_TEVII_S650 0xd650
40#endif
41
33#define DW210X_READ_MSG 0 42#define DW210X_READ_MSG 0
34#define DW210X_WRITE_MSG 1 43#define DW210X_WRITE_MSG 1
35 44
@@ -40,18 +49,21 @@
40#define DW2102_VOLTAGE_CTRL (0x1800) 49#define DW2102_VOLTAGE_CTRL (0x1800)
41#define DW2102_RC_QUERY (0x1a00) 50#define DW2102_RC_QUERY (0x1a00)
42 51
43struct dw210x_state { 52struct dvb_usb_rc_keys_table {
44 u32 last_key_pressed; 53 struct dvb_usb_rc_key *rc_keys;
45}; 54 int rc_keys_size;
46struct dw210x_rc_keys {
47 u32 keycode;
48 u32 event;
49}; 55};
50 56
51/* debug */ 57/* debug */
52static int dvb_usb_dw2102_debug; 58static int dvb_usb_dw2102_debug;
53module_param_named(debug, dvb_usb_dw2102_debug, int, 0644); 59module_param_named(debug, dvb_usb_dw2102_debug, int, 0644);
54MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer (or-able))." DVB_USB_DEBUG_STATUS); 60MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))."
61 DVB_USB_DEBUG_STATUS);
62
63/* keymaps */
64static int ir_keymap;
65module_param_named(keymap, ir_keymap, int, 0644);
66MODULE_PARM_DESC(keymap, "set keymap 0=default 1=dvbworld 2=tevii 3=tbs ...");
55 67
56DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 68DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
57 69
@@ -79,7 +91,7 @@ static int dw210x_op_rw(struct usb_device *dev, u8 request, u16 value,
79static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], 91static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
80 int num) 92 int num)
81{ 93{
82struct dvb_usb_device *d = i2c_get_adapdata(adap); 94 struct dvb_usb_device *d = i2c_get_adapdata(adap);
83 int i = 0, ret = 0; 95 int i = 0, ret = 0;
84 u8 buf6[] = {0x2c, 0x05, 0xc0, 0, 0, 0, 0}; 96 u8 buf6[] = {0x2c, 0x05, 0xc0, 0, 0, 0, 0};
85 u16 value; 97 u16 value;
@@ -205,6 +217,7 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
205 mutex_unlock(&d->i2c_mutex); 217 mutex_unlock(&d->i2c_mutex);
206 return num; 218 return num;
207} 219}
220
208static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) 221static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num)
209{ 222{
210 struct dvb_usb_device *d = i2c_get_adapdata(adap); 223 struct dvb_usb_device *d = i2c_get_adapdata(adap);
@@ -219,7 +232,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
219 case 2: { 232 case 2: {
220 /* read */ 233 /* read */
221 /* first write first register number */ 234 /* first write first register number */
222 u8 ibuf [msg[1].len + 2], obuf[3]; 235 u8 ibuf[msg[1].len + 2], obuf[3];
223 obuf[0] = 0xd0; 236 obuf[0] = 0xd0;
224 obuf[1] = msg[0].len; 237 obuf[1] = msg[0].len;
225 obuf[2] = msg[0].buf[0]; 238 obuf[2] = msg[0].buf[0];
@@ -293,7 +306,7 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
293 case 2: { 306 case 2: {
294 /* read */ 307 /* read */
295 /* first write first register number */ 308 /* first write first register number */
296 u8 ibuf [msg[1].len + 2], obuf[3]; 309 u8 ibuf[msg[1].len + 2], obuf[3];
297 obuf[0] = 0xaa; 310 obuf[0] = 0xaa;
298 obuf[1] = msg[0].len; 311 obuf[1] = msg[0].len;
299 obuf[2] = msg[0].buf[0]; 312 obuf[2] = msg[0].buf[0];
@@ -360,6 +373,69 @@ static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], i
360 return num; 373 return num;
361} 374}
362 375
376static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
377 int num)
378{
379 struct dvb_usb_device *d = i2c_get_adapdata(adap);
380 int ret = 0, i;
381
382 if (!d)
383 return -ENODEV;
384 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
385 return -EAGAIN;
386
387 switch (num) {
388 case 2: {
389 /* read */
390 /* first write first register number */
391 u8 ibuf[msg[1].len + 2], obuf[3];
392 obuf[0] = msg[0].addr << 1;
393 obuf[1] = msg[0].len;
394 obuf[2] = msg[0].buf[0];
395 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
396 obuf, msg[0].len + 2, DW210X_WRITE_MSG);
397 /* second read registers */
398 ret = dw210x_op_rw(d->udev, 0xc3, 0x19 , 0,
399 ibuf, msg[1].len + 2, DW210X_READ_MSG);
400 memcpy(msg[1].buf, ibuf + 2, msg[1].len);
401
402 break;
403 }
404 case 1:
405 switch (msg[0].addr) {
406 case 0x60:
407 case 0x0c: {
408 /* write to register */
409 u8 obuf[msg[0].len + 2];
410 obuf[0] = msg[0].addr << 1;
411 obuf[1] = msg[0].len;
412 memcpy(obuf + 2, msg[0].buf, msg[0].len);
413 ret = dw210x_op_rw(d->udev, 0xc2, 0, 0,
414 obuf, msg[0].len + 2, DW210X_WRITE_MSG);
415 break;
416 }
417 case(DW2102_RC_QUERY): {
418 u8 ibuf[2];
419 ret = dw210x_op_rw(d->udev, 0xb8, 0, 0,
420 ibuf, 2, DW210X_READ_MSG);
421 memcpy(msg[0].buf, ibuf , 2);
422 break;
423 }
424 }
425
426 break;
427 }
428
429 for (i = 0; i < num; i++) {
430 deb_xfer("%02x:%02x: %s ", i, msg[i].addr,
431 msg[i].flags == 0 ? ">>>" : "<<<");
432 debug_dump(msg[i].buf, msg[i].len, deb_xfer);
433 }
434
435 mutex_unlock(&d->i2c_mutex);
436 return num;
437}
438
363static u32 dw210x_i2c_func(struct i2c_adapter *adapter) 439static u32 dw210x_i2c_func(struct i2c_adapter *adapter)
364{ 440{
365 return I2C_FUNC_I2C; 441 return I2C_FUNC_I2C;
@@ -385,6 +461,11 @@ static struct i2c_algorithm dw2104_i2c_algo = {
385 .functionality = dw210x_i2c_func, 461 .functionality = dw210x_i2c_func,
386}; 462};
387 463
464static struct i2c_algorithm dw3101_i2c_algo = {
465 .master_xfer = dw3101_i2c_transfer,
466 .functionality = dw210x_i2c_func,
467};
468
388static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6]) 469static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
389{ 470{
390 int i; 471 int i;
@@ -404,6 +485,7 @@ static int dw210x_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
404 debug_dump(eepromline, 16, deb_xfer); 485 debug_dump(eepromline, 16, deb_xfer);
405 } 486 }
406 } 487 }
488
407 memcpy(mac, eeprom + 8, 6); 489 memcpy(mac, eeprom + 8, 6);
408 return 0; 490 return 0;
409}; 491};
@@ -448,6 +530,11 @@ static struct si21xx_config serit_sp1511lhb_config = {
448 530
449}; 531};
450 532
533static struct tda10023_config dw3101_tda10023_config = {
534 .demod_address = 0x0c,
535 .invert = 1,
536};
537
451static int dw2104_frontend_attach(struct dvb_usb_adapter *d) 538static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
452{ 539{
453 if ((d->fe = dvb_attach(cx24116_attach, &dw2104_config, 540 if ((d->fe = dvb_attach(cx24116_attach, &dw2104_config,
@@ -460,6 +547,7 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
460} 547}
461 548
462static struct dvb_usb_device_properties dw2102_properties; 549static struct dvb_usb_device_properties dw2102_properties;
550static struct dvb_usb_device_properties dw2104_properties;
463 551
464static int dw2102_frontend_attach(struct dvb_usb_adapter *d) 552static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
465{ 553{
@@ -497,6 +585,17 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
497 return -EIO; 585 return -EIO;
498} 586}
499 587
588static int dw3101_frontend_attach(struct dvb_usb_adapter *d)
589{
590 d->fe = dvb_attach(tda10023_attach, &dw3101_tda10023_config,
591 &d->dev->i2c_adap, 0x48);
592 if (d->fe != NULL) {
593 info("Attached tda10023!\n");
594 return 0;
595 }
596 return -EIO;
597}
598
500static int dw2102_tuner_attach(struct dvb_usb_adapter *adap) 599static int dw2102_tuner_attach(struct dvb_usb_adapter *adap)
501{ 600{
502 dvb_attach(dvb_pll_attach, adap->fe, 0x60, 601 dvb_attach(dvb_pll_attach, adap->fe, 0x60,
@@ -512,6 +611,14 @@ static int dw2102_earda_tuner_attach(struct dvb_usb_adapter *adap)
512 return 0; 611 return 0;
513} 612}
514 613
614static int dw3101_tuner_attach(struct dvb_usb_adapter *adap)
615{
616 dvb_attach(dvb_pll_attach, adap->fe, 0x60,
617 &adap->dev->i2c_adap, DVB_PLL_TUA6034);
618
619 return 0;
620}
621
515static struct dvb_usb_rc_key dw210x_rc_keys[] = { 622static struct dvb_usb_rc_key dw210x_rc_keys[] = {
516 { 0xf8, 0x0a, KEY_Q }, /*power*/ 623 { 0xf8, 0x0a, KEY_Q }, /*power*/
517 { 0xf8, 0x0c, KEY_M }, /*mute*/ 624 { 0xf8, 0x0c, KEY_M }, /*mute*/
@@ -544,44 +651,147 @@ static struct dvb_usb_rc_key dw210x_rc_keys[] = {
544 { 0xf8, 0x40, KEY_F }, /*full*/ 651 { 0xf8, 0x40, KEY_F }, /*full*/
545 { 0xf8, 0x1e, KEY_W }, /*tvmode*/ 652 { 0xf8, 0x1e, KEY_W }, /*tvmode*/
546 { 0xf8, 0x1b, KEY_B }, /*recall*/ 653 { 0xf8, 0x1b, KEY_B }, /*recall*/
654};
547 655
656static struct dvb_usb_rc_key tevii_rc_keys[] = {
657 { 0xf8, 0x0a, KEY_POWER },
658 { 0xf8, 0x0c, KEY_MUTE },
659 { 0xf8, 0x11, KEY_1 },
660 { 0xf8, 0x12, KEY_2 },
661 { 0xf8, 0x13, KEY_3 },
662 { 0xf8, 0x14, KEY_4 },
663 { 0xf8, 0x15, KEY_5 },
664 { 0xf8, 0x16, KEY_6 },
665 { 0xf8, 0x17, KEY_7 },
666 { 0xf8, 0x18, KEY_8 },
667 { 0xf8, 0x19, KEY_9 },
668 { 0xf8, 0x10, KEY_0 },
669 { 0xf8, 0x1c, KEY_MENU },
670 { 0xf8, 0x0f, KEY_VOLUMEDOWN },
671 { 0xf8, 0x1a, KEY_LAST },
672 { 0xf8, 0x0e, KEY_OPEN },
673 { 0xf8, 0x04, KEY_RECORD },
674 { 0xf8, 0x09, KEY_VOLUMEUP },
675 { 0xf8, 0x08, KEY_CHANNELUP },
676 { 0xf8, 0x07, KEY_PVR },
677 { 0xf8, 0x0b, KEY_TIME },
678 { 0xf8, 0x02, KEY_RIGHT },
679 { 0xf8, 0x03, KEY_LEFT },
680 { 0xf8, 0x00, KEY_UP },
681 { 0xf8, 0x1f, KEY_OK },
682 { 0xf8, 0x01, KEY_DOWN },
683 { 0xf8, 0x05, KEY_TUNER },
684 { 0xf8, 0x06, KEY_CHANNELDOWN },
685 { 0xf8, 0x40, KEY_PLAYPAUSE },
686 { 0xf8, 0x1e, KEY_REWIND },
687 { 0xf8, 0x1b, KEY_FAVORITES },
688 { 0xf8, 0x1d, KEY_BACK },
689 { 0xf8, 0x4d, KEY_FASTFORWARD },
690 { 0xf8, 0x44, KEY_EPG },
691 { 0xf8, 0x4c, KEY_INFO },
692 { 0xf8, 0x41, KEY_AB },
693 { 0xf8, 0x43, KEY_AUDIO },
694 { 0xf8, 0x45, KEY_SUBTITLE },
695 { 0xf8, 0x4a, KEY_LIST },
696 { 0xf8, 0x46, KEY_F1 },
697 { 0xf8, 0x47, KEY_F2 },
698 { 0xf8, 0x5e, KEY_F3 },
699 { 0xf8, 0x5c, KEY_F4 },
700 { 0xf8, 0x52, KEY_F5 },
701 { 0xf8, 0x5a, KEY_F6 },
702 { 0xf8, 0x56, KEY_MODE },
703 { 0xf8, 0x58, KEY_SWITCHVIDEOMODE },
548}; 704};
549 705
706static struct dvb_usb_rc_key tbs_rc_keys[] = {
707 { 0xf8, 0x84, KEY_POWER },
708 { 0xf8, 0x94, KEY_MUTE },
709 { 0xf8, 0x87, KEY_1 },
710 { 0xf8, 0x86, KEY_2 },
711 { 0xf8, 0x85, KEY_3 },
712 { 0xf8, 0x8b, KEY_4 },
713 { 0xf8, 0x8a, KEY_5 },
714 { 0xf8, 0x89, KEY_6 },
715 { 0xf8, 0x8f, KEY_7 },
716 { 0xf8, 0x8e, KEY_8 },
717 { 0xf8, 0x8d, KEY_9 },
718 { 0xf8, 0x92, KEY_0 },
719 { 0xf8, 0x96, KEY_CHANNELUP },
720 { 0xf8, 0x91, KEY_CHANNELDOWN },
721 { 0xf8, 0x93, KEY_VOLUMEUP },
722 { 0xf8, 0x8c, KEY_VOLUMEDOWN },
723 { 0xf8, 0x83, KEY_RECORD },
724 { 0xf8, 0x98, KEY_PAUSE },
725 { 0xf8, 0x99, KEY_OK },
726 { 0xf8, 0x9a, KEY_SHUFFLE },
727 { 0xf8, 0x81, KEY_UP },
728 { 0xf8, 0x90, KEY_LEFT },
729 { 0xf8, 0x82, KEY_RIGHT },
730 { 0xf8, 0x88, KEY_DOWN },
731 { 0xf8, 0x95, KEY_FAVORITES },
732 { 0xf8, 0x97, KEY_SUBTITLE },
733 { 0xf8, 0x9d, KEY_ZOOM },
734 { 0xf8, 0x9f, KEY_EXIT },
735 { 0xf8, 0x9e, KEY_MENU },
736 { 0xf8, 0x9c, KEY_EPG },
737 { 0xf8, 0x80, KEY_PREVIOUS },
738 { 0xf8, 0x9b, KEY_MODE }
739};
550 740
741static struct dvb_usb_rc_keys_table keys_tables[] = {
742 { dw210x_rc_keys, ARRAY_SIZE(dw210x_rc_keys) },
743 { tevii_rc_keys, ARRAY_SIZE(tevii_rc_keys) },
744 { tbs_rc_keys, ARRAY_SIZE(tbs_rc_keys) },
745};
551 746
552static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state) 747static int dw2102_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
553{ 748{
554 struct dw210x_state *st = d->priv; 749 struct dvb_usb_rc_key *keymap = d->props.rc_key_map;
750 int keymap_size = d->props.rc_key_map_size;
555 u8 key[2]; 751 u8 key[2];
556 struct i2c_msg msg[] = { 752 struct i2c_msg msg = {
557 {.addr = DW2102_RC_QUERY, .flags = I2C_M_RD, .buf = key, 753 .addr = DW2102_RC_QUERY,
558 .len = 2}, 754 .flags = I2C_M_RD,
755 .buf = key,
756 .len = 2
559 }; 757 };
560 int i; 758 int i;
759 /* override keymap */
760 if ((ir_keymap > 0) && (ir_keymap <= ARRAY_SIZE(keys_tables))) {
761 keymap = keys_tables[ir_keymap - 1].rc_keys ;
762 keymap_size = keys_tables[ir_keymap - 1].rc_keys_size;
763 }
561 764
562 *state = REMOTE_NO_KEY_PRESSED; 765 *state = REMOTE_NO_KEY_PRESSED;
563 if (dw2102_i2c_transfer(&d->i2c_adap, msg, 1) == 1) { 766 if (dw2102_i2c_transfer(&d->i2c_adap, &msg, 1) == 1) {
564 for (i = 0; i < ARRAY_SIZE(dw210x_rc_keys); i++) { 767 for (i = 0; i < keymap_size ; i++) {
565 if (dw210x_rc_keys[i].data == msg[0].buf[0]) { 768 if (keymap[i].data == msg.buf[0]) {
566 *state = REMOTE_KEY_PRESSED; 769 *state = REMOTE_KEY_PRESSED;
567 *event = dw210x_rc_keys[i].event; 770 *event = keymap[i].event;
568 st->last_key_pressed =
569 dw210x_rc_keys[i].event;
570 break; 771 break;
571 } 772 }
572 st->last_key_pressed = 0; 773
573 } 774 }
775
776 if ((*state) == REMOTE_KEY_PRESSED)
777 deb_rc("%s: found rc key: %x, %x, event: %x\n",
778 __func__, key[0], key[1], (*event));
779 else if (key[0] != 0xff)
780 deb_rc("%s: unknown rc key: %x, %x\n",
781 __func__, key[0], key[1]);
782
574 } 783 }
575 /* info("key: %x %x\n",key[0],key[1]); */ 784
576 return 0; 785 return 0;
577} 786}
578 787
579static struct usb_device_id dw2102_table[] = { 788static struct usb_device_id dw2102_table[] = {
580 {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2102)}, 789 {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2102)},
581 {USB_DEVICE(USB_VID_CYPRESS, 0x2101)}, 790 {USB_DEVICE(USB_VID_CYPRESS, 0x2101)},
582 {USB_DEVICE(USB_VID_CYPRESS, 0x2104)}, 791 {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2104)},
583 {USB_DEVICE(0x9022, 0xd650)}, 792 {USB_DEVICE(0x9022, USB_PID_TEVII_S650)},
584 {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)}, 793 {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)},
794 {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)},
585 { } 795 { }
586}; 796};
587 797
@@ -642,11 +852,16 @@ static int dw2102_load_firmware(struct usb_device *dev,
642 } 852 }
643 /* init registers */ 853 /* init registers */
644 switch (dev->descriptor.idProduct) { 854 switch (dev->descriptor.idProduct) {
855 case USB_PID_TEVII_S650:
856 dw2104_properties.rc_key_map = tevii_rc_keys;
857 dw2104_properties.rc_key_map_size =
858 ARRAY_SIZE(tevii_rc_keys);
645 case USB_PID_DW2104: 859 case USB_PID_DW2104:
646 case 0xd650:
647 reset = 1; 860 reset = 1;
648 dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1, 861 dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
649 DW210X_WRITE_MSG); 862 DW210X_WRITE_MSG);
863 /* break omitted intentionally */
864 case USB_PID_DW3101:
650 reset = 0; 865 reset = 0;
651 dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0, 866 dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
652 DW210X_WRITE_MSG); 867 DW210X_WRITE_MSG);
@@ -690,6 +905,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
690 DW210X_READ_MSG); 905 DW210X_READ_MSG);
691 break; 906 break;
692 } 907 }
908
693 msleep(100); 909 msleep(100);
694 kfree(p); 910 kfree(p);
695 } 911 }
@@ -700,7 +916,6 @@ static struct dvb_usb_device_properties dw2102_properties = {
700 .caps = DVB_USB_IS_AN_I2C_ADAPTER, 916 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
701 .usb_ctrl = DEVICE_SPECIFIC, 917 .usb_ctrl = DEVICE_SPECIFIC,
702 .firmware = "dvb-usb-dw2102.fw", 918 .firmware = "dvb-usb-dw2102.fw",
703 .size_of_priv = sizeof(struct dw210x_state),
704 .no_reconnect = 1, 919 .no_reconnect = 1,
705 920
706 .i2c_algo = &dw2102_serit_i2c_algo, 921 .i2c_algo = &dw2102_serit_i2c_algo,
@@ -714,7 +929,7 @@ static struct dvb_usb_device_properties dw2102_properties = {
714 .num_adapters = 1, 929 .num_adapters = 1,
715 .download_firmware = dw2102_load_firmware, 930 .download_firmware = dw2102_load_firmware,
716 .read_mac_address = dw210x_read_mac_address, 931 .read_mac_address = dw210x_read_mac_address,
717 .adapter = { 932 .adapter = {
718 { 933 {
719 .frontend_attach = dw2102_frontend_attach, 934 .frontend_attach = dw2102_frontend_attach,
720 .streaming_ctrl = NULL, 935 .streaming_ctrl = NULL,
@@ -752,7 +967,6 @@ static struct dvb_usb_device_properties dw2104_properties = {
752 .caps = DVB_USB_IS_AN_I2C_ADAPTER, 967 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
753 .usb_ctrl = DEVICE_SPECIFIC, 968 .usb_ctrl = DEVICE_SPECIFIC,
754 .firmware = "dvb-usb-dw2104.fw", 969 .firmware = "dvb-usb-dw2104.fw",
755 .size_of_priv = sizeof(struct dw210x_state),
756 .no_reconnect = 1, 970 .no_reconnect = 1,
757 971
758 .i2c_algo = &dw2104_i2c_algo, 972 .i2c_algo = &dw2104_i2c_algo,
@@ -796,12 +1010,57 @@ static struct dvb_usb_device_properties dw2104_properties = {
796 } 1010 }
797}; 1011};
798 1012
1013static struct dvb_usb_device_properties dw3101_properties = {
1014 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
1015 .usb_ctrl = DEVICE_SPECIFIC,
1016 .firmware = "dvb-usb-dw3101.fw",
1017 .no_reconnect = 1,
1018
1019 .i2c_algo = &dw3101_i2c_algo,
1020 .rc_key_map = dw210x_rc_keys,
1021 .rc_key_map_size = ARRAY_SIZE(dw210x_rc_keys),
1022 .rc_interval = 150,
1023 .rc_query = dw2102_rc_query,
1024
1025 .generic_bulk_ctrl_endpoint = 0x81,
1026 /* parameter for the MPEG2-data transfer */
1027 .num_adapters = 1,
1028 .download_firmware = dw2102_load_firmware,
1029 .read_mac_address = dw210x_read_mac_address,
1030 .adapter = {
1031 {
1032 .frontend_attach = dw3101_frontend_attach,
1033 .streaming_ctrl = NULL,
1034 .tuner_attach = dw3101_tuner_attach,
1035 .stream = {
1036 .type = USB_BULK,
1037 .count = 8,
1038 .endpoint = 0x82,
1039 .u = {
1040 .bulk = {
1041 .buffersize = 4096,
1042 }
1043 }
1044 },
1045 }
1046 },
1047 .num_device_descs = 1,
1048 .devices = {
1049 { "DVBWorld DVB-C 3101 USB2.0",
1050 {&dw2102_table[5], NULL},
1051 {NULL},
1052 },
1053 }
1054};
1055
799static int dw2102_probe(struct usb_interface *intf, 1056static int dw2102_probe(struct usb_interface *intf,
800 const struct usb_device_id *id) 1057 const struct usb_device_id *id)
801{ 1058{
802 if (0 == dvb_usb_device_init(intf, &dw2102_properties, 1059 if (0 == dvb_usb_device_init(intf, &dw2102_properties,
803 THIS_MODULE, NULL, adapter_nr) || 1060 THIS_MODULE, NULL, adapter_nr) ||
804 0 == dvb_usb_device_init(intf, &dw2104_properties, 1061 0 == dvb_usb_device_init(intf, &dw2104_properties,
1062 THIS_MODULE, NULL, adapter_nr) ||
1063 0 == dvb_usb_device_init(intf, &dw3101_properties,
805 THIS_MODULE, NULL, adapter_nr)) { 1064 THIS_MODULE, NULL, adapter_nr)) {
806 return 0; 1065 return 0;
807 } 1066 }
@@ -833,6 +1092,8 @@ module_init(dw2102_module_init);
833module_exit(dw2102_module_exit); 1092module_exit(dw2102_module_exit);
834 1093
835MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by"); 1094MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
836MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104 USB2.0 device"); 1095MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
1096 " DVB-C 3101 USB2.0,"
1097 " TeVii S600, S650 USB2.0 devices");
837MODULE_VERSION("0.1"); 1098MODULE_VERSION("0.1");
838MODULE_LICENSE("GPL"); 1099MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/dw2102.h b/drivers/media/dvb/dvb-usb/dw2102.h
index e3370734e95a..5cd0b0eb6ce1 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.h
+++ b/drivers/media/dvb/dvb-usb/dw2102.h
@@ -5,4 +5,5 @@
5#include "dvb-usb.h" 5#include "dvb-usb.h"
6 6
7#define deb_xfer(args...) dprintk(dvb_usb_dw2102_debug, 0x02, args) 7#define deb_xfer(args...) dprintk(dvb_usb_dw2102_debug, 0x02, args)
8#define deb_rc(args...) dprintk(dvb_usb_dw2102_debug, 0x04, args)
8#endif 9#endif
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index 3dd6843864ed..afb444db43ad 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -223,7 +223,7 @@ static struct usb_device_id gp8psk_usb_table [] = {
223 { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_1_WARM) }, 223 { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_1_WARM) },
224 { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_2) }, 224 { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_8PSK_REV_2) },
225 { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_1) }, 225 { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_1) },
226 { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_CW3K) }, 226/* { USB_DEVICE(USB_VID_GENPIX, USB_PID_GENPIX_SKYWALKER_CW3K) }, */
227 { 0 }, 227 { 0 },
228}; 228};
229MODULE_DEVICE_TABLE(usb, gp8psk_usb_table); 229MODULE_DEVICE_TABLE(usb, gp8psk_usb_table);
@@ -254,7 +254,7 @@ static struct dvb_usb_device_properties gp8psk_properties = {
254 254
255 .generic_bulk_ctrl_endpoint = 0x01, 255 .generic_bulk_ctrl_endpoint = 0x01,
256 256
257 .num_device_descs = 4, 257 .num_device_descs = 3,
258 .devices = { 258 .devices = {
259 { .name = "Genpix 8PSK-to-USB2 Rev.1 DVB-S receiver", 259 { .name = "Genpix 8PSK-to-USB2 Rev.1 DVB-S receiver",
260 .cold_ids = { &gp8psk_usb_table[0], NULL }, 260 .cold_ids = { &gp8psk_usb_table[0], NULL },
@@ -268,10 +268,6 @@ static struct dvb_usb_device_properties gp8psk_properties = {
268 .cold_ids = { NULL }, 268 .cold_ids = { NULL },
269 .warm_ids = { &gp8psk_usb_table[3], NULL }, 269 .warm_ids = { &gp8psk_usb_table[3], NULL },
270 }, 270 },
271 { .name = "Genpix SkyWalker-CW3K DVB-S receiver",
272 .cold_ids = { NULL },
273 .warm_ids = { &gp8psk_usb_table[4], NULL },
274 },
275 { NULL }, 271 { NULL },
276 } 272 }
277}; 273};
diff --git a/drivers/media/dvb/firewire/firedtv-1394.c b/drivers/media/dvb/firewire/firedtv-1394.c
index 4e207658c5d9..2b6eeeab5b25 100644
--- a/drivers/media/dvb/firewire/firedtv-1394.c
+++ b/drivers/media/dvb/firewire/firedtv-1394.c
@@ -225,7 +225,7 @@ fail_free:
225 225
226static int node_remove(struct device *dev) 226static int node_remove(struct device *dev)
227{ 227{
228 struct firedtv *fdtv = dev->driver_data; 228 struct firedtv *fdtv = dev_get_drvdata(dev);
229 229
230 fdtv_dvb_unregister(fdtv); 230 fdtv_dvb_unregister(fdtv);
231 231
@@ -242,7 +242,7 @@ static int node_remove(struct device *dev)
242 242
243static int node_update(struct unit_directory *ud) 243static int node_update(struct unit_directory *ud)
244{ 244{
245 struct firedtv *fdtv = ud->device.driver_data; 245 struct firedtv *fdtv = dev_get_drvdata(&ud->device);
246 246
247 if (fdtv->isochannel >= 0) 247 if (fdtv->isochannel >= 0)
248 cmp_establish_pp_connection(fdtv, fdtv->subunit, 248 cmp_establish_pp_connection(fdtv, fdtv->subunit,
diff --git a/drivers/media/dvb/firewire/firedtv-dvb.c b/drivers/media/dvb/firewire/firedtv-dvb.c
index 9d308dd32a5c..5742fde79d99 100644
--- a/drivers/media/dvb/firewire/firedtv-dvb.c
+++ b/drivers/media/dvb/firewire/firedtv-dvb.c
@@ -268,7 +268,7 @@ struct firedtv *fdtv_alloc(struct device *dev,
268 if (!fdtv) 268 if (!fdtv)
269 return NULL; 269 return NULL;
270 270
271 dev->driver_data = fdtv; 271 dev_set_drvdata(dev, fdtv);
272 fdtv->device = dev; 272 fdtv->device = dev;
273 fdtv->isochannel = -1; 273 fdtv->isochannel = -1;
274 fdtv->voltage = 0xff; 274 fdtv->voltage = 0xff;
diff --git a/drivers/media/dvb/firewire/firedtv-rc.c b/drivers/media/dvb/firewire/firedtv-rc.c
index 46a6324d7b73..27bca2e283df 100644
--- a/drivers/media/dvb/firewire/firedtv-rc.c
+++ b/drivers/media/dvb/firewire/firedtv-rc.c
@@ -18,7 +18,7 @@
18#include "firedtv.h" 18#include "firedtv.h"
19 19
20/* fixed table with older keycodes, geared towards MythTV */ 20/* fixed table with older keycodes, geared towards MythTV */
21const static u16 oldtable[] = { 21static const u16 oldtable[] = {
22 22
23 /* code from device: 0x4501...0x451f */ 23 /* code from device: 0x4501...0x451f */
24 24
@@ -62,7 +62,7 @@ const static u16 oldtable[] = {
62}; 62};
63 63
64/* user-modifiable table for a remote as sold in 2008 */ 64/* user-modifiable table for a remote as sold in 2008 */
65const static u16 keytable[] = { 65static const u16 keytable[] = {
66 66
67 /* code from device: 0x0300...0x031f */ 67 /* code from device: 0x0300...0x031f */
68 68
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 23e4cffeba38..be967ac09a39 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -35,6 +35,21 @@ config DVB_STB6100
35 A Silicon tuner from ST used in conjunction with the STB0899 35 A Silicon tuner from ST used in conjunction with the STB0899
36 demodulator. Say Y when you want to support this tuner. 36 demodulator. Say Y when you want to support this tuner.
37 37
38config DVB_STV090x
39 tristate "STV0900/STV0903(A/B) based"
40 depends on DVB_CORE && I2C
41 default m if DVB_FE_CUSTOMISE
42 help
43 DVB-S/S2/DSS Multistandard Professional/Broadcast demodulators.
44 Say Y when you want to support these frontends.
45
46config DVB_STV6110x
47 tristate "STV6110/(A) based tuners"
48 depends on DVB_CORE && I2C
49 default m if DVB_FE_CUSTOMISE
50 help
51 A Silicon tuner that supports DVB-S and DVB-S2 modes
52
38comment "DVB-S (satellite) frontends" 53comment "DVB-S (satellite) frontends"
39 depends on DVB_CORE 54 depends on DVB_CORE
40 55
@@ -506,6 +521,13 @@ config DVB_ISL6421
506 help 521 help
507 An SEC control chip. 522 An SEC control chip.
508 523
524config DVB_ISL6423
525 tristate "ISL6423 SEC controller"
526 depends on DVB_CORE && I2C
527 default m if DVB_FE_CUSTOMISE
528 help
529 A SEC controller chip from Intersil
530
509config DVB_LGS8GL5 531config DVB_LGS8GL5
510 tristate "Silicon Legend LGS-8GL5 demodulator (OFDM)" 532 tristate "Silicon Legend LGS-8GL5 demodulator (OFDM)"
511 depends on DVB_CORE && I2C 533 depends on DVB_CORE && I2C
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index bc2b00abd106..832473c1e512 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -71,4 +71,6 @@ obj-$(CONFIG_DVB_STB6000) += stb6000.o
71obj-$(CONFIG_DVB_S921) += s921.o 71obj-$(CONFIG_DVB_S921) += s921.o
72obj-$(CONFIG_DVB_STV6110) += stv6110.o 72obj-$(CONFIG_DVB_STV6110) += stv6110.o
73obj-$(CONFIG_DVB_STV0900) += stv0900.o 73obj-$(CONFIG_DVB_STV0900) += stv0900.o
74 74obj-$(CONFIG_DVB_STV090x) += stv090x.o
75obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
76obj-$(CONFIG_DVB_ISL6423) += isl6423.o
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index b2b50fb4cfd3..136c5863d81b 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -1455,7 +1455,7 @@ static int af9013_download_firmware(struct af9013_state *state)
1455 af9013_ops.info.name); 1455 af9013_ops.info.name);
1456 1456
1457 /* request the firmware, this will block and timeout */ 1457 /* request the firmware, this will block and timeout */
1458 ret = request_firmware(&fw, fw_file, &state->i2c->dev); 1458 ret = request_firmware(&fw, fw_file, state->i2c->dev.parent);
1459 if (ret) { 1459 if (ret) {
1460 err("did not find the firmware file. (%s) " 1460 err("did not find the firmware file. (%s) "
1461 "Please see linux/Documentation/dvb/ for more details" \ 1461 "Please see linux/Documentation/dvb/ for more details" \
diff --git a/drivers/media/dvb/frontends/au8522_dig.c b/drivers/media/dvb/frontends/au8522_dig.c
index 35731258bb0a..956b80f4979c 100644
--- a/drivers/media/dvb/frontends/au8522_dig.c
+++ b/drivers/media/dvb/frontends/au8522_dig.c
@@ -367,11 +367,90 @@ static struct {
367 { 0x8231, 0x13 }, 367 { 0x8231, 0x13 },
368}; 368};
369 369
370/* QAM Modulation table */ 370/* QAM64 Modulation table */
371static struct { 371static struct {
372 u16 reg; 372 u16 reg;
373 u16 data; 373 u16 data;
374} QAM_mod_tab[] = { 374} QAM64_mod_tab[] = {
375 { 0x00a3, 0x09 },
376 { 0x00a4, 0x00 },
377 { 0x0081, 0xc4 },
378 { 0x00a5, 0x40 },
379 { 0x00aa, 0x77 },
380 { 0x00ad, 0x77 },
381 { 0x00a6, 0x67 },
382 { 0x0262, 0x20 },
383 { 0x021c, 0x30 },
384 { 0x00b8, 0x3e },
385 { 0x00b9, 0xf0 },
386 { 0x00ba, 0x01 },
387 { 0x00bb, 0x18 },
388 { 0x00bc, 0x50 },
389 { 0x00bd, 0x00 },
390 { 0x00be, 0xea },
391 { 0x00bf, 0xef },
392 { 0x00c0, 0xfc },
393 { 0x00c1, 0xbd },
394 { 0x00c2, 0x1f },
395 { 0x00c3, 0xfc },
396 { 0x00c4, 0xdd },
397 { 0x00c5, 0xaf },
398 { 0x00c6, 0x00 },
399 { 0x00c7, 0x38 },
400 { 0x00c8, 0x30 },
401 { 0x00c9, 0x05 },
402 { 0x00ca, 0x4a },
403 { 0x00cb, 0xd0 },
404 { 0x00cc, 0x01 },
405 { 0x00cd, 0xd9 },
406 { 0x00ce, 0x6f },
407 { 0x00cf, 0xf9 },
408 { 0x00d0, 0x70 },
409 { 0x00d1, 0xdf },
410 { 0x00d2, 0xf7 },
411 { 0x00d3, 0xc2 },
412 { 0x00d4, 0xdf },
413 { 0x00d5, 0x02 },
414 { 0x00d6, 0x9a },
415 { 0x00d7, 0xd0 },
416 { 0x0250, 0x0d },
417 { 0x0251, 0xcd },
418 { 0x0252, 0xe0 },
419 { 0x0253, 0x05 },
420 { 0x0254, 0xa7 },
421 { 0x0255, 0xff },
422 { 0x0256, 0xed },
423 { 0x0257, 0x5b },
424 { 0x0258, 0xae },
425 { 0x0259, 0xe6 },
426 { 0x025a, 0x3d },
427 { 0x025b, 0x0f },
428 { 0x025c, 0x0d },
429 { 0x025d, 0xea },
430 { 0x025e, 0xf2 },
431 { 0x025f, 0x51 },
432 { 0x0260, 0xf5 },
433 { 0x0261, 0x06 },
434 { 0x021a, 0x00 },
435 { 0x0546, 0x40 },
436 { 0x0210, 0xc7 },
437 { 0x0211, 0xaa },
438 { 0x0212, 0xab },
439 { 0x0213, 0x02 },
440 { 0x0502, 0x00 },
441 { 0x0121, 0x04 },
442 { 0x0122, 0x04 },
443 { 0x052e, 0x10 },
444 { 0x00a4, 0xca },
445 { 0x00a7, 0x40 },
446 { 0x0526, 0x01 },
447};
448
449/* QAM256 Modulation table */
450static struct {
451 u16 reg;
452 u16 data;
453} QAM256_mod_tab[] = {
375 { 0x80a3, 0x09 }, 454 { 0x80a3, 0x09 },
376 { 0x80a4, 0x00 }, 455 { 0x80a4, 0x00 },
377 { 0x8081, 0xc4 }, 456 { 0x8081, 0xc4 },
@@ -464,12 +543,19 @@ static int au8522_enable_modulation(struct dvb_frontend *fe,
464 au8522_set_if(fe, state->config->vsb_if); 543 au8522_set_if(fe, state->config->vsb_if);
465 break; 544 break;
466 case QAM_64: 545 case QAM_64:
546 dprintk("%s() QAM 64\n", __func__);
547 for (i = 0; i < ARRAY_SIZE(QAM64_mod_tab); i++)
548 au8522_writereg(state,
549 QAM64_mod_tab[i].reg,
550 QAM64_mod_tab[i].data);
551 au8522_set_if(fe, state->config->qam_if);
552 break;
467 case QAM_256: 553 case QAM_256:
468 dprintk("%s() QAM 64/256\n", __func__); 554 dprintk("%s() QAM 256\n", __func__);
469 for (i = 0; i < ARRAY_SIZE(QAM_mod_tab); i++) 555 for (i = 0; i < ARRAY_SIZE(QAM256_mod_tab); i++)
470 au8522_writereg(state, 556 au8522_writereg(state,
471 QAM_mod_tab[i].reg, 557 QAM256_mod_tab[i].reg,
472 QAM_mod_tab[i].data); 558 QAM256_mod_tab[i].data);
473 au8522_set_if(fe, state->config->qam_if); 559 au8522_set_if(fe, state->config->qam_if);
474 break; 560 break;
475 default: 561 default:
diff --git a/drivers/media/dvb/frontends/cx24116.c b/drivers/media/dvb/frontends/cx24116.c
index 9b9f57264cef..2410d8b59b6b 100644
--- a/drivers/media/dvb/frontends/cx24116.c
+++ b/drivers/media/dvb/frontends/cx24116.c
@@ -492,7 +492,7 @@ static int cx24116_firmware_ondemand(struct dvb_frontend *fe)
492 printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n", 492 printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n",
493 __func__, CX24116_DEFAULT_FIRMWARE); 493 __func__, CX24116_DEFAULT_FIRMWARE);
494 ret = request_firmware(&fw, CX24116_DEFAULT_FIRMWARE, 494 ret = request_firmware(&fw, CX24116_DEFAULT_FIRMWARE,
495 &state->i2c->dev); 495 state->i2c->dev.parent);
496 printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n", 496 printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n",
497 __func__); 497 __func__);
498 if (ret) { 498 if (ret) {
diff --git a/drivers/media/dvb/frontends/drx397xD.c b/drivers/media/dvb/frontends/drx397xD.c
index 172f1f928f02..010075535221 100644
--- a/drivers/media/dvb/frontends/drx397xD.c
+++ b/drivers/media/dvb/frontends/drx397xD.c
@@ -123,10 +123,10 @@ static int drx_load_fw(struct drx397xD_state *s, enum fw_ix ix)
123 } 123 }
124 memset(&fw[ix].data[0], 0, sizeof(fw[0].data)); 124 memset(&fw[ix].data[0], 0, sizeof(fw[0].data));
125 125
126 if (request_firmware(&fw[ix].file, fw[ix].name, &s->i2c->dev) != 0) { 126 rc = request_firmware(&fw[ix].file, fw[ix].name, s->i2c->dev.parent);
127 if (rc != 0) {
127 printk(KERN_ERR "%s: Firmware \"%s\" not available\n", 128 printk(KERN_ERR "%s: Firmware \"%s\" not available\n",
128 mod_name, fw[ix].name); 129 mod_name, fw[ix].name);
129 rc = -ENOENT;
130 goto exit_err; 130 goto exit_err;
131 } 131 }
132 132
diff --git a/drivers/media/dvb/frontends/isl6423.c b/drivers/media/dvb/frontends/isl6423.c
new file mode 100644
index 000000000000..dca5bebfeeb5
--- /dev/null
+++ b/drivers/media/dvb/frontends/isl6423.c
@@ -0,0 +1,308 @@
1/*
2 Intersil ISL6423 SEC and LNB Power supply controller
3
4 Copyright (C) Manu Abraham <abraham.manu@gmail.com>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#include <linux/delay.h>
22#include <linux/errno.h>
23#include <linux/init.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/string.h>
27#include <linux/slab.h>
28
29#include "dvb_frontend.h"
30#include "isl6423.h"
31
32static unsigned int verbose;
33module_param(verbose, int, 0644);
34MODULE_PARM_DESC(verbose, "Set Verbosity level");
35
36#define FE_ERROR 0
37#define FE_NOTICE 1
38#define FE_INFO 2
39#define FE_DEBUG 3
40#define FE_DEBUGREG 4
41
42#define dprintk(__y, __z, format, arg...) do { \
43 if (__z) { \
44 if ((verbose > FE_ERROR) && (verbose > __y)) \
45 printk(KERN_ERR "%s: " format "\n", __func__ , ##arg); \
46 else if ((verbose > FE_NOTICE) && (verbose > __y)) \
47 printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \
48 else if ((verbose > FE_INFO) && (verbose > __y)) \
49 printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \
50 else if ((verbose > FE_DEBUG) && (verbose > __y)) \
51 printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \
52 } else { \
53 if (verbose > __y) \
54 printk(format, ##arg); \
55 } \
56} while (0)
57
58struct isl6423_dev {
59 const struct isl6423_config *config;
60 struct i2c_adapter *i2c;
61
62 u8 reg_3;
63 u8 reg_4;
64
65 unsigned int verbose;
66};
67
68static int isl6423_write(struct isl6423_dev *isl6423, u8 reg)
69{
70 struct i2c_adapter *i2c = isl6423->i2c;
71 u8 addr = isl6423->config->addr;
72 int err = 0;
73
74 struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = &reg, .len = 1 };
75
76 dprintk(FE_DEBUG, 1, "write reg %02X", reg);
77 err = i2c_transfer(i2c, &msg, 1);
78 if (err < 0)
79 goto exit;
80 return 0;
81
82exit:
83 dprintk(FE_ERROR, 1, "I/O error <%d>", err);
84 return err;
85}
86
87static int isl6423_set_modulation(struct dvb_frontend *fe)
88{
89 struct isl6423_dev *isl6423 = (struct isl6423_dev *) fe->sec_priv;
90 const struct isl6423_config *config = isl6423->config;
91 int err = 0;
92 u8 reg_2 = 0;
93
94 reg_2 = 0x01 << 5;
95
96 if (config->mod_extern)
97 reg_2 |= (1 << 3);
98 else
99 reg_2 |= (1 << 4);
100
101 err = isl6423_write(isl6423, reg_2);
102 if (err < 0)
103 goto exit;
104 return 0;
105
106exit:
107 dprintk(FE_ERROR, 1, "I/O error <%d>", err);
108 return err;
109}
110
111static int isl6423_voltage_boost(struct dvb_frontend *fe, long arg)
112{
113 struct isl6423_dev *isl6423 = (struct isl6423_dev *) fe->sec_priv;
114 u8 reg_3 = isl6423->reg_3;
115 u8 reg_4 = isl6423->reg_4;
116 int err = 0;
117
118 if (arg) {
119 /* EN = 1, VSPEN = 1, VBOT = 1 */
120 reg_4 |= (1 << 4);
121 reg_4 |= 0x1;
122 reg_3 |= (1 << 3);
123 } else {
124 /* EN = 1, VSPEN = 1, VBOT = 0 */
125 reg_4 |= (1 << 4);
126 reg_4 &= ~0x1;
127 reg_3 |= (1 << 3);
128 }
129 err = isl6423_write(isl6423, reg_3);
130 if (err < 0)
131 goto exit;
132
133 err = isl6423_write(isl6423, reg_4);
134 if (err < 0)
135 goto exit;
136
137 isl6423->reg_3 = reg_3;
138 isl6423->reg_4 = reg_4;
139
140 return 0;
141exit:
142 dprintk(FE_ERROR, 1, "I/O error <%d>", err);
143 return err;
144}
145
146
147static int isl6423_set_voltage(struct dvb_frontend *fe,
148 enum fe_sec_voltage voltage)
149{
150 struct isl6423_dev *isl6423 = (struct isl6423_dev *) fe->sec_priv;
151 u8 reg_3 = isl6423->reg_3;
152 u8 reg_4 = isl6423->reg_4;
153 int err = 0;
154
155 switch (voltage) {
156 case SEC_VOLTAGE_OFF:
157 /* EN = 0 */
158 reg_4 &= ~(1 << 4);
159 break;
160
161 case SEC_VOLTAGE_13:
162 /* EN = 1, VSPEN = 1, VTOP = 0, VBOT = 0 */
163 reg_4 |= (1 << 4);
164 reg_4 &= ~0x3;
165 reg_3 |= (1 << 3);
166 break;
167
168 case SEC_VOLTAGE_18:
169 /* EN = 1, VSPEN = 1, VTOP = 1, VBOT = 0 */
170 reg_4 |= (1 << 4);
171 reg_4 |= 0x2;
172 reg_4 &= ~0x1;
173 reg_3 |= (1 << 3);
174 break;
175
176 default:
177 break;
178 }
179 err = isl6423_write(isl6423, reg_3);
180 if (err < 0)
181 goto exit;
182
183 err = isl6423_write(isl6423, reg_4);
184 if (err < 0)
185 goto exit;
186
187 isl6423->reg_3 = reg_3;
188 isl6423->reg_4 = reg_4;
189
190 return 0;
191exit:
192 dprintk(FE_ERROR, 1, "I/O error <%d>", err);
193 return err;
194}
195
196static int isl6423_set_current(struct dvb_frontend *fe)
197{
198 struct isl6423_dev *isl6423 = (struct isl6423_dev *) fe->sec_priv;
199 u8 reg_3 = isl6423->reg_3;
200 const struct isl6423_config *config = isl6423->config;
201 int err = 0;
202
203 switch (config->current_max) {
204 case SEC_CURRENT_275m:
205 /* 275mA */
206 /* ISELH = 0, ISELL = 0 */
207 reg_3 &= ~0x3;
208 break;
209
210 case SEC_CURRENT_515m:
211 /* 515mA */
212 /* ISELH = 0, ISELL = 1 */
213 reg_3 &= ~0x2;
214 reg_3 |= 0x1;
215 break;
216
217 case SEC_CURRENT_635m:
218 /* 635mA */
219 /* ISELH = 1, ISELL = 0 */
220 reg_3 &= ~0x1;
221 reg_3 |= 0x2;
222 break;
223
224 case SEC_CURRENT_800m:
225 /* 800mA */
226 /* ISELH = 1, ISELL = 1 */
227 reg_3 |= 0x3;
228 break;
229 }
230
231 err = isl6423_write(isl6423, reg_3);
232 if (err < 0)
233 goto exit;
234
235 switch (config->curlim) {
236 case SEC_CURRENT_LIM_ON:
237 /* DCL = 0 */
238 reg_3 &= ~0x10;
239 break;
240
241 case SEC_CURRENT_LIM_OFF:
242 /* DCL = 1 */
243 reg_3 |= 0x10;
244 break;
245 }
246
247 err = isl6423_write(isl6423, reg_3);
248 if (err < 0)
249 goto exit;
250
251 isl6423->reg_3 = reg_3;
252
253 return 0;
254exit:
255 dprintk(FE_ERROR, 1, "I/O error <%d>", err);
256 return err;
257}
258
259static void isl6423_release(struct dvb_frontend *fe)
260{
261 isl6423_set_voltage(fe, SEC_VOLTAGE_OFF);
262
263 kfree(fe->sec_priv);
264 fe->sec_priv = NULL;
265}
266
267struct dvb_frontend *isl6423_attach(struct dvb_frontend *fe,
268 struct i2c_adapter *i2c,
269 const struct isl6423_config *config)
270{
271 struct isl6423_dev *isl6423;
272
273 isl6423 = kzalloc(sizeof(struct isl6423_dev), GFP_KERNEL);
274 if (!isl6423)
275 return NULL;
276
277 isl6423->config = config;
278 isl6423->i2c = i2c;
279 fe->sec_priv = isl6423;
280
281 /* SR3H = 0, SR3M = 1, SR3L = 0 */
282 isl6423->reg_3 = 0x02 << 5;
283 /* SR4H = 0, SR4M = 1, SR4L = 1 */
284 isl6423->reg_4 = 0x03 << 5;
285
286 if (isl6423_set_current(fe))
287 goto exit;
288
289 if (isl6423_set_modulation(fe))
290 goto exit;
291
292 fe->ops.release_sec = isl6423_release;
293 fe->ops.set_voltage = isl6423_set_voltage;
294 fe->ops.enable_high_lnb_voltage = isl6423_voltage_boost;
295 isl6423->verbose = verbose;
296
297 return fe;
298
299exit:
300 kfree(isl6423);
301 fe->sec_priv = NULL;
302 return NULL;
303}
304EXPORT_SYMBOL(isl6423_attach);
305
306MODULE_DESCRIPTION("ISL6423 SEC");
307MODULE_AUTHOR("Manu Abraham");
308MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/isl6423.h b/drivers/media/dvb/frontends/isl6423.h
new file mode 100644
index 000000000000..e1a37fba01ca
--- /dev/null
+++ b/drivers/media/dvb/frontends/isl6423.h
@@ -0,0 +1,63 @@
1/*
2 Intersil ISL6423 SEC and LNB Power supply controller
3
4 Copyright (C) Manu Abraham <abraham.manu@gmail.com>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef __ISL_6423_H
22#define __ISL_6423_H
23
24#include <linux/dvb/frontend.h>
25
/* Maximum LNB supply current selection; the 'm' suffix is mA
 * (e.g. SEC_CURRENT_800m = 800mA) and maps onto the ISELH/ISELL
 * bits programmed by isl6423.c. */
26enum isl6423_current {
27	SEC_CURRENT_275m = 0,
28	SEC_CURRENT_515m,
29	SEC_CURRENT_635m,
30	SEC_CURRENT_800m,
31};
32
/* Dynamic current limiting on/off; maps onto the DCL bit of
 * register 3 (see the curlim switch in isl6423.c). */
33enum isl6423_curlim {
34	SEC_CURRENT_LIM_ON = 1,
35	SEC_CURRENT_LIM_OFF
36};
37
/* Board-specific configuration passed to isl6423_attach(). */
38struct isl6423_config {
	/* highest LNB current the board design allows */
39	enum isl6423_current current_max;
	/* dynamic current limiting on/off */
40	enum isl6423_curlim curlim;
	/* NOTE(review): presumably the chip's I2C slave address — confirm
	 * against isl6423_write() in isl6423.c */
41	u8 addr;
	/* NOTE(review): looks like an external-modulation input select;
	 * verify against isl6423_set_modulation() */
42	u8 mod_extern;
43};
44
/* Kconfig-gated: real attach when the driver is built in or as the
 * module being compiled, otherwise an inline warning stub. */
45#if defined(CONFIG_DVB_ISL6423) || (defined(CONFIG_DVB_ISL6423_MODULE) && defined(MODULE))
46
47
48extern struct dvb_frontend *isl6423_attach(struct dvb_frontend *fe,
49					   struct i2c_adapter *i2c,
50					   const struct isl6423_config *config);
51
52#else
53static inline struct dvb_frontend *isl6423_attach(struct dvb_frontend *fe,
54						  struct i2c_adapter *i2c,
55						  const struct isl6423_config *config)
56{
57	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
58	return NULL;
59}
60
61#endif /* CONFIG_DVB_ISL6423 */
62
63#endif /* __ISL_6423_H */
diff --git a/drivers/media/dvb/frontends/lgdt3305.c b/drivers/media/dvb/frontends/lgdt3305.c
index d92d0557a80b..fde8c59700fb 100644
--- a/drivers/media/dvb/frontends/lgdt3305.c
+++ b/drivers/media/dvb/frontends/lgdt3305.c
@@ -19,6 +19,7 @@
19 * 19 *
20 */ 20 */
21 21
22#include <asm/div64.h>
22#include <linux/dvb/frontend.h> 23#include <linux/dvb/frontend.h>
23#include "dvb_math.h" 24#include "dvb_math.h"
24#include "lgdt3305.h" 25#include "lgdt3305.h"
@@ -496,27 +497,15 @@ static int lgdt3305_set_if(struct lgdt3305_state *state,
496 497
497 nco = if_freq_khz / 10; 498 nco = if_freq_khz / 10;
498 499
499#define LGDT3305_64BIT_DIVISION_ENABLED 0
500 /* FIXME: 64bit division disabled to avoid linking error:
501 * WARNING: "__udivdi3" [lgdt3305.ko] undefined!
502 */
503 switch (param->u.vsb.modulation) { 500 switch (param->u.vsb.modulation) {
504 case VSB_8: 501 case VSB_8:
505#if LGDT3305_64BIT_DIVISION_ENABLED
506 nco <<= 24; 502 nco <<= 24;
507 nco /= 625; 503 do_div(nco, 625);
508#else
509 nco *= ((1 << 24) / 625);
510#endif
511 break; 504 break;
512 case QAM_64: 505 case QAM_64:
513 case QAM_256: 506 case QAM_256:
514#if LGDT3305_64BIT_DIVISION_ENABLED
515 nco <<= 28; 507 nco <<= 28;
516 nco /= 625; 508 do_div(nco, 625);
517#else
518 nco *= ((1 << 28) / 625);
519#endif
520 break; 509 break;
521 default: 510 default:
522 return -EINVAL; 511 return -EINVAL;
diff --git a/drivers/media/dvb/frontends/lgs8gxx.c b/drivers/media/dvb/frontends/lgs8gxx.c
index f9785dfe735b..fde27645bbed 100644
--- a/drivers/media/dvb/frontends/lgs8gxx.c
+++ b/drivers/media/dvb/frontends/lgs8gxx.c
@@ -37,14 +37,14 @@
37 } while (0) 37 } while (0)
38 38
39static int debug; 39static int debug;
40static int fake_signal_str; 40static int fake_signal_str = 1;
41 41
42module_param(debug, int, 0644); 42module_param(debug, int, 0644);
43MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); 43MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
44 44
45module_param(fake_signal_str, int, 0644); 45module_param(fake_signal_str, int, 0644);
46MODULE_PARM_DESC(fake_signal_str, "fake signal strength for LGS8913." 46MODULE_PARM_DESC(fake_signal_str, "fake signal strength for LGS8913."
47"Signal strength calculation is slow.(default:off)."); 47"Signal strength calculation is slow.(default:on).");
48 48
49/* LGS8GXX internal helper functions */ 49/* LGS8GXX internal helper functions */
50 50
@@ -610,7 +610,7 @@ static int lgs8gxx_read_signal_agc(struct lgs8gxx_state *priv, u16 *signal)
610 else 610 else
611 cat = 0; 611 cat = 0;
612 612
613 *signal = cat; 613 *signal = cat * 65535 / 5;
614 614
615 return 0; 615 return 0;
616} 616}
@@ -630,8 +630,8 @@ static int lgs8913_read_signal_strength(struct lgs8gxx_state *priv, u16 *signal)
630 630
631 if (fake_signal_str) { 631 if (fake_signal_str) {
632 if ((t & 0xC0) == 0xC0) { 632 if ((t & 0xC0) == 0xC0) {
633 dprintk("Fake signal strength as 50\n"); 633 dprintk("Fake signal strength\n");
634 *signal = 0x32; 634 *signal = 0x7FFF;
635 } else 635 } else
636 *signal = 0; 636 *signal = 0;
637 return 0; 637 return 0;
diff --git a/drivers/media/dvb/frontends/lnbp21.c b/drivers/media/dvb/frontends/lnbp21.c
index 1dcc56f32bff..71f607fe8fc7 100644
--- a/drivers/media/dvb/frontends/lnbp21.c
+++ b/drivers/media/dvb/frontends/lnbp21.c
@@ -133,7 +133,7 @@ static struct dvb_frontend *lnbx2x_attach(struct dvb_frontend *fe,
133 /* override frontend ops */ 133 /* override frontend ops */
134 fe->ops.set_voltage = lnbp21_set_voltage; 134 fe->ops.set_voltage = lnbp21_set_voltage;
135 fe->ops.enable_high_lnb_voltage = lnbp21_enable_high_lnb_voltage; 135 fe->ops.enable_high_lnb_voltage = lnbp21_enable_high_lnb_voltage;
136 printk(KERN_INFO "LNBx2x attached on addr=%x", lnbp21->i2c_addr); 136 printk(KERN_INFO "LNBx2x attached on addr=%x\n", lnbp21->i2c_addr);
137 137
138 return fe; 138 return fe;
139} 139}
diff --git a/drivers/media/dvb/frontends/mt312.c b/drivers/media/dvb/frontends/mt312.c
index 5ac9b15920f8..a621f727935f 100644
--- a/drivers/media/dvb/frontends/mt312.c
+++ b/drivers/media/dvb/frontends/mt312.c
@@ -77,7 +77,7 @@ static int mt312_read(struct mt312_state *state, const enum mt312_reg_addr reg,
77 ret = i2c_transfer(state->i2c, msg, 2); 77 ret = i2c_transfer(state->i2c, msg, 2);
78 78
79 if (ret != 2) { 79 if (ret != 2) {
80 printk(KERN_ERR "%s: ret == %d\n", __func__, ret); 80 printk(KERN_DEBUG "%s: ret == %d\n", __func__, ret);
81 return -EREMOTEIO; 81 return -EREMOTEIO;
82 } 82 }
83 83
diff --git a/drivers/media/dvb/frontends/nxt200x.c b/drivers/media/dvb/frontends/nxt200x.c
index a8429ebfa8a2..eac20650499f 100644
--- a/drivers/media/dvb/frontends/nxt200x.c
+++ b/drivers/media/dvb/frontends/nxt200x.c
@@ -879,7 +879,8 @@ static int nxt2002_init(struct dvb_frontend* fe)
879 879
880 /* request the firmware, this will block until someone uploads it */ 880 /* request the firmware, this will block until someone uploads it */
881 printk("nxt2002: Waiting for firmware upload (%s)...\n", NXT2002_DEFAULT_FIRMWARE); 881 printk("nxt2002: Waiting for firmware upload (%s)...\n", NXT2002_DEFAULT_FIRMWARE);
882 ret = request_firmware(&fw, NXT2002_DEFAULT_FIRMWARE, &state->i2c->dev); 882 ret = request_firmware(&fw, NXT2002_DEFAULT_FIRMWARE,
883 state->i2c->dev.parent);
883 printk("nxt2002: Waiting for firmware upload(2)...\n"); 884 printk("nxt2002: Waiting for firmware upload(2)...\n");
884 if (ret) { 885 if (ret) {
885 printk("nxt2002: No firmware uploaded (timeout or file not found?)\n"); 886 printk("nxt2002: No firmware uploaded (timeout or file not found?)\n");
@@ -943,7 +944,8 @@ static int nxt2004_init(struct dvb_frontend* fe)
943 944
944 /* request the firmware, this will block until someone uploads it */ 945 /* request the firmware, this will block until someone uploads it */
945 printk("nxt2004: Waiting for firmware upload (%s)...\n", NXT2004_DEFAULT_FIRMWARE); 946 printk("nxt2004: Waiting for firmware upload (%s)...\n", NXT2004_DEFAULT_FIRMWARE);
946 ret = request_firmware(&fw, NXT2004_DEFAULT_FIRMWARE, &state->i2c->dev); 947 ret = request_firmware(&fw, NXT2004_DEFAULT_FIRMWARE,
948 state->i2c->dev.parent);
947 printk("nxt2004: Waiting for firmware upload(2)...\n"); 949 printk("nxt2004: Waiting for firmware upload(2)...\n");
948 if (ret) { 950 if (ret) {
949 printk("nxt2004: No firmware uploaded (timeout or file not found?)\n"); 951 printk("nxt2004: No firmware uploaded (timeout or file not found?)\n");
diff --git a/drivers/media/dvb/frontends/or51132.c b/drivers/media/dvb/frontends/or51132.c
index 5ed32544de39..8133ea3cddd7 100644
--- a/drivers/media/dvb/frontends/or51132.c
+++ b/drivers/media/dvb/frontends/or51132.c
@@ -340,7 +340,7 @@ static int or51132_set_parameters(struct dvb_frontend* fe,
340 } 340 }
341 printk("or51132: Waiting for firmware upload(%s)...\n", 341 printk("or51132: Waiting for firmware upload(%s)...\n",
342 fwname); 342 fwname);
343 ret = request_firmware(&fw, fwname, &state->i2c->dev); 343 ret = request_firmware(&fw, fwname, state->i2c->dev.parent);
344 if (ret) { 344 if (ret) {
345 printk(KERN_WARNING "or51132: No firmware up" 345 printk(KERN_WARNING "or51132: No firmware up"
346 "loaded(timeout or file not found?)\n"); 346 "loaded(timeout or file not found?)\n");
diff --git a/drivers/media/dvb/frontends/stv0900_priv.h b/drivers/media/dvb/frontends/stv0900_priv.h
index 762d5af62d7a..67dc8ec634e2 100644
--- a/drivers/media/dvb/frontends/stv0900_priv.h
+++ b/drivers/media/dvb/frontends/stv0900_priv.h
@@ -60,8 +60,6 @@
60 } \ 60 } \
61 } while (0) 61 } while (0)
62 62
63#define dmd_choose(a, b) (demod = STV0900_DEMOD_2 ? b : a))
64
65static int stvdebug; 63static int stvdebug;
66 64
67#define dprintk(args...) \ 65#define dprintk(args...) \
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
new file mode 100644
index 000000000000..96ef745a2e4e
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -0,0 +1,4299 @@
1/*
2 STV0900/0903 Multistandard Broadcast Frontend driver
3 Copyright (C) Manu Abraham <abraham.manu@gmail.com>
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/string.h>
26#include <linux/mutex.h>
27
28#include <linux/dvb/frontend.h>
29#include "dvb_frontend.h"
30
31#include "stv6110x.h" /* for demodulator internal modes */
32
33#include "stv090x_reg.h"
34#include "stv090x.h"
35#include "stv090x_priv.h"
36
/* Module-wide debug verbosity level, runtime-tunable via sysfs (0644).
 * NOTE(review): declared unsigned int but registered with module_param
 * type "int" — confirm this mismatch is intentional. */
37static unsigned int verbose;
38module_param(verbose, int, 0644);
39
/* Lock taken around demodulator register sequences.
 * NOTE(review): has external linkage and is shared by every attached
 * device — consider making it static (or per-device state) unless
 * another translation unit genuinely references it. */
40struct mutex demod_lock;
41
/*
 * Signal-quality lookup tables: left column is the reported value
 * (dB x 10 for C/N, dBm for RF level), right column the raw chip
 * reading it corresponds to.  Presumably interpolated by the SNR /
 * signal-strength read paths — those are outside this chunk; confirm.
 */
42/* DVBS1 and DSS C/N Lookup table */
43static const struct stv090x_tab stv090x_s1cn_tab[] = {
44	{   0, 8917 }, /*  0.0dB */
45	{   5, 8801 }, /*  0.5dB */
46	{  10, 8667 }, /*  1.0dB */
47	{  15, 8522 }, /*  1.5dB */
48	{  20, 8355 }, /*  2.0dB */
49	{  25, 8175 }, /*  2.5dB */
50	{  30, 7979 }, /*  3.0dB */
51	{  35, 7763 }, /*  3.5dB */
52	{  40, 7530 }, /*  4.0dB */
53	{  45, 7282 }, /*  4.5dB */
54	{  50, 7026 }, /*  5.0dB */
55	{  55, 6781 }, /*  5.5dB */
56	{  60, 6514 }, /*  6.0dB */
57	{  65, 6241 }, /*  6.5dB */
58	{  70, 5965 }, /*  7.0dB */
59	{  75, 5690 }, /*  7.5dB */
60	{  80, 5424 }, /*  8.0dB */
61	{  85, 5161 }, /*  8.5dB */
62	{  90, 4902 }, /*  9.0dB */
63	{  95, 4654 }, /*  9.5dB */
64	{ 100, 4417 }, /* 10.0dB */
65	{ 105, 4186 }, /* 10.5dB */
66	{ 110, 3968 }, /* 11.0dB */
67	{ 115, 3757 }, /* 11.5dB */
68	{ 120, 3558 }, /* 12.0dB */
69	{ 125, 3366 }, /* 12.5dB */
70	{ 130, 3185 }, /* 13.0dB */
71	{ 135, 3012 }, /* 13.5dB */
72	{ 140, 2850 }, /* 14.0dB */
73	{ 145, 2698 }, /* 14.5dB */
74	{ 150, 2550 }, /* 15.0dB */
75	{ 160, 2283 }, /* 16.0dB */
76	{ 170, 2042 }, /* 17.0dB */
77	{ 180, 1827 }, /* 18.0dB */
78	{ 190, 1636 }, /* 19.0dB */
79	{ 200, 1466 }, /* 20.0dB */
80	{ 210, 1315 }, /* 21.0dB */
81	{ 220, 1181 }, /* 22.0dB */
82	{ 230, 1064 }, /* 23.0dB */
83	{ 240,  960 }, /* 24.0dB */
84	{ 250,  869 }, /* 25.0dB */
85	{ 260,  792 }, /* 26.0dB */
86	{ 270,  724 }, /* 27.0dB */
87	{ 280,  665 }, /* 28.0dB */
88	{ 290,  616 }, /* 29.0dB */
89	{ 300,  573 }, /* 30.0dB */
90	{ 310,  537 }, /* 31.0dB */
91	{ 320,  507 }, /* 32.0dB */
92	{ 330,  483 }, /* 33.0dB */
93	{ 400,  398 }, /* 40.0dB */
94	{ 450,  381 }, /* 45.0dB */
95	{ 500,  377 }  /* 50.0dB */
96};
97
98/* DVBS2 C/N Lookup table */
99static const struct stv090x_tab stv090x_s2cn_tab[] = {
100	{ -30, 13348 }, /* -3.0dB */
101	{ -20, 12640 }, /* -2.0dB */
102	{ -10, 11883 }, /* -1.0dB */
103	{   0, 11101 }, /*  0.0dB */
104	{   5, 10718 }, /*  0.5dB */
105	{  10, 10339 }, /*  1.0dB */
106	{  15,  9947 }, /*  1.5dB */
107	{  20,  9552 }, /*  2.0dB */
108	{  25,  9183 }, /*  2.5dB */
109	{  30,  8799 }, /*  3.0dB */
110	{  35,  8422 }, /*  3.5dB */
111	{  40,  8062 }, /*  4.0dB */
112	{  45,  7707 }, /*  4.5dB */
113	{  50,  7353 }, /*  5.0dB */
114	{  55,  7025 }, /*  5.5dB */
115	{  60,  6684 }, /*  6.0dB */
116	{  65,  6331 }, /*  6.5dB */
117	{  70,  6036 }, /*  7.0dB */
118	{  75,  5727 }, /*  7.5dB */
119	{  80,  5437 }, /*  8.0dB */
120	{  85,  5164 }, /*  8.5dB */
121	{  90,  4902 }, /*  9.0dB */
122	{  95,  4653 }, /*  9.5dB */
123	{ 100,  4408 }, /* 10.0dB */
124	{ 105,  4187 }, /* 10.5dB */
125	{ 110,  3961 }, /* 11.0dB */
126	{ 115,  3751 }, /* 11.5dB */
127	{ 120,  3558 }, /* 12.0dB */
128	{ 125,  3368 }, /* 12.5dB */
129	{ 130,  3191 }, /* 13.0dB */
130	{ 135,  3017 }, /* 13.5dB */
131	{ 140,  2862 }, /* 14.0dB */
132	{ 145,  2710 }, /* 14.5dB */
133	{ 150,  2565 }, /* 15.0dB */
134	{ 160,  2300 }, /* 16.0dB */
135	{ 170,  2058 }, /* 17.0dB */
136	{ 180,  1849 }, /* 18.0dB */
137	{ 190,  1663 }, /* 19.0dB */
138	{ 200,  1495 }, /* 20.0dB */
139	{ 210,  1349 }, /* 21.0dB */
140	{ 220,  1222 }, /* 22.0dB */
141	{ 230,  1110 }, /* 23.0dB */
142	{ 240,  1011 }, /* 24.0dB */
143	{ 250,   925 }, /* 25.0dB */
144	{ 260,   853 }, /* 26.0dB */
145	{ 270,   789 }, /* 27.0dB */
146	{ 280,   734 }, /* 28.0dB */
147	{ 290,   690 }, /* 29.0dB */
148	{ 300,   650 }, /* 30.0dB */
149	{ 310,   619 }, /* 31.0dB */
150	{ 320,   593 }, /* 32.0dB */
151	{ 330,   571 }, /* 33.0dB */
152	{ 400,   498 }, /* 40.0dB */
153	{ 450,   484 }, /* 45.0dB */
154	{ 500,   481 }  /* 50.0dB */
155};
156
157/* RF level C/N lookup table */
158static const struct stv090x_tab stv090x_rf_tab[] = {
159	{  -5, 0xcaa1 }, /*  -5dBm */
160	{ -10, 0xc229 }, /* -10dBm */
161	{ -15, 0xbb08 }, /* -15dBm */
162	{ -20, 0xb4bc }, /* -20dBm */
163	{ -25, 0xad5a }, /* -25dBm */
164	{ -30, 0xa298 }, /* -30dBm */
165	{ -35, 0x98a8 }, /* -35dBm */
166	{ -40, 0x8389 }, /* -40dBm */
167	{ -45, 0x59be }, /* -45dBm */
168	{ -50, 0x3a14 }, /* -50dBm */
169	{ -55, 0x2d11 }, /* -55dBm */
170	{ -60, 0x210d }, /* -60dBm */
	/* NOTE(review): 0xa14f breaks the otherwise monotonically
	 * decreasing sequence (0x210d -> 0xa14f -> 0x07aa); looks like
	 * a possible typo for 0x014f — verify against the ST datasheet
	 * before relying on readings around -65dBm. */
171	{ -65, 0xa14f }, /* -65dBm */
172	{ -70, 0x07aa }  /* -70dBm */
173};
174
175
/*
 * STV0900 (dual demodulator) power-on register defaults.  Covers both
 * demodulator paths (P1_/P2_ prefixes) plus the shared LDPC iteration
 * (NBITER_*) and LLR gain (GAINLLR_*) tables.
 * NOTE(review): nothing in this chunk shows the table being modified at
 * runtime — candidate for const; confirm against the init path.
 */
176static struct stv090x_reg stv0900_initval[] = {
177
178	{ STV090x_OUTCFG,		0x00 },
179	{ STV090x_MODECFG,		0xff },
180	{ STV090x_AGCRF1CFG,		0x11 },
181	{ STV090x_AGCRF2CFG,		0x13 },
182	{ STV090x_TSGENERAL1X,		0x14 },
183	{ STV090x_TSTTNR2,		0x21 },
184	{ STV090x_TSTTNR4,		0x21 },
185	{ STV090x_P2_DISTXCTL,		0x22 },
186	{ STV090x_P2_F22TX,		0xc0 },
187	{ STV090x_P2_F22RX,		0xc0 },
188	{ STV090x_P2_DISRXCTL,		0x00 },
189	{ STV090x_P2_DMDCFGMD,		0xF9 },
190	{ STV090x_P2_DEMOD,		0x08 },
191	{ STV090x_P2_DMDCFG3,		0xc4 },
192	{ STV090x_P2_CARFREQ,		0xed },
193	{ STV090x_P2_LDT,		0xd0 },
194	{ STV090x_P2_LDT2,		0xb8 },
195	{ STV090x_P2_TMGCFG,		0xd2 },
196	{ STV090x_P2_TMGTHRISE,		0x20 },
197	{ STV090x_P1_TMGCFG,		0xd2 },
198
199	{ STV090x_P2_TMGTHFALL,		0x00 },
200	{ STV090x_P2_FECSPY,		0x88 },
201	{ STV090x_P2_FSPYDATA,		0x3a },
202	{ STV090x_P2_FBERCPT4,		0x00 },
203	{ STV090x_P2_FSPYBER,		0x10 },
204	{ STV090x_P2_ERRCTRL1,		0x35 },
205	{ STV090x_P2_ERRCTRL2,		0xc1 },
206	{ STV090x_P2_CFRICFG,		0xf8 },
207	{ STV090x_P2_NOSCFG,		0x1c },
208	{ STV090x_P2_DMDTOM,		0x20 },
209	{ STV090x_P2_CORRELMANT,	0x70 },
210	{ STV090x_P2_CORRELABS,		0x88 },
211	{ STV090x_P2_AGC2O,		0x5b },
212	{ STV090x_P2_AGC2REF,		0x38 },
213	{ STV090x_P2_CARCFG,		0xe4 },
214	{ STV090x_P2_ACLC,		0x1A },
215	{ STV090x_P2_BCLC,		0x09 },
216	{ STV090x_P2_CARHDR,		0x08 },
217	{ STV090x_P2_KREFTMG,		0xc1 },
218	{ STV090x_P2_SFRUPRATIO,	0xf0 },
219	{ STV090x_P2_SFRLOWRATIO,	0x70 },
220	{ STV090x_P2_SFRSTEP,		0x58 },
221	{ STV090x_P2_TMGCFG2,		0x01 },
222	{ STV090x_P2_CAR2CFG,		0x26 },
223	{ STV090x_P2_BCLC2S2Q,		0x86 },
224	{ STV090x_P2_BCLC2S28,		0x86 },
225	{ STV090x_P2_SMAPCOEF7,		0x77 },
226	{ STV090x_P2_SMAPCOEF6,		0x85 },
227	{ STV090x_P2_SMAPCOEF5,		0x77 },
228	{ STV090x_P2_TSCFGL,		0x20 },
229	{ STV090x_P2_DMDCFG2,		0x3b },
230	{ STV090x_P2_MODCODLST0,	0xff },
231	{ STV090x_P2_MODCODLST1,	0xff },
232	{ STV090x_P2_MODCODLST2,	0xff },
233	{ STV090x_P2_MODCODLST3,	0xff },
234	{ STV090x_P2_MODCODLST4,	0xff },
235	{ STV090x_P2_MODCODLST5,	0xff },
236	{ STV090x_P2_MODCODLST6,	0xff },
237	{ STV090x_P2_MODCODLST7,	0xcc },
238	{ STV090x_P2_MODCODLST8,	0xcc },
239	{ STV090x_P2_MODCODLST9,	0xcc },
240	{ STV090x_P2_MODCODLSTA,	0xcc },
241	{ STV090x_P2_MODCODLSTB,	0xcc },
242	{ STV090x_P2_MODCODLSTC,	0xcc },
243	{ STV090x_P2_MODCODLSTD,	0xcc },
244	{ STV090x_P2_MODCODLSTE,	0xcc },
245	{ STV090x_P2_MODCODLSTF,	0xcf },
246	{ STV090x_P1_DISTXCTL,		0x22 },
247	{ STV090x_P1_F22TX,		0xc0 },
248	{ STV090x_P1_F22RX,		0xc0 },
249	{ STV090x_P1_DISRXCTL,		0x00 },
250	{ STV090x_P1_DMDCFGMD,		0xf9 },
251	{ STV090x_P1_DEMOD,		0x08 },
252	{ STV090x_P1_DMDCFG3,		0xc4 },
253	{ STV090x_P1_DMDTOM,		0x20 },
254	{ STV090x_P1_CARFREQ,		0xed },
255	{ STV090x_P1_LDT,		0xd0 },
256	{ STV090x_P1_LDT2,		0xb8 },
257	{ STV090x_P1_TMGCFG,		0xd2 },
258	{ STV090x_P1_TMGTHRISE,		0x20 },
259	{ STV090x_P1_TMGTHFALL,		0x00 },
260	{ STV090x_P1_SFRUPRATIO,	0xf0 },
261	{ STV090x_P1_SFRLOWRATIO,	0x70 },
262	{ STV090x_P1_TSCFGL,		0x20 },
263	{ STV090x_P1_FECSPY,		0x88 },
264	{ STV090x_P1_FSPYDATA,		0x3a },
265	{ STV090x_P1_FBERCPT4,		0x00 },
266	{ STV090x_P1_FSPYBER,		0x10 },
267	{ STV090x_P1_ERRCTRL1,		0x35 },
268	{ STV090x_P1_ERRCTRL2,		0xc1 },
269	{ STV090x_P1_CFRICFG,		0xf8 },
270	{ STV090x_P1_NOSCFG,		0x1c },
271	{ STV090x_P1_CORRELMANT,	0x70 },
272	{ STV090x_P1_CORRELABS,		0x88 },
273	{ STV090x_P1_AGC2O,		0x5b },
274	{ STV090x_P1_AGC2REF,		0x38 },
275	{ STV090x_P1_CARCFG,		0xe4 },
276	{ STV090x_P1_ACLC,		0x1A },
277	{ STV090x_P1_BCLC,		0x09 },
278	{ STV090x_P1_CARHDR,		0x08 },
279	{ STV090x_P1_KREFTMG,		0xc1 },
280	{ STV090x_P1_SFRSTEP,		0x58 },
281	{ STV090x_P1_TMGCFG2,		0x01 },
282	{ STV090x_P1_CAR2CFG,		0x26 },
283	{ STV090x_P1_BCLC2S2Q,		0x86 },
284	{ STV090x_P1_BCLC2S28,		0x86 },
285	{ STV090x_P1_SMAPCOEF7,		0x77 },
286	{ STV090x_P1_SMAPCOEF6,		0x85 },
287	{ STV090x_P1_SMAPCOEF5,		0x77 },
288	{ STV090x_P1_DMDCFG2,		0x3b },
289	{ STV090x_P1_MODCODLST0,	0xff },
290	{ STV090x_P1_MODCODLST1,	0xff },
291	{ STV090x_P1_MODCODLST2,	0xff },
292	{ STV090x_P1_MODCODLST3,	0xff },
293	{ STV090x_P1_MODCODLST4,	0xff },
294	{ STV090x_P1_MODCODLST5,	0xff },
295	{ STV090x_P1_MODCODLST6,	0xff },
296	{ STV090x_P1_MODCODLST7,	0xcc },
297	{ STV090x_P1_MODCODLST8,	0xcc },
298	{ STV090x_P1_MODCODLST9,	0xcc },
299	{ STV090x_P1_MODCODLSTA,	0xcc },
300	{ STV090x_P1_MODCODLSTB,	0xcc },
301	{ STV090x_P1_MODCODLSTC,	0xcc },
302	{ STV090x_P1_MODCODLSTD,	0xcc },
303	{ STV090x_P1_MODCODLSTE,	0xcc },
304	{ STV090x_P1_MODCODLSTF,	0xcf },
305	{ STV090x_GENCFG,		0x1d },
306	{ STV090x_NBITER_NF4,		0x37 },
307	{ STV090x_NBITER_NF5,		0x29 },
308	{ STV090x_NBITER_NF6,		0x37 },
309	{ STV090x_NBITER_NF7,		0x33 },
310	{ STV090x_NBITER_NF8,		0x31 },
311	{ STV090x_NBITER_NF9,		0x2f },
312	{ STV090x_NBITER_NF10,		0x39 },
313	{ STV090x_NBITER_NF11,		0x3a },
314	{ STV090x_NBITER_NF12,		0x29 },
315	{ STV090x_NBITER_NF13,		0x37 },
316	{ STV090x_NBITER_NF14,		0x33 },
317	{ STV090x_NBITER_NF15,		0x2f },
318	{ STV090x_NBITER_NF16,		0x39 },
319	{ STV090x_NBITER_NF17,		0x3a },
320	{ STV090x_NBITERNOERR,		0x04 },
321	{ STV090x_GAINLLR_NF4,		0x0C },
322	{ STV090x_GAINLLR_NF5,		0x0F },
323	{ STV090x_GAINLLR_NF6,		0x11 },
324	{ STV090x_GAINLLR_NF7,		0x14 },
325	{ STV090x_GAINLLR_NF8,		0x17 },
326	{ STV090x_GAINLLR_NF9,		0x19 },
327	{ STV090x_GAINLLR_NF10,		0x20 },
328	{ STV090x_GAINLLR_NF11,		0x21 },
329	{ STV090x_GAINLLR_NF12,		0x0D },
330	{ STV090x_GAINLLR_NF13,		0x0F },
331	{ STV090x_GAINLLR_NF14,		0x13 },
332	{ STV090x_GAINLLR_NF15,		0x1A },
333	{ STV090x_GAINLLR_NF16,		0x1F },
334	{ STV090x_GAINLLR_NF17,		0x21 },
335	{ STV090x_RCCFGH,		0x20 },
336	{ STV090x_P1_FECM,		0x01 }, /* disable DSS modes */
337	{ STV090x_P2_FECM,		0x01 }, /* disable DSS modes */
338	{ STV090x_P1_PRVIT,		0x2F }, /* disable PR 6/7 */
339	{ STV090x_P2_PRVIT,		0x2F }, /* disable PR 6/7 */
340};
341
/*
 * STV0903 (single demodulator) power-on register defaults — P1 path
 * only, otherwise parallel to stv0900_initval.  Note GENCFG differs
 * (0x1c here vs 0x1d for the dual-demod part).
 */
342static struct stv090x_reg stv0903_initval[] = {
343	{ STV090x_OUTCFG,		0x00 },
344	{ STV090x_AGCRF1CFG,		0x11 },
345	{ STV090x_STOPCLK1,		0x48 },
346	{ STV090x_STOPCLK2,		0x14 },
347	{ STV090x_TSTTNR1,		0x27 },
348	{ STV090x_TSTTNR2,		0x21 },
349	{ STV090x_P1_DISTXCTL,		0x22 },
350	{ STV090x_P1_F22TX,		0xc0 },
351	{ STV090x_P1_F22RX,		0xc0 },
352	{ STV090x_P1_DISRXCTL,		0x00 },
353	{ STV090x_P1_DMDCFGMD,		0xF9 },
354	{ STV090x_P1_DEMOD,		0x08 },
355	{ STV090x_P1_DMDCFG3,		0xc4 },
356	{ STV090x_P1_CARFREQ,		0xed },
357	{ STV090x_P1_TNRCFG2,		0x82 },
358	{ STV090x_P1_LDT,		0xd0 },
359	{ STV090x_P1_LDT2,		0xb8 },
360	{ STV090x_P1_TMGCFG,		0xd2 },
361	{ STV090x_P1_TMGTHRISE,		0x20 },
362	{ STV090x_P1_TMGTHFALL,		0x00 },
363	{ STV090x_P1_SFRUPRATIO,	0xf0 },
364	{ STV090x_P1_SFRLOWRATIO,	0x70 },
365	{ STV090x_P1_TSCFGL,		0x20 },
366	{ STV090x_P1_FECSPY,		0x88 },
367	{ STV090x_P1_FSPYDATA,		0x3a },
368	{ STV090x_P1_FBERCPT4,		0x00 },
369	{ STV090x_P1_FSPYBER,		0x10 },
370	{ STV090x_P1_ERRCTRL1,		0x35 },
371	{ STV090x_P1_ERRCTRL2,		0xc1 },
372	{ STV090x_P1_CFRICFG,		0xf8 },
373	{ STV090x_P1_NOSCFG,		0x1c },
374	{ STV090x_P1_DMDTOM,		0x20 },
375	{ STV090x_P1_CORRELMANT,	0x70 },
376	{ STV090x_P1_CORRELABS,		0x88 },
377	{ STV090x_P1_AGC2O,		0x5b },
378	{ STV090x_P1_AGC2REF,		0x38 },
379	{ STV090x_P1_CARCFG,		0xe4 },
380	{ STV090x_P1_ACLC,		0x1A },
381	{ STV090x_P1_BCLC,		0x09 },
382	{ STV090x_P1_CARHDR,		0x08 },
383	{ STV090x_P1_KREFTMG,		0xc1 },
384	{ STV090x_P1_SFRSTEP,		0x58 },
385	{ STV090x_P1_TMGCFG2,		0x01 },
386	{ STV090x_P1_CAR2CFG,		0x26 },
387	{ STV090x_P1_BCLC2S2Q,		0x86 },
388	{ STV090x_P1_BCLC2S28,		0x86 },
389	{ STV090x_P1_SMAPCOEF7,		0x77 },
390	{ STV090x_P1_SMAPCOEF6,		0x85 },
391	{ STV090x_P1_SMAPCOEF5,		0x77 },
392	{ STV090x_P1_DMDCFG2,		0x3b },
393	{ STV090x_P1_MODCODLST0,	0xff },
394	{ STV090x_P1_MODCODLST1,	0xff },
395	{ STV090x_P1_MODCODLST2,	0xff },
396	{ STV090x_P1_MODCODLST3,	0xff },
397	{ STV090x_P1_MODCODLST4,	0xff },
398	{ STV090x_P1_MODCODLST5,	0xff },
399	{ STV090x_P1_MODCODLST6,	0xff },
400	{ STV090x_P1_MODCODLST7,	0xcc },
401	{ STV090x_P1_MODCODLST8,	0xcc },
402	{ STV090x_P1_MODCODLST9,	0xcc },
403	{ STV090x_P1_MODCODLSTA,	0xcc },
404	{ STV090x_P1_MODCODLSTB,	0xcc },
405	{ STV090x_P1_MODCODLSTC,	0xcc },
406	{ STV090x_P1_MODCODLSTD,	0xcc },
407	{ STV090x_P1_MODCODLSTE,	0xcc },
408	{ STV090x_P1_MODCODLSTF,	0xcf },
409	{ STV090x_GENCFG,		0x1c },
410	{ STV090x_NBITER_NF4,		0x37 },
411	{ STV090x_NBITER_NF5,		0x29 },
412	{ STV090x_NBITER_NF6,		0x37 },
413	{ STV090x_NBITER_NF7,		0x33 },
414	{ STV090x_NBITER_NF8,		0x31 },
415	{ STV090x_NBITER_NF9,		0x2f },
416	{ STV090x_NBITER_NF10,		0x39 },
417	{ STV090x_NBITER_NF11,		0x3a },
418	{ STV090x_NBITER_NF12,		0x29 },
419	{ STV090x_NBITER_NF13,		0x37 },
420	{ STV090x_NBITER_NF14,		0x33 },
421	{ STV090x_NBITER_NF15,		0x2f },
422	{ STV090x_NBITER_NF16,		0x39 },
423	{ STV090x_NBITER_NF17,		0x3a },
424	{ STV090x_NBITERNOERR,		0x04 },
425	{ STV090x_GAINLLR_NF4,		0x0C },
426	{ STV090x_GAINLLR_NF5,		0x0F },
427	{ STV090x_GAINLLR_NF6,		0x11 },
428	{ STV090x_GAINLLR_NF7,		0x14 },
429	{ STV090x_GAINLLR_NF8,		0x17 },
430	{ STV090x_GAINLLR_NF9,		0x19 },
431	{ STV090x_GAINLLR_NF10,		0x20 },
432	{ STV090x_GAINLLR_NF11,		0x21 },
433	{ STV090x_GAINLLR_NF12,		0x0D },
434	{ STV090x_GAINLLR_NF13,		0x0F },
435	{ STV090x_GAINLLR_NF14,		0x13 },
436	{ STV090x_GAINLLR_NF15,		0x1A },
437	{ STV090x_GAINLLR_NF16,		0x1F },
438	{ STV090x_GAINLLR_NF17,		0x21 },
439	{ STV090x_RCCFGH,		0x20 },
440	{ STV090x_P1_FECM,		0x01 }, /* disable the DSS mode */
441	{ STV090x_P1_PRVIT,		0x2f }  /* disable puncture rate 6/7 */
442};
443
/*
 * Register overrides applied on top of the power-on defaults for
 * cut 2.0 silicon: STV0900 (both P1/P2 paths) and STV0903 (P1 only).
 */
444static struct stv090x_reg stv0900_cut20_val[] = {
445
446	{ STV090x_P2_DMDCFG3,		0xe8 },
447	{ STV090x_P2_DMDCFG4,		0x10 },
448	{ STV090x_P2_CARFREQ,		0x38 },
449	{ STV090x_P2_CARHDR,		0x20 },
450	{ STV090x_P2_KREFTMG,		0x5a },
451	{ STV090x_P2_SMAPCOEF7,		0x06 },
452	{ STV090x_P2_SMAPCOEF6,		0x00 },
453	{ STV090x_P2_SMAPCOEF5,		0x04 },
454	{ STV090x_P2_NOSCFG,		0x0c },
455	{ STV090x_P1_DMDCFG3,		0xe8 },
456	{ STV090x_P1_DMDCFG4,		0x10 },
457	{ STV090x_P1_CARFREQ,		0x38 },
458	{ STV090x_P1_CARHDR,		0x20 },
459	{ STV090x_P1_KREFTMG,		0x5a },
460	{ STV090x_P1_SMAPCOEF7,		0x06 },
461	{ STV090x_P1_SMAPCOEF6,		0x00 },
462	{ STV090x_P1_SMAPCOEF5,		0x04 },
463	{ STV090x_P1_NOSCFG,		0x0c },
464	{ STV090x_GAINLLR_NF4,		0x21 },
465	{ STV090x_GAINLLR_NF5,		0x21 },
466	{ STV090x_GAINLLR_NF6,		0x20 },
467	{ STV090x_GAINLLR_NF7,		0x1F },
468	{ STV090x_GAINLLR_NF8,		0x1E },
469	{ STV090x_GAINLLR_NF9,		0x1E },
470	{ STV090x_GAINLLR_NF10,		0x1D },
471	{ STV090x_GAINLLR_NF11,		0x1B },
472	{ STV090x_GAINLLR_NF12,		0x20 },
473	{ STV090x_GAINLLR_NF13,		0x20 },
474	{ STV090x_GAINLLR_NF14,		0x20 },
475	{ STV090x_GAINLLR_NF15,		0x20 },
476	{ STV090x_GAINLLR_NF16,		0x20 },
477	{ STV090x_GAINLLR_NF17,		0x21 },
478};
479
/* Same cut 2.0 overrides for the single-demod STV0903 (P1 path only). */
480static struct stv090x_reg stv0903_cut20_val[] = {
481	{ STV090x_P1_DMDCFG3,		0xe8 },
482	{ STV090x_P1_DMDCFG4,		0x10 },
483	{ STV090x_P1_CARFREQ,		0x38 },
484	{ STV090x_P1_CARHDR,		0x20 },
485	{ STV090x_P1_KREFTMG,		0x5a },
486	{ STV090x_P1_SMAPCOEF7,		0x06 },
487	{ STV090x_P1_SMAPCOEF6,		0x00 },
488	{ STV090x_P1_SMAPCOEF5,		0x04 },
489	{ STV090x_P1_NOSCFG,		0x0c },
490	{ STV090x_GAINLLR_NF4,		0x21 },
491	{ STV090x_GAINLLR_NF5,		0x21 },
492	{ STV090x_GAINLLR_NF6,		0x20 },
493	{ STV090x_GAINLLR_NF7,		0x1F },
494	{ STV090x_GAINLLR_NF8,		0x1E },
495	{ STV090x_GAINLLR_NF9,		0x1E },
496	{ STV090x_GAINLLR_NF10,		0x1D },
497	{ STV090x_GAINLLR_NF11,		0x1B },
498	{ STV090x_GAINLLR_NF12,		0x20 },
499	{ STV090x_GAINLLR_NF13,		0x20 },
500	{ STV090x_GAINLLR_NF14,		0x20 },
501	{ STV090x_GAINLLR_NF15,		0x20 },
502	{ STV090x_GAINLLR_NF16,		0x20 },
503	{ STV090x_GAINLLR_NF17,		0x21 }
504};
505
506/* Cut 2.0 Long Frame Tracking CR loop */
507static struct stv090x_long_frame_crloop stv090x_s2_crl_cut20[] = {
508 /* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
509 { STV090x_QPSK_12, 0x1f, 0x3f, 0x1e, 0x3f, 0x3d, 0x1f, 0x3d, 0x3e, 0x3d, 0x1e },
510 { STV090x_QPSK_35, 0x2f, 0x3f, 0x2e, 0x2f, 0x3d, 0x0f, 0x0e, 0x2e, 0x3d, 0x0e },
511 { STV090x_QPSK_23, 0x2f, 0x3f, 0x2e, 0x2f, 0x0e, 0x0f, 0x0e, 0x1e, 0x3d, 0x3d },
512 { STV090x_QPSK_34, 0x3f, 0x3f, 0x3e, 0x1f, 0x0e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
513 { STV090x_QPSK_45, 0x3f, 0x3f, 0x3e, 0x1f, 0x0e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
514 { STV090x_QPSK_56, 0x3f, 0x3f, 0x3e, 0x1f, 0x0e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
515 { STV090x_QPSK_89, 0x3f, 0x3f, 0x3e, 0x1f, 0x1e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
516 { STV090x_QPSK_910, 0x3f, 0x3f, 0x3e, 0x1f, 0x1e, 0x3e, 0x0e, 0x1e, 0x3d, 0x3d },
517 { STV090x_8PSK_35, 0x3c, 0x3e, 0x1c, 0x2e, 0x0c, 0x1e, 0x2b, 0x2d, 0x1b, 0x1d },
518 { STV090x_8PSK_23, 0x1d, 0x3e, 0x3c, 0x2e, 0x2c, 0x1e, 0x0c, 0x2d, 0x2b, 0x1d },
519 { STV090x_8PSK_34, 0x0e, 0x3e, 0x3d, 0x2e, 0x0d, 0x1e, 0x2c, 0x2d, 0x0c, 0x1d },
520 { STV090x_8PSK_56, 0x2e, 0x3e, 0x1e, 0x2e, 0x2d, 0x1e, 0x3c, 0x2d, 0x2c, 0x1d },
521 { STV090x_8PSK_89, 0x3e, 0x3e, 0x1e, 0x2e, 0x3d, 0x1e, 0x0d, 0x2d, 0x3c, 0x1d },
522 { STV090x_8PSK_910, 0x3e, 0x3e, 0x1e, 0x2e, 0x3d, 0x1e, 0x1d, 0x2d, 0x0d, 0x1d }
523};
524
525/* Cut 3.0 Long Frame Tracking CR loop */
526static struct stv090x_long_frame_crloop stv090x_s2_crl_cut30[] = {
527 /* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
528 { STV090x_QPSK_12, 0x3c, 0x2c, 0x0c, 0x2c, 0x1b, 0x2c, 0x1b, 0x1c, 0x0b, 0x3b },
529 { STV090x_QPSK_35, 0x0d, 0x0d, 0x0c, 0x0d, 0x1b, 0x3c, 0x1b, 0x1c, 0x0b, 0x3b },
530 { STV090x_QPSK_23, 0x1d, 0x0d, 0x0c, 0x1d, 0x2b, 0x3c, 0x1b, 0x1c, 0x0b, 0x3b },
531 { STV090x_QPSK_34, 0x1d, 0x1d, 0x0c, 0x1d, 0x2b, 0x3c, 0x1b, 0x1c, 0x0b, 0x3b },
532 { STV090x_QPSK_45, 0x2d, 0x1d, 0x1c, 0x1d, 0x2b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
533 { STV090x_QPSK_56, 0x2d, 0x1d, 0x1c, 0x1d, 0x2b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
534 { STV090x_QPSK_89, 0x3d, 0x2d, 0x1c, 0x1d, 0x3b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
535 { STV090x_QPSK_910, 0x3d, 0x2d, 0x1c, 0x1d, 0x3b, 0x3c, 0x2b, 0x0c, 0x1b, 0x3b },
536 { STV090x_8PSK_35, 0x39, 0x29, 0x39, 0x19, 0x19, 0x19, 0x19, 0x19, 0x09, 0x19 },
537 { STV090x_8PSK_23, 0x2a, 0x39, 0x1a, 0x0a, 0x39, 0x0a, 0x29, 0x39, 0x29, 0x0a },
538 { STV090x_8PSK_34, 0x2b, 0x3a, 0x1b, 0x1b, 0x3a, 0x1b, 0x1a, 0x0b, 0x1a, 0x3a },
539 { STV090x_8PSK_56, 0x0c, 0x1b, 0x3b, 0x3b, 0x1b, 0x3b, 0x3a, 0x3b, 0x3a, 0x1b },
540 { STV090x_8PSK_89, 0x0d, 0x3c, 0x2c, 0x2c, 0x2b, 0x0c, 0x0b, 0x3b, 0x0b, 0x1b },
541 { STV090x_8PSK_910, 0x0d, 0x0d, 0x2c, 0x3c, 0x3b, 0x1c, 0x0b, 0x3b, 0x0b, 0x1b }
542};
543
544/* Cut 2.0 Long Frame Tracking CR Loop */
545static struct stv090x_long_frame_crloop stv090x_s2_apsk_crl_cut20[] = {
546 /* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
547 { STV090x_16APSK_23, 0x0c, 0x0c, 0x0c, 0x0c, 0x1d, 0x0c, 0x3c, 0x0c, 0x2c, 0x0c },
548 { STV090x_16APSK_34, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0c, 0x2d, 0x0c, 0x1d, 0x0c },
549 { STV090x_16APSK_45, 0x0c, 0x0c, 0x0c, 0x0c, 0x1e, 0x0c, 0x3d, 0x0c, 0x2d, 0x0c },
550 { STV090x_16APSK_56, 0x0c, 0x0c, 0x0c, 0x0c, 0x1e, 0x0c, 0x3d, 0x0c, 0x2d, 0x0c },
551 { STV090x_16APSK_89, 0x0c, 0x0c, 0x0c, 0x0c, 0x2e, 0x0c, 0x0e, 0x0c, 0x3d, 0x0c },
552 { STV090x_16APSK_910, 0x0c, 0x0c, 0x0c, 0x0c, 0x2e, 0x0c, 0x0e, 0x0c, 0x3d, 0x0c },
553 { STV090x_32APSK_34, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
554 { STV090x_32APSK_45, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
555 { STV090x_32APSK_56, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
556 { STV090x_32APSK_89, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c },
557 { STV090x_32APSK_910, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c }
558};
559
560/* Cut 3.0 Long Frame Tracking CR Loop */
561static struct stv090x_long_frame_crloop stv090x_s2_apsk_crl_cut30[] = {
562 /* MODCOD 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
563 { STV090x_16APSK_23, 0x0a, 0x0a, 0x0a, 0x0a, 0x1a, 0x0a, 0x3a, 0x0a, 0x2a, 0x0a },
564 { STV090x_16APSK_34, 0x0a, 0x0a, 0x0a, 0x0a, 0x0b, 0x0a, 0x3b, 0x0a, 0x1b, 0x0a },
565 { STV090x_16APSK_45, 0x0a, 0x0a, 0x0a, 0x0a, 0x1b, 0x0a, 0x3b, 0x0a, 0x2b, 0x0a },
566 { STV090x_16APSK_56, 0x0a, 0x0a, 0x0a, 0x0a, 0x1b, 0x0a, 0x3b, 0x0a, 0x2b, 0x0a },
567 { STV090x_16APSK_89, 0x0a, 0x0a, 0x0a, 0x0a, 0x2b, 0x0a, 0x0c, 0x0a, 0x3b, 0x0a },
568 { STV090x_16APSK_910, 0x0a, 0x0a, 0x0a, 0x0a, 0x2b, 0x0a, 0x0c, 0x0a, 0x3b, 0x0a },
569 { STV090x_32APSK_34, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
570 { STV090x_32APSK_45, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
571 { STV090x_32APSK_56, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
572 { STV090x_32APSK_89, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a },
573 { STV090x_32APSK_910, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a }
574};
575
/* Cut 2.0 Long Frame Tracking CR loop, low-rate QPSK MODCODs (1/4, 1/3, 2/5) */
static struct stv090x_long_frame_crloop stv090x_s2_lowqpsk_crl_cut20[] = {
	/* MODCOD  2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
	{ STV090x_QPSK_14, 0x0f, 0x3f, 0x0e, 0x3f, 0x2d, 0x2f, 0x2d, 0x1f, 0x3d, 0x3e },
	{ STV090x_QPSK_13, 0x0f, 0x3f, 0x0e, 0x3f, 0x2d, 0x2f, 0x3d, 0x0f, 0x3d, 0x2e },
	{ STV090x_QPSK_25, 0x1f, 0x3f, 0x1e, 0x3f, 0x3d, 0x1f, 0x3d, 0x3e, 0x3d, 0x2e }
};
582
/* Cut 3.0 Long Frame Tracking CR loop, low-rate QPSK MODCODs (1/4, 1/3, 2/5) */
static struct stv090x_long_frame_crloop stv090x_s2_lowqpsk_crl_cut30[] = {
	/* MODCOD  2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon 20MPoff 30MPon 30MPoff */
	{ STV090x_QPSK_14, 0x0c, 0x3c, 0x0b, 0x3c, 0x2a, 0x2c, 0x2a, 0x1c, 0x3a, 0x3b },
	{ STV090x_QPSK_13, 0x0c, 0x3c, 0x0b, 0x3c, 0x2a, 0x2c, 0x3a, 0x0c, 0x3a, 0x2b },
	{ STV090x_QPSK_25, 0x1c, 0x3c, 0x1b, 0x3c, 0x3a, 0x1c, 0x3a, 0x3b, 0x3a, 0x2b }
};
589
/* Cut 2.0 Short Frame Tracking CR Loop */
/* One coefficient per modulation, per symbol-rate bin (short frames carry no pilots). */
static struct stv090x_short_frame_crloop stv090x_s2_short_crl_cut20[] = {
	/* MODCOD        2M    5M    10M   20M   30M */
	{ STV090x_QPSK,   0x2f, 0x2e, 0x0e, 0x0e, 0x3d },
	{ STV090x_8PSK,   0x3e, 0x0e, 0x2d, 0x0d, 0x3c },
	{ STV090x_16APSK, 0x1e, 0x1e, 0x1e, 0x3d, 0x2d },
	{ STV090x_32APSK, 0x1e, 0x1e, 0x1e, 0x3d, 0x2d }
};
598
/* Cut 3.0 Short Frame Tracking CR Loop */
/* Same layout as the cut 2.0 short-frame table, values for silicon cut >= 3.0. */
static struct stv090x_short_frame_crloop stv090x_s2_short_crl_cut30[] = {
	/* MODCOD        2M    5M    10M   20M   30M */
	{ STV090x_QPSK,   0x2C, 0x2B, 0x0B, 0x0B, 0x3A },
	{ STV090x_8PSK,   0x3B, 0x0B, 0x2A, 0x0A, 0x39 },
	{ STV090x_16APSK, 0x1B, 0x1B, 0x1B, 0x3A, 0x2A },
	{ STV090x_32APSK, 0x1B, 0x1B, 0x1B, 0x3A, 0x2A }
};
607
608static inline s32 comp2(s32 __x, s32 __width)
609{
610 if (__width == 32)
611 return __x;
612 else
613 return (__x >= (1 << (__width - 1))) ? (__x - (1 << __width)) : __x;
614}
615
616static int stv090x_read_reg(struct stv090x_state *state, unsigned int reg)
617{
618 const struct stv090x_config *config = state->config;
619 int ret;
620
621 u8 b0[] = { reg >> 8, reg & 0xff };
622 u8 buf;
623
624 struct i2c_msg msg[] = {
625 { .addr = config->address, .flags = 0, .buf = b0, .len = 2 },
626 { .addr = config->address, .flags = I2C_M_RD, .buf = &buf, .len = 1 }
627 };
628
629 ret = i2c_transfer(state->i2c, msg, 2);
630 if (ret != 2) {
631 if (ret != -ERESTARTSYS)
632 dprintk(FE_ERROR, 1,
633 "Read error, Reg=[0x%02x], Status=%d",
634 reg, ret);
635
636 return ret < 0 ? ret : -EREMOTEIO;
637 }
638 if (unlikely(*state->verbose >= FE_DEBUGREG))
639 dprintk(FE_ERROR, 1, "Reg=[0x%02x], data=%02x",
640 reg, buf);
641
642 return (unsigned int) buf;
643}
644
/*
 * stv090x_write_regs - write @count bytes starting at 16-bit address @reg
 * in a single I2C transaction (2 address bytes followed by the payload).
 * Returns 0 on success, a negative errno (or -EREMOTEIO for a short
 * transfer) on failure.
 *
 * NOTE(review): the buffer is a VLA sized by the caller-supplied @count;
 * in-driver callers appear to pass only small counts — confirm before
 * exposing this to larger transfers.
 */
static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8 *data, u32 count)
{
	const struct stv090x_config *config = state->config;
	int ret;
	u8 buf[2 + count];
	struct i2c_msg i2c_msg = { .addr = config->address, .flags = 0, .buf = buf, .len = 2 + count };

	buf[0] = reg >> 8;	/* register address MSB */
	buf[1] = reg & 0xff;	/* register address LSB */
	memcpy(&buf[2], data, count);

	if (unlikely(*state->verbose >= FE_DEBUGREG)) {
		int i;

		printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
		for (i = 0; i < count; i++)
			printk(" %02x", data[i]);
		printk("\n");
	}

	ret = i2c_transfer(state->i2c, &i2c_msg, 1);
	if (ret != 1) {
		if (ret != -ERESTARTSYS)
			dprintk(FE_ERROR, 1, "Reg=[0x%04x], Data=[0x%02x ...], Count=%u, Status=%d",
				reg, data[0], count, ret);
		return ret < 0 ? ret : -EREMOTEIO;
	}

	return 0;
}
675
/* Write a single 8-bit register; convenience wrapper around stv090x_write_regs(). */
static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 data)
{
	return stv090x_write_regs(state, reg, &data, 1);
}
680
681static int stv090x_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
682{
683 struct stv090x_state *state = fe->demodulator_priv;
684 u32 reg;
685
686 reg = STV090x_READ_DEMOD(state, I2CRPT);
687 if (enable) {
688 dprintk(FE_DEBUG, 1, "Enable Gate");
689 STV090x_SETFIELD_Px(reg, I2CT_ON_FIELD, 1);
690 if (STV090x_WRITE_DEMOD(state, I2CRPT, reg) < 0)
691 goto err;
692
693 } else {
694 dprintk(FE_DEBUG, 1, "Disable Gate");
695 STV090x_SETFIELD_Px(reg, I2CT_ON_FIELD, 0);
696 if ((STV090x_WRITE_DEMOD(state, I2CRPT, reg)) < 0)
697 goto err;
698 }
699 return 0;
700err:
701 dprintk(FE_ERROR, 1, "I/O error");
702 return -1;
703}
704
/*
 * Choose demodulator and FEC lock timeouts (milliseconds) from the
 * search algorithm and the symbol rate: lower symbol rates need longer
 * timeouts. A warm search halves the demod timeout since the carrier
 * is already known.
 */
static void stv090x_get_lock_tmg(struct stv090x_state *state)
{
	switch (state->algo) {
	case STV090x_BLIND_SEARCH:
		dprintk(FE_DEBUG, 1, "Blind Search");
		if (state->srate <= 1500000) { /* SR <= 1.5 Msps */
			state->DemodTimeout = 1500;
			state->FecTimeout = 400;
		} else if (state->srate <= 5000000) { /* 1.5 Msps < SR <= 5 Msps */
			state->DemodTimeout = 1000;
			state->FecTimeout = 300;
		} else { /* SR > 5 Msps */
			state->DemodTimeout = 700;
			state->FecTimeout = 100;
		}
		break;

	case STV090x_COLD_SEARCH:
	case STV090x_WARM_SEARCH:
	default:
		dprintk(FE_DEBUG, 1, "Normal Search");
		if (state->srate <= 1000000) { /* SR <= 1 Msps */
			state->DemodTimeout = 4500;
			state->FecTimeout = 1700;
		} else if (state->srate <= 2000000) { /* 1 Msps < SR <= 2 Msps */
			state->DemodTimeout = 2500;
			state->FecTimeout = 1100;
		} else if (state->srate <= 5000000) { /* 2 Msps < SR <= 5 Msps */
			state->DemodTimeout = 1000;
			state->FecTimeout = 550;
		} else if (state->srate <= 10000000) { /* 5 Msps < SR <= 10 Msps */
			state->DemodTimeout = 700;
			state->FecTimeout = 250;
		} else if (state->srate <= 20000000) { /* 10 Msps < SR <= 20 Msps */
			state->DemodTimeout = 400;
			state->FecTimeout = 130;
		} else { /* SR > 20 Msps */
			state->DemodTimeout = 300;
			state->FecTimeout = 100;
		}
		break;
	}

	/* carrier already known: acquisition is faster */
	if (state->algo == STV090x_WARM_SEARCH)
		state->DemodTimeout /= 2;
}
751
752static int stv090x_set_srate(struct stv090x_state *state, u32 srate)
753{
754 u32 sym;
755
756 if (srate > 60000000) {
757 sym = (srate << 4); /* SR * 2^16 / master_clk */
758 sym /= (state->mclk >> 12);
759 } else if (srate > 6000000) {
760 sym = (srate << 6);
761 sym /= (state->mclk >> 10);
762 } else {
763 sym = (srate << 9);
764 sym /= (state->mclk >> 7);
765 }
766
767 if (STV090x_WRITE_DEMOD(state, SFRINIT1, (sym >> 8) & 0x7f) < 0) /* MSB */
768 goto err;
769 if (STV090x_WRITE_DEMOD(state, SFRINIT0, (sym & 0xff)) < 0) /* LSB */
770 goto err;
771
772 return 0;
773err:
774 dprintk(FE_ERROR, 1, "I/O error");
775 return -1;
776}
777
778static int stv090x_set_max_srate(struct stv090x_state *state, u32 clk, u32 srate)
779{
780 u32 sym;
781
782 srate = 105 * (srate / 100);
783 if (srate > 60000000) {
784 sym = (srate << 4); /* SR * 2^16 / master_clk */
785 sym /= (state->mclk >> 12);
786 } else if (srate > 6000000) {
787 sym = (srate << 6);
788 sym /= (state->mclk >> 10);
789 } else {
790 sym = (srate << 9);
791 sym /= (state->mclk >> 7);
792 }
793
794 if (sym < 0x7fff) {
795 if (STV090x_WRITE_DEMOD(state, SFRUP1, (sym >> 8) & 0x7f) < 0) /* MSB */
796 goto err;
797 if (STV090x_WRITE_DEMOD(state, SFRUP0, sym & 0xff) < 0) /* LSB */
798 goto err;
799 } else {
800 if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x7f) < 0) /* MSB */
801 goto err;
802 if (STV090x_WRITE_DEMOD(state, SFRUP0, 0xff) < 0) /* LSB */
803 goto err;
804 }
805
806 return 0;
807err:
808 dprintk(FE_ERROR, 1, "I/O error");
809 return -1;
810}
811
812static int stv090x_set_min_srate(struct stv090x_state *state, u32 clk, u32 srate)
813{
814 u32 sym;
815
816 srate = 95 * (srate / 100);
817 if (srate > 60000000) {
818 sym = (srate << 4); /* SR * 2^16 / master_clk */
819 sym /= (state->mclk >> 12);
820 } else if (srate > 6000000) {
821 sym = (srate << 6);
822 sym /= (state->mclk >> 10);
823 } else {
824 sym = (srate << 9);
825 sym /= (state->mclk >> 7);
826 }
827
828 if (STV090x_WRITE_DEMOD(state, SFRLOW1, ((sym >> 8) & 0xff)) < 0) /* MSB */
829 goto err;
830 if (STV090x_WRITE_DEMOD(state, SFRLOW0, (sym & 0xff)) < 0) /* LSB */
831 goto err;
832 return 0;
833err:
834 dprintk(FE_ERROR, 1, "I/O error");
835 return -1;
836}
837
838static u32 stv090x_car_width(u32 srate, enum stv090x_rolloff rolloff)
839{
840 u32 ro;
841
842 switch (rolloff) {
843 case STV090x_RO_20:
844 ro = 20;
845 break;
846 case STV090x_RO_25:
847 ro = 25;
848 break;
849 case STV090x_RO_35:
850 default:
851 ro = 35;
852 break;
853 }
854
855 return srate + (srate * ro) / 100;
856}
857
/*
 * Set the Viterbi decoder error thresholds used during acquisition.
 * VTHxy holds the threshold for puncture rate x/y.
 * NOTE(review): the values look like vendor reference settings —
 * confirm against the STV0900 datasheet before changing.
 * Returns 0 on success, -1 on I/O error.
 */
static int stv090x_set_vit_thacq(struct stv090x_state *state)
{
	if (STV090x_WRITE_DEMOD(state, VTH12, 0x96) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH23, 0x64) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH34, 0x36) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH56, 0x23) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH67, 0x1e) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH78, 0x19) < 0)
		goto err;
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
877
/*
 * Set the Viterbi decoder error thresholds used for tracking
 * (looser than the acquisition thresholds in stv090x_set_vit_thacq).
 * Returns 0 on success, -1 on I/O error.
 */
static int stv090x_set_vit_thtracq(struct stv090x_state *state)
{
	if (STV090x_WRITE_DEMOD(state, VTH12, 0xd0) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH23, 0x7d) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH34, 0x53) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH56, 0x2f) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH67, 0x24) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, VTH78, 0x1f) < 0)
		goto err;
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
897
/*
 * Configure the Viterbi decoder for the current search mode.
 * FECM selects the framing (DVB-S / DSS / auto); PRVIT is a bitmask of
 * puncture rates the decoder may try — restricting it to the known FEC
 * rate speeds up acquisition.
 * Returns 0 on success, -1 on I/O error.
 */
static int stv090x_set_viterbi(struct stv090x_state *state)
{
	switch (state->search_mode) {
	case STV090x_SEARCH_AUTO:
		if (STV090x_WRITE_DEMOD(state, FECM, 0x10) < 0) /* DVB-S and DVB-S2 */
			goto err;
		if (STV090x_WRITE_DEMOD(state, PRVIT, 0x3f) < 0) /* all puncture rate */
			goto err;
		break;
	case STV090x_SEARCH_DVBS1:
		if (STV090x_WRITE_DEMOD(state, FECM, 0x00) < 0) /* disable DSS */
			goto err;
		switch (state->fec) {
		case STV090x_PR12:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x01) < 0)
				goto err;
			break;

		case STV090x_PR23:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x02) < 0)
				goto err;
			break;

		case STV090x_PR34:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x04) < 0)
				goto err;
			break;

		case STV090x_PR56:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x08) < 0)
				goto err;
			break;

		case STV090x_PR78:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x20) < 0)
				goto err;
			break;

		default:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x2f) < 0) /* all */
				goto err;
			break;
		}
		break;
	case STV090x_SEARCH_DSS:
		if (STV090x_WRITE_DEMOD(state, FECM, 0x80) < 0)
			goto err;
		switch (state->fec) {
		case STV090x_PR12:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x01) < 0)
				goto err;
			break;

		case STV090x_PR23:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x02) < 0)
				goto err;
			break;

		case STV090x_PR67:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x10) < 0)
				goto err;
			break;

		default:
			if (STV090x_WRITE_DEMOD(state, PRVIT, 0x13) < 0) /* 1/2, 2/3, 6/7 */
				goto err;
			break;
		}
		break;
	default:
		/* DVB-S2-only searches don't involve the Viterbi decoder */
		break;
	}
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
975
/*
 * Mask every DVB-S2 MODCOD by writing 0xff to all 16 MODCODLST
 * registers, so the frame filter rejects everything until the list is
 * reprogrammed (see stv090x_activate_modcod*).
 * Returns 0 on success, -1 on I/O error.
 */
static int stv090x_stop_modcod(struct stv090x_state *state)
{
	if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0xff) < 0)
		goto err;
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
1015
/*
 * Program the MODCOD allow-list for dual-demodulator operation.
 * NOTE(review): the 0xfc/0xcc/0xcf nibble pattern comes from the vendor
 * reference settings — confirm per-nibble semantics against the
 * STV0900 datasheet before changing.
 * Returns 0 on success, -1 on I/O error.
 */
static int stv090x_activate_modcod(struct stv090x_state *state)
{
	if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xfc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0xcc) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0xcf) < 0)
		goto err;

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
1056
/*
 * Program the MODCOD allow-list for single-demodulator operation:
 * authorize SHORT and LONG frames for QPSK, 8PSK, 16APSK and 32APSK
 * (0x00 = fully enabled, per the vendor reference settings —
 * NOTE(review): confirm nibble semantics against the datasheet).
 * Returns 0 on success, -1 on I/O error.
 */
static int stv090x_activate_modcod_single(struct stv090x_state *state)
{

	if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xf0) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0x00) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0x0f) < 0)
		goto err;

	return 0;

err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
1099
1100static int stv090x_vitclk_ctl(struct stv090x_state *state, int enable)
1101{
1102 u32 reg;
1103
1104 switch (state->demod) {
1105 case STV090x_DEMODULATOR_0:
1106 mutex_lock(&demod_lock);
1107 reg = stv090x_read_reg(state, STV090x_STOPCLK2);
1108 STV090x_SETFIELD(reg, STOP_CLKVIT1_FIELD, enable);
1109 if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
1110 goto err;
1111 mutex_unlock(&demod_lock);
1112 break;
1113
1114 case STV090x_DEMODULATOR_1:
1115 mutex_lock(&demod_lock);
1116 reg = stv090x_read_reg(state, STV090x_STOPCLK2);
1117 STV090x_SETFIELD(reg, STOP_CLKVIT2_FIELD, enable);
1118 if (stv090x_write_reg(state, STV090x_STOPCLK2, reg) < 0)
1119 goto err;
1120 mutex_unlock(&demod_lock);
1121 break;
1122
1123 default:
1124 dprintk(FE_ERROR, 1, "Wrong demodulator!");
1125 break;
1126 }
1127 return 0;
1128err:
1129 mutex_unlock(&demod_lock);
1130 dprintk(FE_ERROR, 1, "I/O error");
1131 return -1;
1132}
1133
1134static int stv090x_dvbs_track_crl(struct stv090x_state *state)
1135{
1136 if (state->dev_ver >= 0x30) {
1137 /* Set ACLC BCLC optimised value vs SR */
1138 if (state->srate >= 15000000) {
1139 if (STV090x_WRITE_DEMOD(state, ACLC, 0x2b) < 0)
1140 goto err;
1141 if (STV090x_WRITE_DEMOD(state, BCLC, 0x1a) < 0)
1142 goto err;
1143 } else if ((state->srate >= 7000000) && (15000000 > state->srate)) {
1144 if (STV090x_WRITE_DEMOD(state, ACLC, 0x0c) < 0)
1145 goto err;
1146 if (STV090x_WRITE_DEMOD(state, BCLC, 0x1b) < 0)
1147 goto err;
1148 } else if (state->srate < 7000000) {
1149 if (STV090x_WRITE_DEMOD(state, ACLC, 0x2c) < 0)
1150 goto err;
1151 if (STV090x_WRITE_DEMOD(state, BCLC, 0x1c) < 0)
1152 goto err;
1153 }
1154
1155 } else {
1156 /* Cut 2.0 */
1157 if (STV090x_WRITE_DEMOD(state, ACLC, 0x1a) < 0)
1158 goto err;
1159 if (STV090x_WRITE_DEMOD(state, BCLC, 0x09) < 0)
1160 goto err;
1161 }
1162 return 0;
1163err:
1164 dprintk(FE_ERROR, 1, "I/O error");
1165 return -1;
1166}
1167
/*
 * Configure the demodulator for the requested delivery-system search:
 * enable/disable the DVB-S1 and DVB-S2 paths, the Viterbi clock and
 * carrier loops, and program the MODCOD allow-list.
 * Returns 0 on success, -1 on I/O error.
 */
static int stv090x_delivery_search(struct stv090x_state *state)
{
	u32 reg;

	switch (state->search_mode) {
	case STV090x_SEARCH_DVBS1:
	case STV090x_SEARCH_DSS:
		reg = STV090x_READ_DEMOD(state, DMDCFGMD);
		STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
		STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;

		/* Activate Viterbi decoder in legacy search,
		 * do not use FRESVIT1, might impact VITERBI2
		 */
		if (stv090x_vitclk_ctl(state, 0) < 0)
			goto err;

		if (stv090x_dvbs_track_crl(state) < 0)
			goto err;

		if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x22) < 0) /* disable DVB-S2 */
			goto err;

		if (stv090x_set_vit_thacq(state) < 0)
			goto err;
		if (stv090x_set_viterbi(state) < 0)
			goto err;
		break;

	case STV090x_SEARCH_DVBS2:
		/* toggle both paths off then on to restart the demodulator.
		 * NOTE(review): DVBS1_ENABLE is also re-set here even for an
		 * S2-only search — looks intentional (restart), confirm. */
		reg = STV090x_READ_DEMOD(state, DMDCFGMD);
		STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 0);
		STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;
		STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
		STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;

		if (stv090x_vitclk_ctl(state, 1) < 0)
			goto err;

		if (STV090x_WRITE_DEMOD(state, ACLC, 0x1a) < 0) /* stop DVB-S CR loop */
			goto err;
		if (STV090x_WRITE_DEMOD(state, BCLC, 0x09) < 0)
			goto err;

		if (state->dev_ver <= 0x20) {
			/* enable S2 carrier loop */
			if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x26) < 0)
				goto err;
		} else {
			/* > Cut 3: Stop carrier 3 */
			if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x66) < 0)
				goto err;
		}

		if (state->demod_mode != STV090x_SINGLE) {
			/* Cut 2: enable link during search */
			if (stv090x_activate_modcod(state) < 0)
				goto err;
		} else {
			/* Single demodulator
			 * Authorize SHORT and LONG frames,
			 * QPSK, 8PSK, 16APSK and 32APSK
			 */
			if (stv090x_activate_modcod_single(state) < 0)
				goto err;
		}

		break;

	case STV090x_SEARCH_AUTO:
	default:
		/* enable DVB-S1 and DVB-S2 in Auto MODE */
		reg = STV090x_READ_DEMOD(state, DMDCFGMD);
		STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
		STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;

		if (stv090x_vitclk_ctl(state, 0) < 0)
			goto err;

		if (stv090x_dvbs_track_crl(state) < 0)
			goto err;

		if (state->dev_ver <= 0x20) {
			/* enable S2 carrier loop */
			if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x26) < 0)
				goto err;
		} else {
			/* > Cut 3: Stop carrier 3 */
			if (STV090x_WRITE_DEMOD(state, CAR2CFG, 0x66) < 0)
				goto err;
		}

		if (state->demod_mode != STV090x_SINGLE) {
			/* Cut 2: enable link during search */
			if (stv090x_activate_modcod(state) < 0)
				goto err;
		} else {
			/* Single demodulator
			 * Authorize SHORT and LONG frames,
			 * QPSK, 8PSK, 16APSK and 32APSK
			 */
			if (stv090x_activate_modcod_single(state) < 0)
				goto err;
		}

		if (state->srate >= 2000000) {
			/* Srate >= 2MSPS, Viterbi threshold to acquire */
			if (stv090x_set_vit_thacq(state) < 0)
				goto err;
		} else {
			/* Srate < 2MSPS, Reset Viterbi threshold to track
			 * and then re-acquire
			 */
			if (stv090x_set_vit_thtracq(state) < 0)
				goto err;
		}

		if (stv090x_set_viterbi(state) < 0)
			goto err;
		break;
	}
	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
1302
1303static int stv090x_start_search(struct stv090x_state *state)
1304{
1305 u32 reg, freq_abs;
1306 s16 freq;
1307
1308 /* Reset demodulator */
1309 reg = STV090x_READ_DEMOD(state, DMDISTATE);
1310 STV090x_SETFIELD_Px(reg, I2C_DEMOD_MODE_FIELD, 0x1f);
1311 if (STV090x_WRITE_DEMOD(state, DMDISTATE, reg) < 0)
1312 goto err;
1313
1314 if (state->dev_ver <= 0x20) {
1315 if (state->srate <= 5000000) {
1316 if (STV090x_WRITE_DEMOD(state, CARCFG, 0x44) < 0)
1317 goto err;
1318 if (STV090x_WRITE_DEMOD(state, CFRUP1, 0x0f) < 0)
1319 goto err;
1320 if (STV090x_WRITE_DEMOD(state, CFRUP1, 0xff) < 0)
1321 goto err;
1322 if (STV090x_WRITE_DEMOD(state, CFRLOW1, 0xf0) < 0)
1323 goto err;
1324 if (STV090x_WRITE_DEMOD(state, CFRLOW0, 0x00) < 0)
1325 goto err;
1326
1327 /*enlarge the timing bandwith for Low SR*/
1328 if (STV090x_WRITE_DEMOD(state, RTCS2, 0x68) < 0)
1329 goto err;
1330 } else {
1331 /* If the symbol rate is >5 Msps
1332 Set The carrier search up and low to auto mode */
1333 if (STV090x_WRITE_DEMOD(state, CARCFG, 0xc4) < 0)
1334 goto err;
1335 /*reduce the timing bandwith for high SR*/
1336 if (STV090x_WRITE_DEMOD(state, RTCS2, 0x44) < 0)
1337 goto err;
1338 }
1339 } else {
1340 /* >= Cut 3 */
1341 if (state->srate <= 5000000) {
1342 /* enlarge the timing bandwith for Low SR */
1343 STV090x_WRITE_DEMOD(state, RTCS2, 0x68);
1344 } else {
1345 /* reduce timing bandwith for high SR */
1346 STV090x_WRITE_DEMOD(state, RTCS2, 0x44);
1347 }
1348
1349 /* Set CFR min and max to manual mode */
1350 STV090x_WRITE_DEMOD(state, CARCFG, 0x46);
1351
1352 if (state->algo == STV090x_WARM_SEARCH) {
1353 /* WARM Start
1354 * CFR min = -1MHz,
1355 * CFR max = +1MHz
1356 */
1357 freq_abs = 1000 << 16;
1358 freq_abs /= (state->mclk / 1000);
1359 freq = (s16) freq_abs;
1360 } else {
1361 /* COLD Start
1362 * CFR min =- (SearchRange / 2 + 600KHz)
1363 * CFR max = +(SearchRange / 2 + 600KHz)
1364 * (600KHz for the tuner step size)
1365 */
1366 freq_abs = (state->search_range / 2000) + 600;
1367 freq_abs = freq_abs << 16;
1368 freq_abs /= (state->mclk / 1000);
1369 freq = (s16) freq_abs;
1370 }
1371
1372 if (STV090x_WRITE_DEMOD(state, CFRUP1, MSB(freq)) < 0)
1373 goto err;
1374 if (STV090x_WRITE_DEMOD(state, CFRUP1, LSB(freq)) < 0)
1375 goto err;
1376
1377 freq *= -1;
1378
1379 if (STV090x_WRITE_DEMOD(state, CFRLOW1, MSB(freq)) < 0)
1380 goto err;
1381 if (STV090x_WRITE_DEMOD(state, CFRLOW0, LSB(freq)) < 0)
1382 goto err;
1383
1384 }
1385
1386 if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0) < 0)
1387 goto err;
1388 if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0) < 0)
1389 goto err;
1390
1391 if (state->dev_ver >= 0x20) {
1392 if (STV090x_WRITE_DEMOD(state, EQUALCFG, 0x41) < 0)
1393 goto err;
1394 if (STV090x_WRITE_DEMOD(state, FFECFG, 0x41) < 0)
1395 goto err;
1396
1397 if ((state->search_mode == STV090x_DVBS1) ||
1398 (state->search_mode == STV090x_DSS) ||
1399 (state->search_mode == STV090x_SEARCH_AUTO)) {
1400
1401 if (STV090x_WRITE_DEMOD(state, VITSCALE, 0x82) < 0)
1402 goto err;
1403 if (STV090x_WRITE_DEMOD(state, VAVSRVIT, 0x00) < 0)
1404 goto err;
1405 }
1406 }
1407
1408 if (STV090x_WRITE_DEMOD(state, SFRSTEP, 0x00) < 0)
1409 goto err;
1410 if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0xe0) < 0)
1411 goto err;
1412 if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0xc0) < 0)
1413 goto err;
1414
1415 reg = STV090x_READ_DEMOD(state, DMDCFGMD);
1416 STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 0);
1417 STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0);
1418 if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
1419 goto err;
1420 reg = STV090x_READ_DEMOD(state, DMDCFG2);
1421 STV090x_SETFIELD_Px(reg, S1S2_SEQUENTIAL_FIELD, 0x0);
1422 if (STV090x_WRITE_DEMOD(state, DMDCFG2, reg) < 0)
1423 goto err;
1424
1425 if (state->dev_ver >= 0x20) {
1426 /*Frequency offset detector setting*/
1427 if (state->srate < 2000000) {
1428 if (state->dev_ver <= 0x20) {
1429 /* Cut 2 */
1430 if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x39) < 0)
1431 goto err;
1432 } else {
1433 /* Cut 2 */
1434 if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x89) < 0)
1435 goto err;
1436 }
1437 if (STV090x_WRITE_DEMOD(state, CARHDR, 0x40) < 0)
1438 goto err;
1439 }
1440
1441 if (state->srate < 10000000) {
1442 if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x4c) < 0)
1443 goto err;
1444 } else {
1445 if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x4b) < 0)
1446 goto err;
1447 }
1448 } else {
1449 if (state->srate < 10000000) {
1450 if (STV090x_WRITE_DEMOD(state, CARFREQ, 0xef) < 0)
1451 goto err;
1452 } else {
1453 if (STV090x_WRITE_DEMOD(state, CARFREQ, 0xed) < 0)
1454 goto err;
1455 }
1456 }
1457
1458 switch (state->algo) {
1459 case STV090x_WARM_SEARCH:
1460 /* The symbol rate and the exact
1461 * carrier Frequency are known
1462 */
1463 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
1464 goto err;
1465 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
1466 goto err;
1467 break;
1468
1469 case STV090x_COLD_SEARCH:
1470 /* The symbol rate is known */
1471 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
1472 goto err;
1473 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
1474 goto err;
1475 break;
1476
1477 default:
1478 break;
1479 }
1480 return 0;
1481err:
1482 dprintk(FE_ERROR, 1, "I/O error");
1483 return -1;
1484}
1485
1486static int stv090x_get_agc2_min_level(struct stv090x_state *state)
1487{
1488 u32 agc2_min = 0, agc2 = 0, freq_init, freq_step, reg;
1489 s32 i, j, steps, dir;
1490
1491 if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
1492 goto err;
1493 reg = STV090x_READ_DEMOD(state, DMDCFGMD);
1494 STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 1);
1495 STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 1);
1496 if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
1497 goto err;
1498
1499 if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x83) < 0) /* SR = 65 Msps Max */
1500 goto err;
1501 if (STV090x_WRITE_DEMOD(state, SFRUP0, 0xc0) < 0)
1502 goto err;
1503 if (STV090x_WRITE_DEMOD(state, SFRLOW1, 0x82) < 0) /* SR= 400 ksps Min */
1504 goto err;
1505 if (STV090x_WRITE_DEMOD(state, SFRLOW0, 0xa0) < 0)
1506 goto err;
1507 if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x00) < 0) /* stop acq @ coarse carrier state */
1508 goto err;
1509 if (stv090x_set_srate(state, 1000000) < 0)
1510 goto err;
1511
1512 steps = -1 + state->search_range / 1000000;
1513 steps /= 2;
1514 steps = (2 * steps) + 1;
1515 if (steps < 0)
1516 steps = 1;
1517
1518 dir = 1;
1519 freq_step = (1000000 * 256) / (state->mclk / 256);
1520 freq_init = 0;
1521
1522 for (i = 0; i < steps; i++) {
1523 if (dir > 0)
1524 freq_init = freq_init + (freq_step * i);
1525 else
1526 freq_init = freq_init - (freq_step * i);
1527
1528 dir = -1;
1529
1530 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5c) < 0) /* Demod RESET */
1531 goto err;
1532 if (STV090x_WRITE_DEMOD(state, CFRINIT1, (freq_init >> 8) & 0xff) < 0)
1533 goto err;
1534 if (STV090x_WRITE_DEMOD(state, CFRINIT0, freq_init & 0xff) < 0)
1535 goto err;
1536 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x58) < 0) /* Demod RESET */
1537 goto err;
1538 msleep(10);
1539 for (j = 0; j < 10; j++) {
1540 agc2 += STV090x_READ_DEMOD(state, AGC2I1) << 8;
1541 agc2 |= STV090x_READ_DEMOD(state, AGC2I0);
1542 }
1543 agc2 /= 10;
1544 agc2_min = 0xffff;
1545 if (agc2 < 0xffff)
1546 agc2_min = agc2;
1547 }
1548
1549 return agc2_min;
1550err:
1551 dprintk(FE_ERROR, 1, "I/O error");
1552 return -1;
1553}
1554
1555static u32 stv090x_get_srate(struct stv090x_state *state, u32 clk)
1556{
1557 u8 r3, r2, r1, r0;
1558 s32 srate, int_1, int_2, tmp_1, tmp_2;
1559
1560 r3 = STV090x_READ_DEMOD(state, SFR3);
1561 r2 = STV090x_READ_DEMOD(state, SFR2);
1562 r1 = STV090x_READ_DEMOD(state, SFR1);
1563 r0 = STV090x_READ_DEMOD(state, SFR0);
1564
1565 srate = ((r3 << 24) | (r2 << 16) | (r1 << 8) | r0);
1566
1567 int_1 = clk >> 16;
1568 int_2 = srate >> 16;
1569
1570 tmp_1 = clk % 0x10000;
1571 tmp_2 = srate % 0x10000;
1572
1573 srate = (int_1 * int_2) +
1574 ((int_1 * tmp_2) >> 16) +
1575 ((int_2 * tmp_1) >> 16);
1576
1577 return srate;
1578}
1579
1580static u32 stv090x_srate_srch_coarse(struct stv090x_state *state)
1581{
1582 struct dvb_frontend *fe = &state->frontend;
1583
1584 int tmg_lock = 0, i;
1585 s32 tmg_cpt = 0, dir = 1, steps, cur_step = 0, freq;
1586 u32 srate_coarse = 0, agc2 = 0, car_step = 1200, reg;
1587
1588 reg = STV090x_READ_DEMOD(state, DMDISTATE);
1589 STV090x_SETFIELD_Px(reg, I2C_DEMOD_MODE_FIELD, 0x1f); /* Demod RESET */
1590 if (STV090x_WRITE_DEMOD(state, DMDISTATE, reg) < 0)
1591 goto err;
1592 if (STV090x_WRITE_DEMOD(state, TMGCFG, 0x12) < 0)
1593 goto err;
1594 if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0xf0) < 0)
1595 goto err;
1596 if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0xe0) < 0)
1597 goto err;
1598 reg = STV090x_READ_DEMOD(state, DMDCFGMD);
1599 STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 1);
1600 STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 1);
1601 if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
1602 goto err;
1603
1604 if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x83) < 0)
1605 goto err;
1606 if (STV090x_WRITE_DEMOD(state, SFRUP0, 0xc0) < 0)
1607 goto err;
1608 if (STV090x_WRITE_DEMOD(state, SFRLOW1, 0x82) < 0)
1609 goto err;
1610 if (STV090x_WRITE_DEMOD(state, SFRLOW0, 0xa0) < 0)
1611 goto err;
1612 if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x00) < 0)
1613 goto err;
1614 if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x60) < 0)
1615 goto err;
1616
1617 if (state->dev_ver >= 0x30) {
1618 if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x99) < 0)
1619 goto err;
1620 if (STV090x_WRITE_DEMOD(state, SFRSTEP, 0x95) < 0)
1621 goto err;
1622
1623 } else if (state->dev_ver >= 0x20) {
1624 if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x6a) < 0)
1625 goto err;
1626 if (STV090x_WRITE_DEMOD(state, SFRSTEP, 0x95) < 0)
1627 goto err;
1628 }
1629
1630 if (state->srate <= 2000000)
1631 car_step = 1000;
1632 else if (state->srate <= 5000000)
1633 car_step = 2000;
1634 else if (state->srate <= 12000000)
1635 car_step = 3000;
1636 else
1637 car_step = 5000;
1638
1639 steps = -1 + ((state->search_range / 1000) / car_step);
1640 steps /= 2;
1641 steps = (2 * steps) + 1;
1642 if (steps < 0)
1643 steps = 1;
1644 else if (steps > 10) {
1645 steps = 11;
1646 car_step = (state->search_range / 1000) / 10;
1647 }
1648 cur_step = 0;
1649 dir = 1;
1650 freq = state->frequency;
1651
1652 while ((!tmg_lock) && (cur_step < steps)) {
1653 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5f) < 0) /* Demod RESET */
1654 goto err;
1655 reg = STV090x_READ_DEMOD(state, DMDISTATE);
1656 STV090x_SETFIELD_Px(reg, I2C_DEMOD_MODE_FIELD, 0x00); /* trigger acquisition */
1657 if (STV090x_WRITE_DEMOD(state, DMDISTATE, reg) < 0)
1658 goto err;
1659 msleep(50);
1660 for (i = 0; i < 10; i++) {
1661 reg = STV090x_READ_DEMOD(state, DSTATUS);
1662 if (STV090x_GETFIELD_Px(reg, TMGLOCK_QUALITY_FIELD) >= 2)
1663 tmg_cpt++;
1664 agc2 += STV090x_READ_DEMOD(state, AGC2I1) << 8;
1665 agc2 |= STV090x_READ_DEMOD(state, AGC2I0);
1666 }
1667 agc2 /= 10;
1668 srate_coarse = stv090x_get_srate(state, state->mclk);
1669 cur_step++;
1670 dir *= -1;
1671 if ((tmg_cpt >= 5) && (agc2 < 0x1f00) && (srate_coarse < 55000000) && (srate_coarse > 850000))
1672 tmg_lock = 1;
1673 else if (cur_step < steps) {
1674 if (dir > 0)
1675 freq += cur_step * car_step;
1676 else
1677 freq -= cur_step * car_step;
1678
1679 /* Setup tuner */
1680 if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
1681 goto err;
1682
1683 if (state->config->tuner_set_frequency) {
1684 if (state->config->tuner_set_frequency(fe, state->frequency) < 0)
1685 goto err;
1686 }
1687
1688 if (state->config->tuner_set_bandwidth) {
1689 if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
1690 goto err;
1691 }
1692
1693 if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
1694 goto err;
1695
1696 msleep(50);
1697
1698 if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
1699 goto err;
1700
1701 if (state->config->tuner_get_status) {
1702 if (state->config->tuner_get_status(fe, &reg) < 0)
1703 goto err;
1704 }
1705
1706 if (reg)
1707 dprintk(FE_DEBUG, 1, "Tuner phase locked");
1708 else
1709 dprintk(FE_DEBUG, 1, "Tuner unlocked");
1710
1711 if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
1712 goto err;
1713
1714 }
1715 }
1716 if (!tmg_lock)
1717 srate_coarse = 0;
1718 else
1719 srate_coarse = stv090x_get_srate(state, state->mclk);
1720
1721 return srate_coarse;
1722err:
1723 dprintk(FE_ERROR, 1, "I/O error");
1724 return -1;
1725}
1726
/*
 * Fine symbol rate search: refine the estimate produced by the coarse
 * search and restart acquisition around it.  Returns the coarse symbol
 * rate estimate (0 when the estimate is below the requested rate) or
 * -1 on I2C I/O error.
 */
static u32 stv090x_srate_srch_fine(struct stv090x_state *state)
{
	u32 srate_coarse, freq_coarse, sym, reg;

	srate_coarse = stv090x_get_srate(state, state->mclk);
	/* current carrier offset: CFR2/CFR1 form a 16 bit value */
	freq_coarse = STV090x_READ_DEMOD(state, CFR2) << 8;
	freq_coarse |= STV090x_READ_DEMOD(state, CFR1);
	sym = 13 * (srate_coarse / 10); /* SFRUP = SFR + 30% */

	if (sym < state->srate)
		/* coarse estimate below the requested rate: reject it */
		srate_coarse = 0;
	else {
		if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0) /* Demod RESET */
			goto err;
		if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0x01) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0x20) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0x00) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, TMGCFG, 0xd2) < 0)
			goto err;
		/* timing known now: disable carrier auto scan */
		reg = STV090x_READ_DEMOD(state, DMDCFGMD);
		STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0x00);
		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
			goto err;

		/* chip-cut dependent frequency detector setting */
		if (state->dev_ver >= 0x30) {
			if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x79) < 0)
				goto err;
		} else if (state->dev_ver >= 0x20) {
			if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x49) < 0)
				goto err;
		}

		if (srate_coarse > 3000000) {
			/* scale in kHz steps to avoid 32 bit overflow:
			 * SFR registers hold srate * 2^16 / mclk */
			sym = 13 * (srate_coarse / 10); /* SFRUP = SFR + 30% */
			sym = (sym / 1000) * 65536;
			sym /= (state->mclk / 1000);
			if (STV090x_WRITE_DEMOD(state, SFRUP1, (sym >> 8) & 0x7f) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, SFRUP0, sym & 0xff) < 0)
				goto err;
			sym = 10 * (srate_coarse / 13); /* SFRLOW = SFR - 30% */
			sym = (sym / 1000) * 65536;
			sym /= (state->mclk / 1000);
			if (STV090x_WRITE_DEMOD(state, SFRLOW1, (sym >> 8) & 0x7f) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, SFRLOW0, sym & 0xff) < 0)
				goto err;
			sym = (srate_coarse / 1000) * 65536;
			sym /= (state->mclk / 1000);
			if (STV090x_WRITE_DEMOD(state, SFRINIT1, (sym >> 8) & 0xff) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, SFRINIT0, sym & 0xff) < 0)
				goto err;
		} else {
			/* low symbol rates: scale in units of 100 Hz for precision */
			sym = 13 * (srate_coarse / 10); /* SFRUP = SFR + 30% */
			sym = (sym / 100) * 65536;
			sym /= (state->mclk / 100);
			if (STV090x_WRITE_DEMOD(state, SFRUP1, (sym >> 8) & 0x7f) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, SFRUP0, sym & 0xff) < 0)
				goto err;
			sym = 10 * (srate_coarse / 14); /* SFRLOW = SFR - 30% */
			sym = (sym / 100) * 65536;
			sym /= (state->mclk / 100);
			if (STV090x_WRITE_DEMOD(state, SFRLOW1, (sym >> 8) & 0x7f) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, SFRLOW0, sym & 0xff) < 0)
				goto err;
			sym = (srate_coarse / 100) * 65536;
			sym /= (state->mclk / 100);
			if (STV090x_WRITE_DEMOD(state, SFRINIT1, (sym >> 8) & 0xff) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, SFRINIT0, sym & 0xff) < 0)
				goto err;
		}
		if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x20) < 0)
			goto err;
		/* restart acquisition at the previously measured carrier offset */
		if (STV090x_WRITE_DEMOD(state, CFRINIT1, (freq_coarse >> 8) & 0xff) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, CFRINIT0, freq_coarse & 0xff) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0) /* trigger acquisition */
			goto err;
	}

	return srate_coarse;

err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
1821
1822static int stv090x_get_dmdlock(struct stv090x_state *state, s32 timeout)
1823{
1824 s32 timer = 0, lock = 0;
1825 u32 reg;
1826 u8 stat;
1827
1828 while ((timer < timeout) && (!lock)) {
1829 reg = STV090x_READ_DEMOD(state, DMDSTATE);
1830 stat = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);
1831
1832 switch (stat) {
1833 case 0: /* searching */
1834 case 1: /* first PLH detected */
1835 default:
1836 dprintk(FE_DEBUG, 1, "Demodulator searching ..");
1837 lock = 0;
1838 break;
1839 case 2: /* DVB-S2 mode */
1840 case 3: /* DVB-S1/legacy mode */
1841 reg = STV090x_READ_DEMOD(state, DSTATUS);
1842 lock = STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD);
1843 break;
1844 }
1845
1846 if (!lock)
1847 msleep(10);
1848 else
1849 dprintk(FE_DEBUG, 1, "Demodulator acquired LOCK");
1850
1851 timer += 10;
1852 }
1853 return lock;
1854}
1855
1856static int stv090x_blind_search(struct stv090x_state *state)
1857{
1858 u32 agc2, reg, srate_coarse;
1859 s32 timeout_dmd = 500, cpt_fail, agc2_ovflw, i;
1860 u8 k_ref, k_max, k_min;
1861 int coarse_fail, lock;
1862
1863 k_max = 120;
1864 k_min = 30;
1865
1866 agc2 = stv090x_get_agc2_min_level(state);
1867
1868 if (agc2 > STV090x_SEARCH_AGC2_TH(state->dev_ver)) {
1869 lock = 0;
1870 } else {
1871
1872 if (state->dev_ver <= 0x20) {
1873 if (STV090x_WRITE_DEMOD(state, CARCFG, 0xc4) < 0)
1874 goto err;
1875 } else {
1876 /* > Cut 3 */
1877 if (STV090x_WRITE_DEMOD(state, CARCFG, 0x06) < 0)
1878 goto err;
1879 }
1880
1881 if (STV090x_WRITE_DEMOD(state, RTCS2, 0x44) < 0)
1882 goto err;
1883
1884 if (state->dev_ver >= 0x20) {
1885 if (STV090x_WRITE_DEMOD(state, EQUALCFG, 0x41) < 0)
1886 goto err;
1887 if (STV090x_WRITE_DEMOD(state, FFECFG, 0x41) < 0)
1888 goto err;
1889 if (STV090x_WRITE_DEMOD(state, VITSCALE, 0x82) < 0)
1890 goto err;
1891 if (STV090x_WRITE_DEMOD(state, VAVSRVIT, 0x00) < 0) /* set viterbi hysteresis */
1892 goto err;
1893 }
1894
1895 k_ref = k_max;
1896 do {
1897 if (STV090x_WRITE_DEMOD(state, KREFTMG, k_ref) < 0)
1898 goto err;
1899 if (stv090x_srate_srch_coarse(state) != 0) {
1900 srate_coarse = stv090x_srate_srch_fine(state);
1901 if (srate_coarse != 0) {
1902 stv090x_get_lock_tmg(state);
1903 lock = stv090x_get_dmdlock(state, timeout_dmd);
1904 } else {
1905 lock = 0;
1906 }
1907 } else {
1908 cpt_fail = 0;
1909 agc2_ovflw = 0;
1910 for (i = 0; i < 10; i++) {
1911 agc2 = STV090x_READ_DEMOD(state, AGC2I1) << 8;
1912 agc2 |= STV090x_READ_DEMOD(state, AGC2I0);
1913 if (agc2 >= 0xff00)
1914 agc2_ovflw++;
1915 reg = STV090x_READ_DEMOD(state, DSTATUS2);
1916 if ((STV090x_GETFIELD_Px(reg, CFR_OVERFLOW_FIELD) == 0x01) &&
1917 (STV090x_GETFIELD_Px(reg, DEMOD_DELOCK_FIELD) == 0x01))
1918
1919 cpt_fail++;
1920 }
1921 if ((cpt_fail > 7) || (agc2_ovflw > 7))
1922 coarse_fail = 1;
1923
1924 lock = 0;
1925 }
1926 k_ref -= 30;
1927 } while ((k_ref >= k_min) && (!lock) && (!coarse_fail));
1928 }
1929
1930 return lock;
1931
1932err:
1933 dprintk(FE_ERROR, 1, "I/O error");
1934 return -1;
1935}
1936
1937static int stv090x_chk_tmg(struct stv090x_state *state)
1938{
1939 u32 reg;
1940 s32 tmg_cpt = 0, i;
1941 u8 freq, tmg_thh, tmg_thl;
1942 int tmg_lock;
1943
1944 freq = STV090x_READ_DEMOD(state, CARFREQ);
1945 tmg_thh = STV090x_READ_DEMOD(state, TMGTHRISE);
1946 tmg_thl = STV090x_READ_DEMOD(state, TMGTHFALL);
1947 if (STV090x_WRITE_DEMOD(state, TMGTHRISE, 0x20) < 0)
1948 goto err;
1949 if (STV090x_WRITE_DEMOD(state, TMGTHFALL, 0x00) < 0)
1950 goto err;
1951
1952 reg = STV090x_READ_DEMOD(state, DMDCFGMD);
1953 STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0x00); /* stop carrier offset search */
1954 if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
1955 goto err;
1956 if (STV090x_WRITE_DEMOD(state, RTC, 0x80) < 0)
1957 goto err;
1958
1959 if (STV090x_WRITE_DEMOD(state, RTCS2, 0x40) < 0)
1960 goto err;
1961 if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x00) < 0)
1962 goto err;
1963
1964 if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0) /* set car ofset to 0 */
1965 goto err;
1966 if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
1967 goto err;
1968 if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x65) < 0)
1969 goto err;
1970
1971 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0) /* trigger acquisition */
1972 goto err;
1973 msleep(10);
1974
1975 for (i = 0; i < 10; i++) {
1976 reg = STV090x_READ_DEMOD(state, DSTATUS);
1977 if (STV090x_GETFIELD_Px(reg, TMGLOCK_QUALITY_FIELD) >= 2)
1978 tmg_cpt++;
1979 msleep(1);
1980 }
1981 if (tmg_cpt >= 3)
1982 tmg_lock = 1;
1983
1984 if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
1985 goto err;
1986 if (STV090x_WRITE_DEMOD(state, RTC, 0x88) < 0) /* DVB-S1 timing */
1987 goto err;
1988 if (STV090x_WRITE_DEMOD(state, RTCS2, 0x68) < 0) /* DVB-S2 timing */
1989 goto err;
1990
1991 if (STV090x_WRITE_DEMOD(state, CARFREQ, freq) < 0)
1992 goto err;
1993 if (STV090x_WRITE_DEMOD(state, TMGTHRISE, tmg_thh) < 0)
1994 goto err;
1995 if (STV090x_WRITE_DEMOD(state, TMGTHFALL, tmg_thl) < 0)
1996 goto err;
1997
1998 return tmg_lock;
1999
2000err:
2001 dprintk(FE_ERROR, 1, "I/O error");
2002 return -1;
2003}
2004
2005static int stv090x_get_coldlock(struct stv090x_state *state, s32 timeout_dmd)
2006{
2007 struct dvb_frontend *fe = &state->frontend;
2008
2009 u32 reg;
2010 s32 car_step, steps, cur_step, dir, freq, timeout_lock;
2011 int lock = 0;
2012
2013 if (state->srate >= 10000000)
2014 timeout_lock = timeout_dmd / 3;
2015 else
2016 timeout_lock = timeout_dmd / 2;
2017
2018 lock = stv090x_get_dmdlock(state, timeout_lock); /* cold start wait */
2019 if (!lock) {
2020 if (state->srate >= 10000000) {
2021 if (stv090x_chk_tmg(state)) {
2022 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
2023 goto err;
2024 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
2025 goto err;
2026 lock = stv090x_get_dmdlock(state, timeout_dmd);
2027 } else {
2028 lock = 0;
2029 }
2030 } else {
2031 if (state->srate <= 4000000)
2032 car_step = 1000;
2033 else if (state->srate <= 7000000)
2034 car_step = 2000;
2035 else if (state->srate <= 10000000)
2036 car_step = 3000;
2037 else
2038 car_step = 5000;
2039
2040 steps = (state->search_range / 1000) / car_step;
2041 steps /= 2;
2042 steps = 2 * (steps + 1);
2043 if (steps < 0)
2044 steps = 2;
2045 else if (steps > 12)
2046 steps = 12;
2047
2048 cur_step = 1;
2049 dir = 1;
2050
2051 if (!lock) {
2052 freq = state->frequency;
2053 state->tuner_bw = stv090x_car_width(state->srate, state->rolloff) + state->srate;
2054 while ((cur_step <= steps) && (!lock)) {
2055 if (dir > 0)
2056 freq += cur_step * car_step;
2057 else
2058 freq -= cur_step * car_step;
2059
2060 /* Setup tuner */
2061 if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
2062 goto err;
2063
2064 if (state->config->tuner_set_frequency) {
2065 if (state->config->tuner_set_frequency(fe, state->frequency) < 0)
2066 goto err;
2067 }
2068
2069 if (state->config->tuner_set_bandwidth) {
2070 if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
2071 goto err;
2072 }
2073
2074 if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
2075 goto err;
2076
2077 msleep(50);
2078
2079 if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
2080 goto err;
2081
2082 if (state->config->tuner_get_status) {
2083 if (state->config->tuner_get_status(fe, &reg) < 0)
2084 goto err;
2085 }
2086
2087 if (reg)
2088 dprintk(FE_DEBUG, 1, "Tuner phase locked");
2089 else
2090 dprintk(FE_DEBUG, 1, "Tuner unlocked");
2091
2092 if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
2093 goto err;
2094
2095 STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c);
2096 if (state->delsys == STV090x_DVBS2) {
2097 reg = STV090x_READ_DEMOD(state, DMDCFGMD);
2098 STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 0);
2099 STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
2100 if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
2101 goto err;
2102 STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
2103 STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
2104 if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
2105 goto err;
2106 }
2107 if (STV090x_WRITE_DEMOD(state, CFRINIT1, 0x00) < 0)
2108 goto err;
2109 if (STV090x_WRITE_DEMOD(state, CFRINIT0, 0x00) < 0)
2110 goto err;
2111 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
2112 goto err;
2113 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x15) < 0)
2114 goto err;
2115 lock = stv090x_get_dmdlock(state, (timeout_dmd / 3));
2116
2117 dir *= -1;
2118 cur_step++;
2119 }
2120 }
2121 }
2122 }
2123
2124 return lock;
2125
2126err:
2127 dprintk(FE_ERROR, 1, "I/O error");
2128 return -1;
2129}
2130
2131static int stv090x_get_loop_params(struct stv090x_state *state, s32 *freq_inc, s32 *timeout_sw, s32 *steps)
2132{
2133 s32 timeout, inc, steps_max, srate, car_max;
2134
2135 srate = state->srate;
2136 car_max = state->search_range / 1000;
2137 car_max += car_max / 10;
2138 car_max = 65536 * (car_max / 2);
2139 car_max /= (state->mclk / 1000);
2140
2141 if (car_max > 0x4000)
2142 car_max = 0x4000 ; /* maxcarrier should be<= +-1/4 Mclk */
2143
2144 inc = srate;
2145 inc /= state->mclk / 1000;
2146 inc *= 256;
2147 inc *= 256;
2148 inc /= 1000;
2149
2150 switch (state->search_mode) {
2151 case STV090x_SEARCH_DVBS1:
2152 case STV090x_SEARCH_DSS:
2153 inc *= 3; /* freq step = 3% of srate */
2154 timeout = 20;
2155 break;
2156
2157 case STV090x_SEARCH_DVBS2:
2158 inc *= 4;
2159 timeout = 25;
2160 break;
2161
2162 case STV090x_SEARCH_AUTO:
2163 default:
2164 inc *= 3;
2165 timeout = 25;
2166 break;
2167 }
2168 inc /= 100;
2169 if ((inc > car_max) || (inc < 0))
2170 inc = car_max / 2; /* increment <= 1/8 Mclk */
2171
2172 timeout *= 27500; /* 27.5 Msps reference */
2173 if (srate > 0)
2174 timeout /= (srate / 1000);
2175
2176 if ((timeout > 100) || (timeout < 0))
2177 timeout = 100;
2178
2179 steps_max = (car_max / inc) + 1; /* min steps = 3 */
2180 if ((steps_max > 100) || (steps_max < 0)) {
2181 steps_max = 100; /* max steps <= 100 */
2182 inc = car_max / steps_max;
2183 }
2184 *freq_inc = inc;
2185 *timeout_sw = timeout;
2186 *steps = steps_max;
2187
2188 return 0;
2189}
2190
2191static int stv090x_chk_signal(struct stv090x_state *state)
2192{
2193 s32 offst_car, agc2, car_max;
2194 int no_signal;
2195
2196 offst_car = STV090x_READ_DEMOD(state, CFR2) << 8;
2197 offst_car |= STV090x_READ_DEMOD(state, CFR1);
2198 offst_car = comp2(offst_car, 16);
2199
2200 agc2 = STV090x_READ_DEMOD(state, AGC2I1) << 8;
2201 agc2 |= STV090x_READ_DEMOD(state, AGC2I0);
2202 car_max = state->search_range / 1000;
2203
2204 car_max += (car_max / 10); /* 10% margin */
2205 car_max = (65536 * car_max / 2);
2206 car_max /= state->mclk / 1000;
2207
2208 if (car_max > 0x4000)
2209 car_max = 0x4000;
2210
2211 if ((agc2 > 0x2000) || (offst_car > 2 * car_max) || (offst_car < -2 * car_max)) {
2212 no_signal = 1;
2213 dprintk(FE_DEBUG, 1, "No Signal");
2214 } else {
2215 no_signal = 0;
2216 dprintk(FE_DEBUG, 1, "Found Signal");
2217 }
2218
2219 return no_signal;
2220}
2221
/*
 * Software carrier search loop: step the carrier init frequency
 * (zigzag around 0, or linear sweep from -car_max) and re-trigger
 * acquisition at each step until lock, "no signal", the carrier range
 * is exhausted, or steps_max steps were made.  Returns the lock state,
 * or -1 on I2C I/O error.
 */
static int stv090x_search_car_loop(struct stv090x_state *state, s32 inc, s32 timeout, int zigzag, s32 steps_max)
{
	int no_signal, lock = 0;
	s32 cpt_step = 0, offst_freq, car_max;
	u32 reg;

	/* carrier range in register units, capped at +-1/4 Mclk */
	car_max = state->search_range / 1000;
	car_max += (car_max / 10);
	car_max = (65536 * car_max / 2);
	car_max /= (state->mclk / 1000);
	if (car_max > 0x4000)
		car_max = 0x4000;

	if (zigzag)
		offst_freq = 0;
	else
		offst_freq = -car_max + inc;

	do {
		/* reset, program the new carrier offset, warm-start */
		if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1c) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, CFRINIT1, ((offst_freq / 256) & 0xff)) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, CFRINIT0, offst_freq & 0xff) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
			goto err;

		reg = STV090x_READ_DEMOD(state, PDELCTRL1);
		STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0x1); /* stop DVB-S2 packet delin */
		if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
			goto err;

		/* advance to the next frequency offset */
		if (zigzag) {
			/* alternate sides of 0, widening by 2*inc each pass */
			if (offst_freq >= 0)
				offst_freq = -offst_freq - 2 * inc;
			else
				offst_freq = -offst_freq;
		} else {
			offst_freq += 2 * inc;
		}

		cpt_step++;

		lock = stv090x_get_dmdlock(state, timeout);
		no_signal = stv090x_chk_signal(state);

	} while ((!lock) &&
		(!no_signal) &&
		((offst_freq - inc) < car_max) &&
		((offst_freq + inc) > -car_max) &&
		(cpt_step < steps_max));

	/* release the packet delineator reset */
	reg = STV090x_READ_DEMOD(state, PDELCTRL1);
	STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0);
	if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
		goto err;

	return lock;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
2285
/*
 * Software zigzag search algorithm: configure the demodulator for the
 * requested search mode, then run the carrier search loop up to two
 * times, rejecting false DVB-S2 locks by watching the flywheel
 * counter.  Returns the final lock state, or -1 on I2C I/O error.
 */
static int stv090x_sw_algo(struct stv090x_state *state)
{
	int no_signal, zigzag, lock = 0;
	u32 reg;

	s32 dvbs2_fly_wheel;
	s32 inc, timeout_step, trials, steps_max;

	/* get params */
	stv090x_get_loop_params(state, &inc, &timeout_step, &steps_max);

	switch (state->search_mode) {
	case STV090x_SEARCH_DVBS1:
	case STV090x_SEARCH_DSS:
		/* accelerate the frequency detector */
		if (state->dev_ver >= 0x20) {
			if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x3B) < 0)
				goto err;
		}

		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0x49) < 0)
			goto err;
		zigzag = 0; /* linear sweep for DVB-S1/DSS */
		break;

	case STV090x_SEARCH_DVBS2:
		if (state->dev_ver >= 0x20) {
			if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x79) < 0)
				goto err;
		}

		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0x89) < 0)
			goto err;
		zigzag = 1; /* zigzag sweep for DVB-S2 */
		break;

	case STV090x_SEARCH_AUTO:
	default:
		/* accelerate the frequency detector */
		if (state->dev_ver >= 0x20) {
			if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x3b) < 0)
				goto err;
			if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x79) < 0)
				goto err;
		}

		if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0xc9) < 0)
			goto err;
		zigzag = 0;
		break;
	}

	trials = 0;
	do {
		lock = stv090x_search_car_loop(state, inc, timeout_step, zigzag, steps_max);
		no_signal = stv090x_chk_signal(state);
		trials++;

		/*run the SW search 2 times maximum*/
		if (lock || no_signal || (trials == 2)) {
			/*Check if the demod is not losing lock in DVBS2*/
			if (state->dev_ver >= 0x20) {
				/* restore normal detector settings after the search */
				if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x49) < 0)
					goto err;
				if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x9e) < 0)
					goto err;
			}

			reg = STV090x_READ_DEMOD(state, DMDSTATE);
			if ((lock) && (STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD) == STV090x_DVBS2)) {
				/*Check if the demod is not losing lock in DVBS2*/
				msleep(timeout_step);
				reg = STV090x_READ_DEMOD(state, DMDFLYW);
				dvbs2_fly_wheel = STV090x_GETFIELD_Px(reg, FLYWHEEL_CPT_FIELD);
				if (dvbs2_fly_wheel < 0xd) { /*if correct frames is decrementing */
					/* give it a second chance before declaring a false lock */
					msleep(timeout_step);
					reg = STV090x_READ_DEMOD(state, DMDFLYW);
					dvbs2_fly_wheel = STV090x_GETFIELD_Px(reg, FLYWHEEL_CPT_FIELD);
				}
				if (dvbs2_fly_wheel < 0xd) {
					/*FALSE lock, The demod is loosing lock */
					lock = 0;
					if (trials < 2) {
						/* re-arm for one more DVB-S2 search pass */
						if (state->dev_ver >= 0x20) {
							if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x79) < 0)
								goto err;
						}

						if (STV090x_WRITE_DEMOD(state, DMDCFGMD, 0x89) < 0)
							goto err;
					}
				}
			}
		}
	} while ((!lock) && (trials < 2) && (!no_signal));

	return lock;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
2387
2388static enum stv090x_delsys stv090x_get_std(struct stv090x_state *state)
2389{
2390 u32 reg;
2391 enum stv090x_delsys delsys;
2392
2393 reg = STV090x_READ_DEMOD(state, DMDSTATE);
2394 if (STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD) == 2)
2395 delsys = STV090x_DVBS2;
2396 else if (STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD) == 3) {
2397 reg = STV090x_READ_DEMOD(state, FECM);
2398 if (STV090x_GETFIELD_Px(reg, DSS_DVB_FIELD) == 1)
2399 delsys = STV090x_DSS;
2400 else
2401 delsys = STV090x_DVBS1;
2402 } else {
2403 delsys = STV090x_ERROR;
2404 }
2405
2406 return delsys;
2407}
2408
2409/* in Hz */
2410static s32 stv090x_get_car_freq(struct stv090x_state *state, u32 mclk)
2411{
2412 s32 derot, int_1, int_2, tmp_1, tmp_2;
2413
2414 derot = STV090x_READ_DEMOD(state, CFR2) << 16;
2415 derot |= STV090x_READ_DEMOD(state, CFR1) << 8;
2416 derot |= STV090x_READ_DEMOD(state, CFR0);
2417
2418 derot = comp2(derot, 24);
2419 int_1 = state->mclk >> 12;
2420 int_2 = derot >> 12;
2421
2422 /* carrier_frequency = MasterClock * Reg / 2^24 */
2423 tmp_1 = state->mclk % 0x1000;
2424 tmp_2 = derot % 0x1000;
2425
2426 derot = (int_1 * int_2) +
2427 ((int_1 * tmp_2) >> 12) +
2428 ((int_1 * tmp_1) >> 12);
2429
2430 return derot;
2431}
2432
2433static int stv090x_get_viterbi(struct stv090x_state *state)
2434{
2435 u32 reg, rate;
2436
2437 reg = STV090x_READ_DEMOD(state, VITCURPUN);
2438 rate = STV090x_GETFIELD_Px(reg, VIT_CURPUN_FIELD);
2439
2440 switch (rate) {
2441 case 13:
2442 state->fec = STV090x_PR12;
2443 break;
2444
2445 case 18:
2446 state->fec = STV090x_PR23;
2447 break;
2448
2449 case 21:
2450 state->fec = STV090x_PR34;
2451 break;
2452
2453 case 24:
2454 state->fec = STV090x_PR56;
2455 break;
2456
2457 case 25:
2458 state->fec = STV090x_PR67;
2459 break;
2460
2461 case 26:
2462 state->fec = STV090x_PR78;
2463 break;
2464
2465 default:
2466 state->fec = STV090x_PRERR;
2467 break;
2468 }
2469
2470 return 0;
2471}
2472
/*
 * After lock, read back the signal parameters (delivery system, real
 * frequency, FEC, modcod, pilots, frame length, rolloff, spectral
 * inversion) and decide whether the found carrier is within the
 * requested search range.  Returns STV090x_RANGEOK / STV090x_OUTOFRANGE,
 * or -1 on I2C I/O error.
 */
static enum stv090x_signal_state stv090x_get_sig_params(struct stv090x_state *state)
{
	struct dvb_frontend *fe = &state->frontend;

	u8 tmg;
	u32 reg;
	s32 i = 0, offst_freq;

	msleep(5);

	if (state->algo == STV090x_BLIND_SEARCH) {
		/* wait (up to ~50 ms) for the timing offset register to settle */
		tmg = STV090x_READ_DEMOD(state, TMGREG2);
		STV090x_WRITE_DEMOD(state, SFRSTEP, 0x5c);
		while ((i <= 50) && (tmg != 0) && (tmg != 0xff)) {
			tmg = STV090x_READ_DEMOD(state, TMGREG2);
			msleep(5);
			i += 5;
		}
	}
	state->delsys = stv090x_get_std(state);

	if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
		goto err;

	if (state->config->tuner_get_frequency) {
		if (state->config->tuner_get_frequency(fe, &state->frequency) < 0)
			goto err;
	}

	if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
		goto err;

	/* real frequency = tuner frequency + demod carrier offset (kHz) */
	offst_freq = stv090x_get_car_freq(state, state->mclk) / 1000;
	state->frequency += offst_freq;

	if (stv090x_get_viterbi(state) < 0)
		goto err;

	reg = STV090x_READ_DEMOD(state, DMDMODCOD);
	state->modcod = STV090x_GETFIELD_Px(reg, DEMOD_MODCOD_FIELD);
	state->pilots = STV090x_GETFIELD_Px(reg, DEMOD_TYPE_FIELD) & 0x01;
	state->frame_len = STV090x_GETFIELD_Px(reg, DEMOD_TYPE_FIELD) >> 1;
	reg = STV090x_READ_DEMOD(state, TMGOBS);
	state->rolloff = STV090x_GETFIELD_Px(reg, ROLLOFF_STATUS_FIELD);
	reg = STV090x_READ_DEMOD(state, FECM);
	state->inversion = STV090x_GETFIELD_Px(reg, IQINV_FIELD);

	if ((state->algo == STV090x_BLIND_SEARCH) || (state->srate < 10000000)) {

		/* re-read the tuner frequency for the range check */
		if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
			goto err;

		if (state->config->tuner_get_frequency) {
			if (state->config->tuner_get_frequency(fe, &state->frequency) < 0)
				goto err;
		}

		if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
			goto err;

		/* accept offsets within the search range or the carrier width */
		if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
			return STV090x_RANGEOK;
		else if (abs(offst_freq) <= (stv090x_car_width(state->srate, state->rolloff) / 2000))
			return STV090x_RANGEOK;
		else
			return STV090x_OUTOFRANGE; /* Out of Range */
	} else {
		if (abs(offst_freq) <= ((state->search_range / 2000) + 500))
			return STV090x_RANGEOK;
		else
			return STV090x_OUTOFRANGE;
	}

	return STV090x_OUTOFRANGE; /* not reached: both branches above return */
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
2551
2552static u32 stv090x_get_tmgoffst(struct stv090x_state *state, u32 srate)
2553{
2554 s32 offst_tmg;
2555
2556 offst_tmg = STV090x_READ_DEMOD(state, TMGREG2) << 16;
2557 offst_tmg |= STV090x_READ_DEMOD(state, TMGREG1) << 8;
2558 offst_tmg |= STV090x_READ_DEMOD(state, TMGREG0);
2559
2560 offst_tmg = comp2(offst_tmg, 24); /* 2's complement */
2561 if (!offst_tmg)
2562 offst_tmg = 1;
2563
2564 offst_tmg = ((s32) srate * 10) / ((s32) 0x1000000 / offst_tmg);
2565 offst_tmg /= 320;
2566
2567 return offst_tmg;
2568}
2569
/*
 * Select the optimal carrier loop coefficient (ACLC) for a DVB-S2
 * long-frame signal, based on chip cut, modcod, pilot usage and
 * symbol rate, by indexing the cut-specific car-loop tables.
 */
static u8 stv090x_optimize_carloop(struct stv090x_state *state, enum stv090x_modcod modcod, s32 pilots)
{
	u8 aclc = 0x29; /* fallback coefficient */
	s32 i;
	struct stv090x_long_frame_crloop *car_loop, *car_loop_qpsk_low, *car_loop_apsk_low;

	/* pick the register tables matching the chip cut */
	if (state->dev_ver == 0x20) {
		car_loop = stv090x_s2_crl_cut20;
		car_loop_qpsk_low = stv090x_s2_lowqpsk_crl_cut20;
		car_loop_apsk_low = stv090x_s2_apsk_crl_cut20;
	} else {
		/* >= Cut 3 */
		car_loop = stv090x_s2_crl_cut30;
		car_loop_qpsk_low = stv090x_s2_lowqpsk_crl_cut30;
		car_loop_apsk_low = stv090x_s2_apsk_crl_cut30;
	}

	/* find the table row for this modcod (clamped to the last entry) */
	if (modcod < STV090x_QPSK_12) {
		i = 0;
		while ((i < 3) && (modcod != car_loop_qpsk_low[i].modcod))
			i++;

		if (i >= 3)
			i = 2;

	} else {
		i = 0;
		while ((i < 14) && (modcod != car_loop[i].modcod))
			i++;

		if (i >= 14) {
			/* not in the main table: try the APSK table */
			i = 0;
			while ((i < 11) && (modcod != car_loop_apsk_low[i].modcod))
				i++;

			if (i >= 11)
				i = 10;
		}
	}

	/* pick the coefficient column by pilots and symbol rate band */
	if (modcod <= STV090x_QPSK_25) {
		if (pilots) {
			if (state->srate <= 3000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_on_2;
			else if (state->srate <= 7000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_on_5;
			else if (state->srate <= 15000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_on_10;
			else if (state->srate <= 25000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_on_20;
			else
				aclc = car_loop_qpsk_low[i].crl_pilots_on_30;
		} else {
			if (state->srate <= 3000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_off_2;
			else if (state->srate <= 7000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_off_5;
			else if (state->srate <= 15000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_off_10;
			else if (state->srate <= 25000000)
				aclc = car_loop_qpsk_low[i].crl_pilots_off_20;
			else
				aclc = car_loop_qpsk_low[i].crl_pilots_off_30;
		}

	} else if (modcod <= STV090x_8PSK_910) {
		if (pilots) {
			if (state->srate <= 3000000)
				aclc = car_loop[i].crl_pilots_on_2;
			else if (state->srate <= 7000000)
				aclc = car_loop[i].crl_pilots_on_5;
			else if (state->srate <= 15000000)
				aclc = car_loop[i].crl_pilots_on_10;
			else if (state->srate <= 25000000)
				aclc = car_loop[i].crl_pilots_on_20;
			else
				aclc = car_loop[i].crl_pilots_on_30;
		} else {
			if (state->srate <= 3000000)
				aclc = car_loop[i].crl_pilots_off_2;
			else if (state->srate <= 7000000)
				aclc = car_loop[i].crl_pilots_off_5;
			else if (state->srate <= 15000000)
				aclc = car_loop[i].crl_pilots_off_10;
			else if (state->srate <= 25000000)
				aclc = car_loop[i].crl_pilots_off_20;
			else
				aclc = car_loop[i].crl_pilots_off_30;
		}
	} else { /* 16APSK and 32APSK */
		/* APSK always transmits pilots: only the pilots-on columns exist */
		if (state->srate <= 3000000)
			aclc = car_loop_apsk_low[i].crl_pilots_on_2;
		else if (state->srate <= 7000000)
			aclc = car_loop_apsk_low[i].crl_pilots_on_5;
		else if (state->srate <= 15000000)
			aclc = car_loop_apsk_low[i].crl_pilots_on_10;
		else if (state->srate <= 25000000)
			aclc = car_loop_apsk_low[i].crl_pilots_on_20;
		else
			aclc = car_loop_apsk_low[i].crl_pilots_on_30;
	}

	return aclc;
}
2674
2675static u8 stv090x_optimize_carloop_short(struct stv090x_state *state)
2676{
2677 struct stv090x_short_frame_crloop *short_crl;
2678 s32 index = 0;
2679 u8 aclc = 0x0b;
2680
2681 switch (state->modulation) {
2682 case STV090x_QPSK:
2683 default:
2684 index = 0;
2685 break;
2686 case STV090x_8PSK:
2687 index = 1;
2688 break;
2689 case STV090x_16APSK:
2690 index = 2;
2691 break;
2692 case STV090x_32APSK:
2693 index = 3;
2694 break;
2695 }
2696
2697 if (state->dev_ver >= 0x30)
2698 short_crl = stv090x_s2_short_crl_cut20;
2699 else if (state->dev_ver >= 0x20)
2700 short_crl = stv090x_s2_short_crl_cut30;
2701
2702 if (state->srate <= 3000000)
2703 aclc = short_crl[index].crl_2;
2704 else if (state->srate <= 7000000)
2705 aclc = short_crl[index].crl_5;
2706 else if (state->srate <= 15000000)
2707 aclc = short_crl[index].crl_10;
2708 else if (state->srate <= 25000000)
2709 aclc = short_crl[index].crl_20;
2710 else
2711 aclc = short_crl[index].crl_30;
2712
2713 return aclc;
2714}
2715
2716static int stv090x_optimize_track(struct stv090x_state *state)
2717{
2718 struct dvb_frontend *fe = &state->frontend;
2719
2720 enum stv090x_rolloff rolloff;
2721 enum stv090x_modcod modcod;
2722
2723 s32 srate, pilots, aclc, f_1, f_0, i = 0, blind_tune = 0;
2724 u32 reg;
2725
2726 srate = stv090x_get_srate(state, state->mclk);
2727 srate += stv090x_get_tmgoffst(state, srate);
2728
2729 switch (state->delsys) {
2730 case STV090x_DVBS1:
2731 case STV090x_DSS:
2732 if (state->algo == STV090x_SEARCH_AUTO) {
2733 reg = STV090x_READ_DEMOD(state, DMDCFGMD);
2734 STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
2735 STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 0);
2736 if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
2737 goto err;
2738 }
2739 reg = STV090x_READ_DEMOD(state, DEMOD);
2740 STV090x_SETFIELD_Px(reg, ROLLOFF_CONTROL_FIELD, state->rolloff);
2741 STV090x_SETFIELD_Px(reg, MANUAL_SXROLLOFF_FIELD, 0x01);
2742 if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
2743 goto err;
2744
2745 if (state->dev_ver >= 0x30) {
2746 if (stv090x_get_viterbi(state) < 0)
2747 goto err;
2748
2749 if (state->fec == STV090x_PR12) {
2750 if (STV090x_WRITE_DEMOD(state, GAUSSR0, 0x98) < 0)
2751 goto err;
2752 if (STV090x_WRITE_DEMOD(state, CCIR0, 0x18) < 0)
2753 goto err;
2754 } else {
2755 if (STV090x_WRITE_DEMOD(state, GAUSSR0, 0x18) < 0)
2756 goto err;
2757 if (STV090x_WRITE_DEMOD(state, CCIR0, 0x18) < 0)
2758 goto err;
2759 }
2760 }
2761
2762 if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x75) < 0)
2763 goto err;
2764 break;
2765
2766 case STV090x_DVBS2:
2767 reg = STV090x_READ_DEMOD(state, DMDCFGMD);
2768 STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 0);
2769 STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
2770 if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
2771 goto err;
2772 if (STV090x_WRITE_DEMOD(state, ACLC, 0) < 0)
2773 goto err;
2774 if (STV090x_WRITE_DEMOD(state, BCLC, 0) < 0)
2775 goto err;
2776 if (state->frame_len == STV090x_LONG_FRAME) {
2777 reg = STV090x_READ_DEMOD(state, DMDMODCOD);
2778 modcod = STV090x_GETFIELD_Px(reg, DEMOD_MODCOD_FIELD);
2779 pilots = STV090x_GETFIELD_Px(reg, DEMOD_TYPE_FIELD) & 0x01;
2780 aclc = stv090x_optimize_carloop(state, modcod, pilots);
2781 if (modcod <= STV090x_QPSK_910) {
2782 STV090x_WRITE_DEMOD(state, ACLC2S2Q, aclc);
2783 } else if (modcod <= STV090x_8PSK_910) {
2784 if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
2785 goto err;
2786 if (STV090x_WRITE_DEMOD(state, ACLC2S28, aclc) < 0)
2787 goto err;
2788 }
2789 if ((state->demod_mode == STV090x_SINGLE) && (modcod > STV090x_8PSK_910)) {
2790 if (modcod <= STV090x_16APSK_910) {
2791 if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
2792 goto err;
2793 if (STV090x_WRITE_DEMOD(state, ACLC2S216A, aclc) < 0)
2794 goto err;
2795 } else {
2796 if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
2797 goto err;
2798 if (STV090x_WRITE_DEMOD(state, ACLC2S232A, aclc) < 0)
2799 goto err;
2800 }
2801 }
2802 } else {
2803 /*Carrier loop setting for short frame*/
2804 aclc = stv090x_optimize_carloop_short(state);
2805 if (state->modulation == STV090x_QPSK) {
2806 if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, aclc) < 0)
2807 goto err;
2808 } else if (state->modulation == STV090x_8PSK) {
2809 if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
2810 goto err;
2811 if (STV090x_WRITE_DEMOD(state, ACLC2S28, aclc) < 0)
2812 goto err;
2813 } else if (state->modulation == STV090x_16APSK) {
2814 if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
2815 goto err;
2816 if (STV090x_WRITE_DEMOD(state, ACLC2S216A, aclc) < 0)
2817 goto err;
2818 } else if (state->modulation == STV090x_32APSK) {
2819 if (STV090x_WRITE_DEMOD(state, ACLC2S2Q, 0x2a) < 0)
2820 goto err;
2821 if (STV090x_WRITE_DEMOD(state, ACLC2S232A, aclc) < 0)
2822 goto err;
2823 }
2824 }
2825
2826 STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x67); /* PER */
2827 break;
2828
2829 case STV090x_UNKNOWN:
2830 default:
2831 reg = STV090x_READ_DEMOD(state, DMDCFGMD);
2832 STV090x_SETFIELD_Px(reg, DVBS1_ENABLE_FIELD, 1);
2833 STV090x_SETFIELD_Px(reg, DVBS2_ENABLE_FIELD, 1);
2834 if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
2835 goto err;
2836 break;
2837 }
2838
2839 f_1 = STV090x_READ_DEMOD(state, CFR2);
2840 f_0 = STV090x_READ_DEMOD(state, CFR1);
2841 reg = STV090x_READ_DEMOD(state, TMGOBS);
2842 rolloff = STV090x_GETFIELD_Px(reg, ROLLOFF_STATUS_FIELD);
2843
2844 if (state->algo == STV090x_BLIND_SEARCH) {
2845 STV090x_WRITE_DEMOD(state, SFRSTEP, 0x00);
2846 reg = STV090x_READ_DEMOD(state, DMDCFGMD);
2847 STV090x_SETFIELD_Px(reg, SCAN_ENABLE_FIELD, 0x00);
2848 STV090x_SETFIELD_Px(reg, CFR_AUTOSCAN_FIELD, 0x00);
2849 if (STV090x_WRITE_DEMOD(state, DMDCFGMD, reg) < 0)
2850 goto err;
2851 if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc1) < 0)
2852 goto err;
2853
2854 if (stv090x_set_srate(state, srate) < 0)
2855 goto err;
2856 blind_tune = 1;
2857 }
2858
2859 if (state->dev_ver >= 0x20) {
2860 if ((state->search_mode == STV090x_SEARCH_DVBS1) ||
2861 (state->search_mode == STV090x_SEARCH_DSS) ||
2862 (state->search_mode == STV090x_SEARCH_AUTO)) {
2863
2864 if (STV090x_WRITE_DEMOD(state, VAVSRVIT, 0x0a) < 0)
2865 goto err;
2866 if (STV090x_WRITE_DEMOD(state, VITSCALE, 0x00) < 0)
2867 goto err;
2868 }
2869 }
2870
2871 if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
2872 goto err;
2873
2874 /* AUTO tracking MODE */
2875 if (STV090x_WRITE_DEMOD(state, SFRUP1, 0x80) < 0)
2876 goto err;
2877 /* AUTO tracking MODE */
2878 if (STV090x_WRITE_DEMOD(state, SFRLOW1, 0x80) < 0)
2879 goto err;
2880
2881 if ((state->dev_ver >= 0x20) || (blind_tune == 1) || (state->srate < 10000000)) {
2882 /* update initial carrier freq with the found freq offset */
2883 if (STV090x_WRITE_DEMOD(state, CFRINIT1, f_1) < 0)
2884 goto err;
2885 if (STV090x_WRITE_DEMOD(state, CFRINIT0, f_0) < 0)
2886 goto err;
2887 state->tuner_bw = stv090x_car_width(srate, state->rolloff) + 10000000;
2888
2889 if ((state->dev_ver >= 0x20) || (blind_tune == 1)) {
2890
2891 if (state->algo != STV090x_WARM_SEARCH) {
2892
2893 if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
2894 goto err;
2895
2896 if (state->config->tuner_set_bandwidth) {
2897 if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
2898 goto err;
2899 }
2900
2901 if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
2902 goto err;
2903
2904 }
2905 }
2906 if ((state->algo == STV090x_BLIND_SEARCH) || (state->srate < 10000000))
2907 msleep(50); /* blind search: wait 50ms for SR stabilization */
2908 else
2909 msleep(5);
2910
2911 stv090x_get_lock_tmg(state);
2912
2913 if (!(stv090x_get_dmdlock(state, (state->DemodTimeout / 2)))) {
2914 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
2915 goto err;
2916 if (STV090x_WRITE_DEMOD(state, CFRINIT1, f_1) < 0)
2917 goto err;
2918 if (STV090x_WRITE_DEMOD(state, CFRINIT0, f_0) < 0)
2919 goto err;
2920 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
2921 goto err;
2922
2923 i = 0;
2924
2925 while ((!(stv090x_get_dmdlock(state, (state->DemodTimeout / 2)))) && (i <= 2)) {
2926
2927 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x1f) < 0)
2928 goto err;
2929 if (STV090x_WRITE_DEMOD(state, CFRINIT1, f_1) < 0)
2930 goto err;
2931 if (STV090x_WRITE_DEMOD(state, CFRINIT0, f_0) < 0)
2932 goto err;
2933 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x18) < 0)
2934 goto err;
2935 i++;
2936 }
2937 }
2938
2939 }
2940
2941 if (state->dev_ver >= 0x20) {
2942 if (STV090x_WRITE_DEMOD(state, CARFREQ, 0x49) < 0)
2943 goto err;
2944 }
2945
2946 if ((state->delsys == STV090x_DVBS1) || (state->delsys == STV090x_DSS))
2947 stv090x_set_vit_thtracq(state);
2948
2949 return 0;
2950err:
2951 dprintk(FE_ERROR, 1, "I/O error");
2952 return -1;
2953}
2954
2955static int stv090x_get_feclock(struct stv090x_state *state, s32 timeout)
2956{
2957 s32 timer = 0, lock = 0, stat;
2958 u32 reg;
2959
2960 while ((timer < timeout) && (!lock)) {
2961 reg = STV090x_READ_DEMOD(state, DMDSTATE);
2962 stat = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);
2963
2964 switch (stat) {
2965 case 0: /* searching */
2966 case 1: /* first PLH detected */
2967 default:
2968 lock = 0;
2969 break;
2970
2971 case 2: /* DVB-S2 mode */
2972 reg = STV090x_READ_DEMOD(state, PDELSTATUS1);
2973 lock = STV090x_GETFIELD_Px(reg, PKTDELIN_LOCK_FIELD);
2974 break;
2975
2976 case 3: /* DVB-S1/legacy mode */
2977 reg = STV090x_READ_DEMOD(state, VSTATUSVIT);
2978 lock = STV090x_GETFIELD_Px(reg, LOCKEDVIT_FIELD);
2979 break;
2980 }
2981 if (!lock) {
2982 msleep(10);
2983 timer += 10;
2984 }
2985 }
2986 return lock;
2987}
2988
2989static int stv090x_get_lock(struct stv090x_state *state, s32 timeout_dmd, s32 timeout_fec)
2990{
2991 u32 reg;
2992 s32 timer = 0;
2993 int lock;
2994
2995 lock = stv090x_get_dmdlock(state, timeout_dmd);
2996 if (lock)
2997 lock = stv090x_get_feclock(state, timeout_fec);
2998
2999 if (lock) {
3000 lock = 0;
3001
3002 while ((timer < timeout_fec) && (!lock)) {
3003 reg = STV090x_READ_DEMOD(state, TSSTATUS);
3004 lock = STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD);
3005 msleep(1);
3006 timer++;
3007 }
3008 }
3009
3010 return lock;
3011}
3012
3013static int stv090x_set_s2rolloff(struct stv090x_state *state)
3014{
3015 u32 reg;
3016
3017 if (state->dev_ver <= 0x20) {
3018 /* rolloff to auto mode if DVBS2 */
3019 reg = STV090x_READ_DEMOD(state, DEMOD);
3020 STV090x_SETFIELD_Px(reg, MANUAL_SXROLLOFF_FIELD, 0x00);
3021 if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
3022 goto err;
3023 } else {
3024 /* DVB-S2 rolloff to auto mode if DVBS2 */
3025 reg = STV090x_READ_DEMOD(state, DEMOD);
3026 STV090x_SETFIELD_Px(reg, MANUAL_S2ROLLOFF_FIELD, 0x00);
3027 if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
3028 goto err;
3029 }
3030 return 0;
3031err:
3032 dprintk(FE_ERROR, 1, "I/O error");
3033 return -1;
3034}
3035
3036
/*
 * Main acquisition algorithm: configure the demod for blind/cold/warm
 * search, program the tuner, verify signal presence, run the search,
 * and on success release the TS merger and reset the error counters.
 * Returns the resulting signal state, or -1 on register I/O error
 * (NOTE(review): -1 is not a declared enum stv090x_signal_state value
 * — confirm callers only compare against STV090x_RANGEOK).
 */
static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
{
	struct dvb_frontend *fe = &state->frontend;
	enum stv090x_signal_state signal_state = STV090x_NOCARRIER;
	u32 reg;
	s32 timeout_dmd = 500, timeout_fec = 50, agc1_power, power_iq = 0, i;
	int lock = 0, low_sr = 0, no_signal = 0;

	reg = STV090x_READ_DEMOD(state, TSCFGH);
	STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 1); /* Stop path 1 stream merger */
	if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
		goto err;

	if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5c) < 0) /* Demod stop */
		goto err;

	if (state->dev_ver >= 0x20) {
		if (STV090x_WRITE_DEMOD(state, CORRELABS, 0x9e) < 0) /* cut 2.0 */
			goto err;
	}

	stv090x_get_lock_tmg(state);

	if (state->algo == STV090x_BLIND_SEARCH) {
		/* Symbol rate unknown: open the tuner wide and start low. */
		state->tuner_bw = 2 * 36000000; /* wide bw for unknown srate */
		if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc0) < 0) /* wider srate scan */
			goto err;
		if (STV090x_WRITE_DEMOD(state, CORRELMANT, 0x70) < 0)
			goto err;
		if (stv090x_set_srate(state, 1000000) < 0) /* inital srate = 1Msps */
			goto err;
	} else {
		/* known srate */
		if (STV090x_WRITE_DEMOD(state, DMDTOM, 0x20) < 0)
			goto err;
		if (STV090x_WRITE_DEMOD(state, TMGCFG, 0xd2) < 0)
			goto err;

		if (state->srate < 2000000) {
			/* SR < 2MSPS */
			if (STV090x_WRITE_DEMOD(state, CORRELMANT, 0x63) < 0)
				goto err;
		} else {
			/* SR >= 2Msps */
			if (STV090x_WRITE_DEMOD(state, CORRELMANT, 0x70) < 0)
				goto err;
		}

		if (STV090x_WRITE_DEMOD(state, AGC2REF, 0x38) < 0)
			goto err;

		if (state->dev_ver >= 0x20) {
			if (STV090x_WRITE_DEMOD(state, KREFTMG, 0x5a) < 0)
				goto err;
			/* Cold search widens the tuner bandwidth by 1.5x. */
			if (state->algo == STV090x_COLD_SEARCH)
				state->tuner_bw = (15 * (stv090x_car_width(state->srate, state->rolloff) + 10000000)) / 10;
			else if (state->algo == STV090x_WARM_SEARCH)
				state->tuner_bw = stv090x_car_width(state->srate, state->rolloff) + 10000000;
		}

		/* if cold start or warm (Symbolrate is known)
		 * use a Narrow symbol rate scan range
		 */
		if (STV090x_WRITE_DEMOD(state, TMGCFG2, 0xc1) < 0) /* narrow srate scan */
			goto err;

		if (stv090x_set_srate(state, state->srate) < 0)
			goto err;

		if (stv090x_set_max_srate(state, state->mclk, state->srate) < 0)
			goto err;
		if (stv090x_set_min_srate(state, state->mclk, state->srate) < 0)
			goto err;

		if (state->srate >= 10000000)
			low_sr = 0;
		else
			low_sr = 1;
	}

	/* Setup tuner */
	if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
		goto err;

	if (state->config->tuner_set_bbgain) {
		if (state->config->tuner_set_bbgain(fe, 10) < 0) /* 10dB */
			goto err;
	}

	if (state->config->tuner_set_frequency) {
		if (state->config->tuner_set_frequency(fe, state->frequency) < 0)
			goto err;
	}

	if (state->config->tuner_set_bandwidth) {
		if (state->config->tuner_set_bandwidth(fe, state->tuner_bw) < 0)
			goto err;
	}

	if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
		goto err;

	msleep(50);

	if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
		goto err;

	if (state->config->tuner_get_status) {
		if (state->config->tuner_get_status(fe, &reg) < 0)
			goto err;
	}

	/*
	 * NOTE(review): if tuner_get_status is NULL, reg still holds the
	 * TSCFGH value read at function entry, so this debug message may
	 * report a stale/meaningless lock state — confirm and consider
	 * clearing reg before the callback check.
	 */
	if (reg)
		dprintk(FE_DEBUG, 1, "Tuner phase locked");
	else
		dprintk(FE_DEBUG, 1, "Tuner unlocked");

	if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
		goto err;

	msleep(10);
	agc1_power = MAKEWORD16(STV090x_READ_DEMOD(state, AGCIQIN1),
				STV090x_READ_DEMOD(state, AGCIQIN0));

	if (agc1_power == 0) {
		/* If AGC1 integrator value is 0
		 * then read POWERI, POWERQ
		 */
		for (i = 0; i < 5; i++) {
			power_iq += (STV090x_READ_DEMOD(state, POWERI) +
				     STV090x_READ_DEMOD(state, POWERQ)) >> 1;
		}
		power_iq /= 5;
	}

	if ((agc1_power == 0) && (power_iq < STV090x_IQPOWER_THRESHOLD)) {
		/* No measurable energy on either AGC path: give up early. */
		dprintk(FE_ERROR, 1, "No Signal: POWER_IQ=0x%02x", power_iq);
		lock = 0;

	} else {
		reg = STV090x_READ_DEMOD(state, DEMOD);
		STV090x_SETFIELD_Px(reg, SPECINV_CONTROL_FIELD, state->inversion);

		if (state->dev_ver <= 0x20) {
			/* rolloff to auto mode if DVBS2 */
			STV090x_SETFIELD_Px(reg, MANUAL_SXROLLOFF_FIELD, 1);
		} else {
			/* DVB-S2 rolloff to auto mode if DVBS2 */
			STV090x_SETFIELD_Px(reg, MANUAL_S2ROLLOFF_FIELD, 1);
		}
		if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
			goto err;

		if (stv090x_delivery_search(state) < 0)
			goto err;

		if (state->algo != STV090x_BLIND_SEARCH) {
			if (stv090x_start_search(state) < 0)
				goto err;
		}
	}

	/* need to check for AGC1 state */



	if (state->algo == STV090x_BLIND_SEARCH)
		lock = stv090x_blind_search(state);

	else if (state->algo == STV090x_COLD_SEARCH)
		lock = stv090x_get_coldlock(state, timeout_dmd);

	else if (state->algo == STV090x_WARM_SEARCH)
		lock = stv090x_get_dmdlock(state, timeout_dmd);

	/* Cold search fallback: try the software zig-zag algorithm. */
	if ((!lock) && (state->algo == STV090x_COLD_SEARCH)) {
		if (!low_sr) {
			if (stv090x_chk_tmg(state))
				lock = stv090x_sw_algo(state);
		}
	}

	if (lock)
		signal_state = stv090x_get_sig_params(state);

	if ((lock) && (signal_state == STV090x_RANGEOK)) { /* signal within Range */
		stv090x_optimize_track(state);

		if (state->dev_ver >= 0x20) {
			/* >= Cut 2.0 :release TS reset after
			 * demod lock and optimized Tracking
			 */
			reg = STV090x_READ_DEMOD(state, TSCFGH);
			STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0); /* release merger reset */
			if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
				goto err;

			msleep(3);

			STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 1); /* merger reset */
			if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
				goto err;

			STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0); /* release merger reset */
			if (STV090x_WRITE_DEMOD(state, TSCFGH, reg) < 0)
				goto err;
		}

		if (stv090x_get_lock(state, timeout_fec, timeout_fec)) {
			lock = 1;
			if (state->delsys == STV090x_DVBS2) {
				stv090x_set_s2rolloff(state);

				reg = STV090x_READ_DEMOD(state, PDELCTRL2);
				STV090x_SETFIELD_Px(reg, RESET_UPKO_COUNT, 1);
				if (STV090x_WRITE_DEMOD(state, PDELCTRL2, reg) < 0)
					goto err;
				/* Reset DVBS2 packet delinator error counter */
				reg = STV090x_READ_DEMOD(state, PDELCTRL2);
				STV090x_SETFIELD_Px(reg, RESET_UPKO_COUNT, 0);
				if (STV090x_WRITE_DEMOD(state, PDELCTRL2, reg) < 0)
					goto err;

				if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x67) < 0) /* PER */
					goto err;
			} else {
				if (STV090x_WRITE_DEMOD(state, ERRCTRL1, 0x75) < 0)
					goto err;
			}
			/* Reset the Total packet counter */
			if (STV090x_WRITE_DEMOD(state, FBERCPT4, 0x00) < 0)
				goto err;
			/* Reset the packet Error counter2 */
			if (STV090x_WRITE_DEMOD(state, ERRCTRL2, 0xc1) < 0)
				goto err;
		} else {
			lock = 0;
			signal_state = STV090x_NODATA;
			/* NOTE(review): no_signal is assigned but never used. */
			no_signal = stv090x_chk_signal(state);
		}
	}
	return signal_state;

err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
3284
3285static enum dvbfe_search stv090x_search(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
3286{
3287 struct stv090x_state *state = fe->demodulator_priv;
3288 struct dtv_frontend_properties *props = &fe->dtv_property_cache;
3289
3290 state->delsys = props->delivery_system;
3291 state->frequency = p->frequency;
3292 state->srate = p->u.qpsk.symbol_rate;
3293 state->search_mode = STV090x_SEARCH_AUTO;
3294 state->algo = STV090x_COLD_SEARCH;
3295 state->fec = STV090x_PRERR;
3296 state->search_range = 2000000;
3297
3298 if (stv090x_algo(state) == STV090x_RANGEOK) {
3299 dprintk(FE_DEBUG, 1, "Search success!");
3300 return DVBFE_ALGO_SEARCH_SUCCESS;
3301 } else {
3302 dprintk(FE_DEBUG, 1, "Search failed!");
3303 return DVBFE_ALGO_SEARCH_FAILED;
3304 }
3305
3306 return DVBFE_ALGO_SEARCH_ERROR;
3307}
3308
3309/* FIXME! */
3310static int stv090x_read_status(struct dvb_frontend *fe, enum fe_status *status)
3311{
3312 struct stv090x_state *state = fe->demodulator_priv;
3313 u32 reg;
3314 u8 search_state;
3315
3316 reg = STV090x_READ_DEMOD(state, DMDSTATE);
3317 search_state = STV090x_GETFIELD_Px(reg, HEADER_MODE_FIELD);
3318
3319 switch (search_state) {
3320 case 0: /* searching */
3321 case 1: /* first PLH detected */
3322 default:
3323 dprintk(FE_DEBUG, 1, "Status: Unlocked (Searching ..)");
3324 *status = 0;
3325 break;
3326
3327 case 2: /* DVB-S2 mode */
3328 dprintk(FE_DEBUG, 1, "Delivery system: DVB-S2");
3329 reg = STV090x_READ_DEMOD(state, DSTATUS);
3330 if (STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD)) {
3331 reg = STV090x_READ_DEMOD(state, TSSTATUS);
3332 if (STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD)) {
3333 *status = FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
3334 }
3335 }
3336 break;
3337
3338 case 3: /* DVB-S1/legacy mode */
3339 dprintk(FE_DEBUG, 1, "Delivery system: DVB-S");
3340 reg = STV090x_READ_DEMOD(state, DSTATUS);
3341 if (STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD)) {
3342 reg = STV090x_READ_DEMOD(state, VSTATUSVIT);
3343 if (STV090x_GETFIELD_Px(reg, LOCKEDVIT_FIELD)) {
3344 reg = STV090x_READ_DEMOD(state, TSSTATUS);
3345 if (STV090x_GETFIELD_Px(reg, TSFIFO_LINEOK_FIELD)) {
3346 *status = FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
3347 }
3348 }
3349 }
3350 break;
3351 }
3352
3353 return 0;
3354}
3355
/*
 * Read the packet error rate.  Without FE lock a saturated PER
 * (1 << 23) is reported; otherwise the 24-bit error counter 2 is
 * assembled from ERRCNT22/21/20 and sanity-checked against the total
 * frame counter FBERCPT4..0.  Both counters are reset on exit.
 * Returns 0 on success, -1 on register I/O error.
 */
static int stv090x_read_per(struct dvb_frontend *fe, u32 *per)
{
	struct stv090x_state *state = fe->demodulator_priv;

	s32 count_4, count_3, count_2, count_1, count_0, count;
	u32 reg, h, m, l;
	enum fe_status status;

	stv090x_read_status(fe, &status);
	if (!(status & FE_HAS_LOCK)) {
		*per = 1 << 23; /* Max PER */
	} else {
		/* Counter 2 */
		reg = STV090x_READ_DEMOD(state, ERRCNT22);
		h = STV090x_GETFIELD_Px(reg, ERR_CNT2_FIELD);

		reg = STV090x_READ_DEMOD(state, ERRCNT21);
		m = STV090x_GETFIELD_Px(reg, ERR_CNT21_FIELD);

		reg = STV090x_READ_DEMOD(state, ERRCNT20);
		l = STV090x_GETFIELD_Px(reg, ERR_CNT20_FIELD);

		/* assemble the 24-bit error count: high, mid, low byte */
		*per = ((h << 16) | (m << 8) | l);

		count_4 = STV090x_READ_DEMOD(state, FBERCPT4);
		count_3 = STV090x_READ_DEMOD(state, FBERCPT3);
		count_2 = STV090x_READ_DEMOD(state, FBERCPT2);
		count_1 = STV090x_READ_DEMOD(state, FBERCPT1);
		count_0 = STV090x_READ_DEMOD(state, FBERCPT0);

		/* only trust the low 24 bits while the two high bytes are 0 */
		if ((!count_4) && (!count_3)) {
			count = (count_2 & 0xff) << 16;
			count |= (count_1 & 0xff) << 8;
			count |= count_0 & 0xff;
		} else {
			count = 1 << 24;
		}
		/* no frames counted yet: report minimal non-zero PER */
		if (count == 0)
			*per = 1;
	}
	/* restart both counters for the next measurement window */
	if (STV090x_WRITE_DEMOD(state, FBERCPT4, 0) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, ERRCTRL2, 0xc1) < 0)
		goto err;

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
3406
3407static int stv090x_table_lookup(const struct stv090x_tab *tab, int max, int val)
3408{
3409 int res = 0;
3410 int min = 0, med;
3411
3412 if (val < tab[min].read)
3413 res = tab[min].real;
3414 else if (val >= tab[max].read)
3415 res = tab[max].real;
3416 else {
3417 while ((max - min) > 1) {
3418 med = (max + min) / 2;
3419 if (val >= tab[min].read && val < tab[med].read)
3420 max = med;
3421 else
3422 min = med;
3423 }
3424 res = ((val - tab[min].read) *
3425 (tab[max].real - tab[min].real) /
3426 (tab[max].read - tab[min].read)) +
3427 tab[min].real;
3428 }
3429
3430 return res;
3431}
3432
/*
 * Report signal strength from the AGC IQ integrator via the RF lookup
 * table, saturating at the table end points.  Always returns 0.
 *
 * NOTE(review): *strength is u16, so "*strength = -100" wraps to
 * 65436, and "= 5" is on a different scale than the table output —
 * this mirrors the existing behavior, but the intended unit/scale
 * should be confirmed against the callers.
 */
static int stv090x_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
	struct stv090x_state *state = fe->demodulator_priv;
	u32 reg;
	s32 agc;

	reg = STV090x_READ_DEMOD(state, AGCIQIN1);
	agc = STV090x_GETFIELD_Px(reg, AGCIQ_VALUE_FIELD);

	*strength = stv090x_table_lookup(stv090x_rf_tab, ARRAY_SIZE(stv090x_rf_tab) - 1, agc);
	/* saturate outside the calibrated table range */
	if (agc > stv090x_rf_tab[0].read)
		*strength = 5;
	else if (agc < stv090x_rf_tab[ARRAY_SIZE(stv090x_rf_tab) - 1].read)
		*strength = -100;

	return 0;
}
3450
3451static int stv090x_read_cnr(struct dvb_frontend *fe, u16 *cnr)
3452{
3453 struct stv090x_state *state = fe->demodulator_priv;
3454 u32 reg_0, reg_1, reg, i;
3455 s32 val_0, val_1, val = 0;
3456 u8 lock_f;
3457
3458 switch (state->delsys) {
3459 case STV090x_DVBS2:
3460 reg = STV090x_READ_DEMOD(state, DSTATUS);
3461 lock_f = STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD);
3462 if (lock_f) {
3463 msleep(5);
3464 for (i = 0; i < 16; i++) {
3465 reg_1 = STV090x_READ_DEMOD(state, NNOSPLHT1);
3466 val_1 = STV090x_GETFIELD_Px(reg_1, NOSPLHT_NORMED_FIELD);
3467 reg_0 = STV090x_READ_DEMOD(state, NNOSPLHT0);
3468 val_0 = STV090x_GETFIELD_Px(reg_1, NOSPLHT_NORMED_FIELD);
3469 val += MAKEWORD16(val_1, val_0);
3470 msleep(1);
3471 }
3472 val /= 16;
3473 *cnr = stv090x_table_lookup(stv090x_s2cn_tab, ARRAY_SIZE(stv090x_s2cn_tab) - 1, val);
3474 if (val < stv090x_s2cn_tab[ARRAY_SIZE(stv090x_s2cn_tab) - 1].read)
3475 *cnr = 1000;
3476 }
3477 break;
3478
3479 case STV090x_DVBS1:
3480 case STV090x_DSS:
3481 reg = STV090x_READ_DEMOD(state, DSTATUS);
3482 lock_f = STV090x_GETFIELD_Px(reg, LOCK_DEFINITIF_FIELD);
3483 if (lock_f) {
3484 msleep(5);
3485 for (i = 0; i < 16; i++) {
3486 reg_1 = STV090x_READ_DEMOD(state, NOSDATAT1);
3487 val_1 = STV090x_GETFIELD_Px(reg_1, NOSDATAT_UNNORMED_FIELD);
3488 reg_0 = STV090x_READ_DEMOD(state, NOSDATAT0);
3489 val_0 = STV090x_GETFIELD_Px(reg_1, NOSDATAT_UNNORMED_FIELD);
3490 val += MAKEWORD16(val_1, val_0);
3491 msleep(1);
3492 }
3493 val /= 16;
3494 *cnr = stv090x_table_lookup(stv090x_s1cn_tab, ARRAY_SIZE(stv090x_s1cn_tab) - 1, val);
3495 if (val < stv090x_s2cn_tab[ARRAY_SIZE(stv090x_s1cn_tab) - 1].read)
3496 *cnr = 1000;
3497 }
3498 break;
3499 default:
3500 break;
3501 }
3502
3503 return 0;
3504}
3505
3506static int stv090x_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
3507{
3508 struct stv090x_state *state = fe->demodulator_priv;
3509 u32 reg;
3510
3511 reg = STV090x_READ_DEMOD(state, DISTXCTL);
3512 switch (tone) {
3513 case SEC_TONE_ON:
3514 STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 0);
3515 STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
3516 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
3517 goto err;
3518 STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 0);
3519 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
3520 goto err;
3521 break;
3522
3523 case SEC_TONE_OFF:
3524 STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 0);
3525 STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
3526 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
3527 goto err;
3528 break;
3529 default:
3530 return -EINVAL;
3531 }
3532
3533 return 0;
3534err:
3535 dprintk(FE_ERROR, 1, "I/O error");
3536 return -1;
3537}
3538
3539
/* Tell the DVB core to drive tuning through this driver's own
 * search callback (stv090x_search) rather than a generic sweep. */
static enum dvbfe_algo stv090x_frontend_algo(struct dvb_frontend *fe)
{
	return DVBFE_ALGO_CUSTOM;
}
3544
3545static int stv090x_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd)
3546{
3547 struct stv090x_state *state = fe->demodulator_priv;
3548 u32 reg, idle = 0, fifo_full = 1;
3549 int i;
3550
3551 reg = STV090x_READ_DEMOD(state, DISTXCTL);
3552
3553 STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, 2);
3554 STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
3555 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
3556 goto err;
3557 STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 0);
3558 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
3559 goto err;
3560
3561 STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 1);
3562 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
3563 goto err;
3564
3565 for (i = 0; i < cmd->msg_len; i++) {
3566
3567 while (fifo_full) {
3568 reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
3569 fifo_full = STV090x_GETFIELD_Px(reg, FIFO_FULL_FIELD);
3570 }
3571
3572 if (STV090x_WRITE_DEMOD(state, DISTXDATA, cmd->msg[i]) < 0)
3573 goto err;
3574 }
3575 reg = STV090x_READ_DEMOD(state, DISTXCTL);
3576 STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 0);
3577 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
3578 goto err;
3579
3580 i = 0;
3581
3582 while ((!idle) && (i < 10)) {
3583 reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
3584 idle = STV090x_GETFIELD_Px(reg, TX_IDLE_FIELD);
3585 msleep(10);
3586 i++;
3587 }
3588
3589 return 0;
3590err:
3591 dprintk(FE_ERROR, 1, "I/O error");
3592 return -1;
3593}
3594
3595static int stv090x_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst)
3596{
3597 struct stv090x_state *state = fe->demodulator_priv;
3598 u32 reg, idle = 0, fifo_full = 1;
3599 u8 mode, value;
3600 int i;
3601
3602 reg = STV090x_READ_DEMOD(state, DISTXCTL);
3603
3604 if (burst == SEC_MINI_A) {
3605 mode = 3;
3606 value = 0x00;
3607 } else {
3608 mode = 2;
3609 value = 0xFF;
3610 }
3611
3612 STV090x_SETFIELD_Px(reg, DISTX_MODE_FIELD, mode);
3613 STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 1);
3614 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
3615 goto err;
3616 STV090x_SETFIELD_Px(reg, DISEQC_RESET_FIELD, 0);
3617 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
3618 goto err;
3619
3620 STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 1);
3621 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
3622 goto err;
3623
3624 while (fifo_full) {
3625 reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
3626 fifo_full = STV090x_GETFIELD_Px(reg, FIFO_FULL_FIELD);
3627 }
3628
3629 if (STV090x_WRITE_DEMOD(state, DISTXDATA, value) < 0)
3630 goto err;
3631
3632 reg = STV090x_READ_DEMOD(state, DISTXCTL);
3633 STV090x_SETFIELD_Px(reg, DIS_PRECHARGE_FIELD, 0);
3634 if (STV090x_WRITE_DEMOD(state, DISTXCTL, reg) < 0)
3635 goto err;
3636
3637 i = 0;
3638
3639 while ((!idle) && (i < 10)) {
3640 reg = STV090x_READ_DEMOD(state, DISTXSTATUS);
3641 idle = STV090x_GETFIELD_Px(reg, TX_IDLE_FIELD);
3642 msleep(10);
3643 i++;
3644 }
3645
3646 return 0;
3647err:
3648 dprintk(FE_ERROR, 1, "I/O error");
3649 return -1;
3650}
3651
3652static int stv090x_recv_slave_reply(struct dvb_frontend *fe, struct dvb_diseqc_slave_reply *reply)
3653{
3654 struct stv090x_state *state = fe->demodulator_priv;
3655 u32 reg = 0, i = 0, rx_end = 0;
3656
3657 while ((rx_end != 1) && (i < 10)) {
3658 msleep(10);
3659 i++;
3660 reg = STV090x_READ_DEMOD(state, DISRX_ST0);
3661 rx_end = STV090x_GETFIELD_Px(reg, RX_END_FIELD);
3662 }
3663
3664 if (rx_end) {
3665 reply->msg_len = STV090x_GETFIELD_Px(reg, FIFO_BYTENBR_FIELD);
3666 for (i = 0; i < reply->msg_len; i++)
3667 reply->msg[i] = STV090x_READ_DEMOD(state, DISRXDATA);
3668 }
3669
3670 return 0;
3671}
3672
3673static int stv090x_sleep(struct dvb_frontend *fe)
3674{
3675 struct stv090x_state *state = fe->demodulator_priv;
3676 u32 reg;
3677
3678 dprintk(FE_DEBUG, 1, "Set %s to sleep",
3679 state->device == STV0900 ? "STV0900" : "STV0903");
3680
3681 reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
3682 STV090x_SETFIELD(reg, STANDBY_FIELD, 0x01);
3683 if (stv090x_write_reg(state, STV090x_SYNTCTRL, reg) < 0)
3684 goto err;
3685
3686 reg = stv090x_read_reg(state, STV090x_TSTTNR1);
3687 STV090x_SETFIELD(reg, ADC1_PON_FIELD, 0);
3688 if (stv090x_write_reg(state, STV090x_TSTTNR1, reg) < 0)
3689 goto err;
3690
3691 return 0;
3692err:
3693 dprintk(FE_ERROR, 1, "I/O error");
3694 return -1;
3695}
3696
3697static int stv090x_wakeup(struct dvb_frontend *fe)
3698{
3699 struct stv090x_state *state = fe->demodulator_priv;
3700 u32 reg;
3701
3702 dprintk(FE_DEBUG, 1, "Wake %s from standby",
3703 state->device == STV0900 ? "STV0900" : "STV0903");
3704
3705 reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
3706 STV090x_SETFIELD(reg, STANDBY_FIELD, 0x00);
3707 if (stv090x_write_reg(state, STV090x_SYNTCTRL, reg) < 0)
3708 goto err;
3709
3710 reg = stv090x_read_reg(state, STV090x_TSTTNR1);
3711 STV090x_SETFIELD(reg, ADC1_PON_FIELD, 1);
3712 if (stv090x_write_reg(state, STV090x_TSTTNR1, reg) < 0)
3713 goto err;
3714
3715 return 0;
3716err:
3717 dprintk(FE_ERROR, 1, "I/O error");
3718 return -1;
3719}
3720
/* dvb_frontend_ops.release callback: free the per-demod state
 * allocated at attach time. */
static void stv090x_release(struct dvb_frontend *fe)
{
	struct stv090x_state *state = fe->demodulator_priv;

	kfree(state);
}
3727
3728static int stv090x_ldpc_mode(struct stv090x_state *state, enum stv090x_mode ldpc_mode)
3729{
3730 u32 reg = 0;
3731
3732 switch (ldpc_mode) {
3733 case STV090x_DUAL:
3734 default:
3735 if ((state->demod_mode != STV090x_DUAL) || (STV090x_GETFIELD(reg, DDEMOD_FIELD) != 1)) {
3736 /* set LDPC to dual mode */
3737 if (stv090x_write_reg(state, STV090x_GENCFG, 0x1d) < 0)
3738 goto err;
3739
3740 state->demod_mode = STV090x_DUAL;
3741
3742 reg = stv090x_read_reg(state, STV090x_TSTRES0);
3743 STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x1);
3744 if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
3745 goto err;
3746 STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x0);
3747 if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
3748 goto err;
3749
3750 if (STV090x_WRITE_DEMOD(state, MODCODLST0, 0xff) < 0)
3751 goto err;
3752 if (STV090x_WRITE_DEMOD(state, MODCODLST1, 0xff) < 0)
3753 goto err;
3754 if (STV090x_WRITE_DEMOD(state, MODCODLST2, 0xff) < 0)
3755 goto err;
3756 if (STV090x_WRITE_DEMOD(state, MODCODLST3, 0xff) < 0)
3757 goto err;
3758 if (STV090x_WRITE_DEMOD(state, MODCODLST4, 0xff) < 0)
3759 goto err;
3760 if (STV090x_WRITE_DEMOD(state, MODCODLST5, 0xff) < 0)
3761 goto err;
3762 if (STV090x_WRITE_DEMOD(state, MODCODLST6, 0xff) < 0)
3763 goto err;
3764
3765 if (STV090x_WRITE_DEMOD(state, MODCODLST7, 0xcc) < 0)
3766 goto err;
3767 if (STV090x_WRITE_DEMOD(state, MODCODLST8, 0xcc) < 0)
3768 goto err;
3769 if (STV090x_WRITE_DEMOD(state, MODCODLST9, 0xcc) < 0)
3770 goto err;
3771 if (STV090x_WRITE_DEMOD(state, MODCODLSTA, 0xcc) < 0)
3772 goto err;
3773 if (STV090x_WRITE_DEMOD(state, MODCODLSTB, 0xcc) < 0)
3774 goto err;
3775 if (STV090x_WRITE_DEMOD(state, MODCODLSTC, 0xcc) < 0)
3776 goto err;
3777 if (STV090x_WRITE_DEMOD(state, MODCODLSTD, 0xcc) < 0)
3778 goto err;
3779
3780 if (STV090x_WRITE_DEMOD(state, MODCODLSTE, 0xff) < 0)
3781 goto err;
3782 if (STV090x_WRITE_DEMOD(state, MODCODLSTF, 0xcf) < 0)
3783 goto err;
3784 }
3785 break;
3786
3787 case STV090x_SINGLE:
3788 if (stv090x_stop_modcod(state) < 0)
3789 goto err;
3790 if (stv090x_activate_modcod_single(state) < 0)
3791 goto err;
3792
3793 if (state->demod == STV090x_DEMODULATOR_1) {
3794 if (stv090x_write_reg(state, STV090x_GENCFG, 0x06) < 0) /* path 2 */
3795 goto err;
3796 } else {
3797 if (stv090x_write_reg(state, STV090x_GENCFG, 0x04) < 0) /* path 1 */
3798 goto err;
3799 }
3800
3801 reg = stv090x_read_reg(state, STV090x_TSTRES0);
3802 STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x1);
3803 if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
3804 goto err;
3805 STV090x_SETFIELD(reg, FRESFEC_FIELD, 0x0);
3806 if (stv090x_write_reg(state, STV090x_TSTRES0, reg) < 0)
3807 goto err;
3808
3809 reg = STV090x_READ_DEMOD(state, PDELCTRL1);
3810 STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0x01);
3811 if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
3812 goto err;
3813 STV090x_SETFIELD_Px(reg, ALGOSWRST_FIELD, 0x00);
3814 if (STV090x_WRITE_DEMOD(state, PDELCTRL1, reg) < 0)
3815 goto err;
3816 break;
3817 }
3818
3819 return 0;
3820err:
3821 dprintk(FE_ERROR, 1, "I/O error");
3822 return -1;
3823}
3824
3825/* return (Hz), clk in Hz*/
3826static u32 stv090x_get_mclk(struct stv090x_state *state)
3827{
3828 const struct stv090x_config *config = state->config;
3829 u32 div, reg;
3830 u8 ratio;
3831
3832 div = stv090x_read_reg(state, STV090x_NCOARSE);
3833 reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
3834 ratio = STV090x_GETFIELD(reg, SELX1RATIO_FIELD) ? 4 : 6;
3835
3836 return (div + 1) * config->xtal / ratio; /* kHz */
3837}
3838
/*
 * Program the master clock PLL divider for the requested @mclk, then
 * re-derive the achieved clock and set the DiSEqC 22kHz dividers from
 * it.  Returns 0 on success, -1 on register I/O error.
 *
 * NOTE(review): the @clk parameter is never used — confirm whether it
 * was meant to replace config->xtal in the divider computation.
 */
static int stv090x_set_mclk(struct stv090x_state *state, u32 mclk, u32 clk)
{
	const struct stv090x_config *config = state->config;
	u32 reg, div, clk_sel;

	reg = stv090x_read_reg(state, STV090x_SYNTCTRL);
	clk_sel = ((STV090x_GETFIELD(reg, SELX1RATIO_FIELD) == 1) ? 4 : 6);

	/* invert the mclk formula used by stv090x_get_mclk() */
	div = ((clk_sel * mclk) / config->xtal) - 1;

	reg = stv090x_read_reg(state, STV090x_NCOARSE);
	STV090x_SETFIELD(reg, M_DIV_FIELD, div);
	if (stv090x_write_reg(state, STV090x_NCOARSE, reg) < 0)
		goto err;

	/* cache the clock actually achieved with the rounded divider */
	state->mclk = stv090x_get_mclk(state);

	/*Set the DiseqC frequency to 22KHz */
	div = state->mclk / 704000;
	if (STV090x_WRITE_DEMOD(state, F22TX, div) < 0)
		goto err;
	if (STV090x_WRITE_DEMOD(state, F22RX, div) < 0)
		goto err;

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
3868
3869static int stv090x_set_tspath(struct stv090x_state *state)
3870{
3871 u32 reg;
3872
3873 if (state->dev_ver >= 0x20) {
3874 switch (state->config->ts1_mode) {
3875 case STV090x_TSMODE_PARALLEL_PUNCTURED:
3876 case STV090x_TSMODE_DVBCI:
3877 switch (state->config->ts2_mode) {
3878 case STV090x_TSMODE_SERIAL_PUNCTURED:
3879 case STV090x_TSMODE_SERIAL_CONTINUOUS:
3880 default:
3881 stv090x_write_reg(state, STV090x_TSGENERAL, 0x00);
3882 break;
3883
3884 case STV090x_TSMODE_PARALLEL_PUNCTURED:
3885 case STV090x_TSMODE_DVBCI:
3886 if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x06) < 0) /* Mux'd stream mode */
3887 goto err;
3888 reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
3889 STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
3890 if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
3891 goto err;
3892 reg = stv090x_read_reg(state, STV090x_P2_TSCFGM);
3893 STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
3894 if (stv090x_write_reg(state, STV090x_P2_TSCFGM, reg) < 0)
3895 goto err;
3896 if (stv090x_write_reg(state, STV090x_P1_TSSPEED, 0x14) < 0)
3897 goto err;
3898 if (stv090x_write_reg(state, STV090x_P2_TSSPEED, 0x28) < 0)
3899 goto err;
3900 break;
3901 }
3902 break;
3903
3904 case STV090x_TSMODE_SERIAL_PUNCTURED:
3905 case STV090x_TSMODE_SERIAL_CONTINUOUS:
3906 default:
3907 switch (state->config->ts2_mode) {
3908 case STV090x_TSMODE_SERIAL_PUNCTURED:
3909 case STV090x_TSMODE_SERIAL_CONTINUOUS:
3910 default:
3911 if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x0c) < 0)
3912 goto err;
3913 break;
3914
3915 case STV090x_TSMODE_PARALLEL_PUNCTURED:
3916 case STV090x_TSMODE_DVBCI:
3917 if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x0a) < 0)
3918 goto err;
3919 break;
3920 }
3921 break;
3922 }
3923 } else {
3924 switch (state->config->ts1_mode) {
3925 case STV090x_TSMODE_PARALLEL_PUNCTURED:
3926 case STV090x_TSMODE_DVBCI:
3927 switch (state->config->ts2_mode) {
3928 case STV090x_TSMODE_SERIAL_PUNCTURED:
3929 case STV090x_TSMODE_SERIAL_CONTINUOUS:
3930 default:
3931 stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x10);
3932 break;
3933
3934 case STV090x_TSMODE_PARALLEL_PUNCTURED:
3935 case STV090x_TSMODE_DVBCI:
3936 stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x16);
3937 reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
3938 STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 3);
3939 if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
3940 goto err;
3941 reg = stv090x_read_reg(state, STV090x_P1_TSCFGM);
3942 STV090x_SETFIELD_Px(reg, TSFIFO_MANSPEED_FIELD, 0);
3943 if (stv090x_write_reg(state, STV090x_P1_TSCFGM, reg) < 0)
3944 goto err;
3945 if (stv090x_write_reg(state, STV090x_P1_TSSPEED, 0x14) < 0)
3946 goto err;
3947 if (stv090x_write_reg(state, STV090x_P2_TSSPEED, 0x28) < 0)
3948 goto err;
3949 break;
3950 }
3951 break;
3952
3953 case STV090x_TSMODE_SERIAL_PUNCTURED:
3954 case STV090x_TSMODE_SERIAL_CONTINUOUS:
3955 default:
3956 switch (state->config->ts2_mode) {
3957 case STV090x_TSMODE_SERIAL_PUNCTURED:
3958 case STV090x_TSMODE_SERIAL_CONTINUOUS:
3959 default:
3960 stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x14);
3961 break;
3962
3963 case STV090x_TSMODE_PARALLEL_PUNCTURED:
3964 case STV090x_TSMODE_DVBCI:
3965 stv090x_write_reg(state, STV090x_TSGENERAL1X, 0x12);
3966 break;
3967 }
3968 break;
3969 }
3970 }
3971
3972 switch (state->config->ts1_mode) {
3973 case STV090x_TSMODE_PARALLEL_PUNCTURED:
3974 reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
3975 STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
3976 STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
3977 if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
3978 goto err;
3979 break;
3980
3981 case STV090x_TSMODE_DVBCI:
3982 reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
3983 STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
3984 STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
3985 if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
3986 goto err;
3987 break;
3988
3989 case STV090x_TSMODE_SERIAL_PUNCTURED:
3990 reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
3991 STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
3992 STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
3993 if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
3994 goto err;
3995 break;
3996
3997 case STV090x_TSMODE_SERIAL_CONTINUOUS:
3998 reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
3999 STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
4000 STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
4001 if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
4002 goto err;
4003 break;
4004
4005 default:
4006 break;
4007 }
4008
4009 switch (state->config->ts2_mode) {
4010 case STV090x_TSMODE_PARALLEL_PUNCTURED:
4011 reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
4012 STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
4013 STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
4014 if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
4015 goto err;
4016 break;
4017
4018 case STV090x_TSMODE_DVBCI:
4019 reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
4020 STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x00);
4021 STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
4022 if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
4023 goto err;
4024 break;
4025
4026 case STV090x_TSMODE_SERIAL_PUNCTURED:
4027 reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
4028 STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
4029 STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x00);
4030 if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
4031 goto err;
4032 break;
4033
4034 case STV090x_TSMODE_SERIAL_CONTINUOUS:
4035 reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
4036 STV090x_SETFIELD_Px(reg, TSFIFO_SERIAL_FIELD, 0x01);
4037 STV090x_SETFIELD_Px(reg, TSFIFO_DVBCI_FIELD, 0x01);
4038 if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
4039 goto err;
4040 break;
4041
4042 default:
4043 break;
4044 }
4045 reg = stv090x_read_reg(state, STV090x_P2_TSCFGH);
4046 STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x01);
4047 if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
4048 goto err;
4049 STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x00);
4050 if (stv090x_write_reg(state, STV090x_P2_TSCFGH, reg) < 0)
4051 goto err;
4052
4053 reg = stv090x_read_reg(state, STV090x_P1_TSCFGH);
4054 STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x01);
4055 if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
4056 goto err;
4057 STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 0x00);
4058 if (stv090x_write_reg(state, STV090x_P1_TSCFGH, reg) < 0)
4059 goto err;
4060
4061 return 0;
4062err:
4063 dprintk(FE_ERROR, 1, "I/O error");
4064 return -1;
4065}
4066
/*
 * dvb_frontend .init callback: wake the chip, apply the configured LDPC
 * mode, IQ swap and rolloff, run the board's tuner init hooks (behind
 * the I2C gate), and set up the TS output path.
 *
 * Returns 0 on success, -1 on any I/O/tuner failure.
 */
static int stv090x_init(struct dvb_frontend *fe)
{
	struct stv090x_state *state = fe->demodulator_priv;
	const struct stv090x_config *config = state->config;
	u32 reg;

	if (stv090x_wakeup(fe) < 0) {
		dprintk(FE_ERROR, 1, "Error waking device");
		goto err;
	}

	if (stv090x_ldpc_mode(state, state->demod_mode) < 0)
		goto err;

	/* apply cached IQ inversion and rolloff settings to the demod */
	reg = STV090x_READ_DEMOD(state, TNRCFG2);
	STV090x_SETFIELD_Px(reg, TUN_IQSWAP_FIELD, state->inversion);
	if (STV090x_WRITE_DEMOD(state, TNRCFG2, reg) < 0)
		goto err;
	reg = STV090x_READ_DEMOD(state, DEMOD);
	STV090x_SETFIELD_Px(reg, ROLLOFF_CONTROL_FIELD, state->rolloff);
	if (STV090x_WRITE_DEMOD(state, DEMOD, reg) < 0)
		goto err;

	/* open the I2C gate for the tuner hooks, close it afterwards */
	if (stv090x_i2c_gate_ctrl(fe, 1) < 0)
		goto err;

	if (config->tuner_set_mode) {
		if (config->tuner_set_mode(fe, TUNER_WAKE) < 0)
			goto err;
	}

	if (config->tuner_init) {
		if (config->tuner_init(fe) < 0)
			goto err;
	}

	if (stv090x_i2c_gate_ctrl(fe, 0) < 0)
		goto err;

	if (stv090x_set_tspath(state) < 0)
		goto err;

	return 0;
err:
	dprintk(FE_ERROR, 1, "I/O error");
	return -1;
}
4114
4115static int stv090x_setup(struct dvb_frontend *fe)
4116{
4117 struct stv090x_state *state = fe->demodulator_priv;
4118 const struct stv090x_config *config = state->config;
4119 const struct stv090x_reg *stv090x_initval = NULL;
4120 const struct stv090x_reg *stv090x_cut20_val = NULL;
4121 unsigned long t1_size = 0, t2_size = 0;
4122 u32 reg = 0;
4123
4124 int i;
4125
4126 if (state->device == STV0900) {
4127 dprintk(FE_DEBUG, 1, "Initializing STV0900");
4128 stv090x_initval = stv0900_initval;
4129 t1_size = ARRAY_SIZE(stv0900_initval);
4130 stv090x_cut20_val = stv0900_cut20_val;
4131 t2_size = ARRAY_SIZE(stv0900_cut20_val);
4132 } else if (state->device == STV0903) {
4133 dprintk(FE_DEBUG, 1, "Initializing STV0903");
4134 stv090x_initval = stv0903_initval;
4135 t1_size = ARRAY_SIZE(stv0903_initval);
4136 stv090x_cut20_val = stv0903_cut20_val;
4137 t2_size = ARRAY_SIZE(stv0903_cut20_val);
4138 }
4139
4140 /* STV090x init */
4141 if (STV090x_WRITE_DEMOD(state, DMDISTATE, 0x5c) < 0) /* Stop Demod */
4142 goto err;
4143
4144 msleep(5);
4145
4146 if (STV090x_WRITE_DEMOD(state, TNRCFG, 0x6c) < 0) /* check register ! (No Tuner Mode) */
4147 goto err;
4148
4149 STV090x_SETFIELD_Px(reg, ENARPT_LEVEL_FIELD, config->repeater_level);
4150 if (STV090x_WRITE_DEMOD(state, I2CRPT, reg) < 0) /* repeater OFF */
4151 goto err;
4152
4153 if (stv090x_write_reg(state, STV090x_NCOARSE, 0x13) < 0) /* set PLL divider */
4154 goto err;
4155 msleep(5);
4156 if (stv090x_write_reg(state, STV090x_I2CCFG, 0x08) < 0) /* 1/41 oversampling */
4157 goto err;
4158 if (stv090x_write_reg(state, STV090x_SYNTCTRL, 0x20 | config->clk_mode) < 0) /* enable PLL */
4159 goto err;
4160 msleep(5);
4161
4162 /* write initval */
4163 dprintk(FE_DEBUG, 1, "Setting up initial values");
4164 for (i = 0; i < t1_size; i++) {
4165 if (stv090x_write_reg(state, stv090x_initval[i].addr, stv090x_initval[i].data) < 0)
4166 goto err;
4167 }
4168
4169 state->dev_ver = stv090x_read_reg(state, STV090x_MID);
4170 if (state->dev_ver >= 0x20) {
4171 if (stv090x_write_reg(state, STV090x_TSGENERAL, 0x0c) < 0)
4172 goto err;
4173
4174 /* write cut20_val*/
4175 dprintk(FE_DEBUG, 1, "Setting up Cut 2.0 initial values");
4176 for (i = 0; i < t2_size; i++) {
4177 if (stv090x_write_reg(state, stv090x_cut20_val[i].addr, stv090x_cut20_val[i].data) < 0)
4178 goto err;
4179 }
4180
4181 } else if (state->dev_ver < 0x20) {
4182 dprintk(FE_ERROR, 1, "ERROR: Unsupported Cut: 0x%02x!",
4183 state->dev_ver);
4184
4185 goto err;
4186 } else if (state->dev_ver > 0x30) {
4187 /* we shouldn't bail out from here */
4188 dprintk(FE_ERROR, 1, "INFO: Cut: 0x%02x probably incomplete support!",
4189 state->dev_ver);
4190 }
4191
4192 if (stv090x_write_reg(state, STV090x_TSTRES0, 0x80) < 0)
4193 goto err;
4194 if (stv090x_write_reg(state, STV090x_TSTRES0, 0x00) < 0)
4195 goto err;
4196
4197 stv090x_set_mclk(state, 135000000, config->xtal); /* 135 Mhz */
4198 msleep(5);
4199 if (stv090x_write_reg(state, STV090x_SYNTCTRL, 0x20 | config->clk_mode) < 0)
4200 goto err;
4201 stv090x_get_mclk(state);
4202
4203 return 0;
4204err:
4205 dprintk(FE_ERROR, 1, "I/O error");
4206 return -1;
4207}
4208
/*
 * dvb_frontend operations table.  NOTE(review): .read_ber is wired to
 * stv090x_read_per — confirm whether PER-as-BER is intentional here.
 */
static struct dvb_frontend_ops stv090x_ops = {

	.info = {
		.name			= "STV090x Multistandard",
		.type			= FE_QPSK,
		.frequency_min		= 950000,
		.frequency_max 		= 2150000,
		.frequency_stepsize	= 0,
		.frequency_tolerance	= 0,
		.symbol_rate_min 	= 1000000,
		.symbol_rate_max 	= 45000000,
		.caps			= FE_CAN_INVERSION_AUTO |
					  FE_CAN_FEC_AUTO       |
					  FE_CAN_QPSK           |
					  FE_CAN_2G_MODULATION
	},

	.release			= stv090x_release,
	.init				= stv090x_init,

	.sleep				= stv090x_sleep,
	.get_frontend_algo		= stv090x_frontend_algo,

	.i2c_gate_ctrl			= stv090x_i2c_gate_ctrl,

	.diseqc_send_master_cmd		= stv090x_send_diseqc_msg,
	.diseqc_send_burst		= stv090x_send_diseqc_burst,
	.diseqc_recv_slave_reply	= stv090x_recv_slave_reply,
	.set_tone			= stv090x_set_tone,

	.search				= stv090x_search,
	.read_status			= stv090x_read_status,
	.read_ber			= stv090x_read_per,
	.read_signal_strength		= stv090x_read_signal_strength,
	.read_snr			= stv090x_read_cnr
};
4245
4246
/*
 * stv090x_attach - allocate, configure and probe a frontend instance.
 * @config: board configuration (I2C address, clocking, TS modes, hooks)
 * @i2c:    adapter the demodulator sits on
 * @demod:  which demodulator path of the chip this instance drives
 *
 * Puts the device to sleep, runs the one-time register setup, then
 * wakes it.  Returns the embedded dvb_frontend on success; NULL on
 * allocation or I/O failure (the state is freed before returning).
 */
struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
				    struct i2c_adapter *i2c,
				    enum stv090x_demodulator demod)
{
	struct stv090x_state *state = NULL;

	state = kzalloc(sizeof (struct stv090x_state), GFP_KERNEL);
	if (state == NULL)
		goto error;

	state->verbose				= &verbose;
	state->config				= config;
	state->i2c				= i2c;
	state->frontend.ops			= stv090x_ops;
	state->frontend.demodulator_priv	= state;
	state->demod				= demod;
	state->demod_mode 			= config->demod_mode; /* Single or Dual mode */
	state->device				= config->device;
	state->rolloff				= STV090x_RO_35; /* default */

	/*
	 * NOTE(review): the shared lock is only initialised when demod 0
	 * attaches; attaching DEMODULATOR_1 without DEMODULATOR_0 would use
	 * an uninitialised mutex — confirm attach ordering guarantees.
	 */
	if (state->demod == STV090x_DEMODULATOR_0)
		mutex_init(&demod_lock);

	if (stv090x_sleep(&state->frontend) < 0) {
		dprintk(FE_ERROR, 1, "Error putting device to sleep");
		goto error;
	}

	if (stv090x_setup(&state->frontend) < 0) {
		dprintk(FE_ERROR, 1, "Error setting up device");
		goto error;
	}
	if (stv090x_wakeup(&state->frontend) < 0) {
		dprintk(FE_ERROR, 1, "Error waking device");
		goto error;
	}

	dprintk(FE_ERROR, 1, "Attaching %s demodulator(%d) Cut=0x%02x\n",
	       state->device == STV0900 ? "STV0900" : "STV0903",
	       demod,
	       state->dev_ver);

	return &state->frontend;

error:
	kfree(state);
	return NULL;
}
EXPORT_SYMBOL(stv090x_attach);
/* NOTE(review): the module_param() declaration for 'verbose' is expected
 * earlier in this file (outside this view). */
MODULE_PARM_DESC(verbose, "Set Verbosity level");
MODULE_AUTHOR("Manu Abraham");
MODULE_DESCRIPTION("STV090x Multi-Std Broadcast frontend");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/stv090x.h b/drivers/media/dvb/frontends/stv090x.h
new file mode 100644
index 000000000000..e968c98bb70f
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv090x.h
@@ -0,0 +1,106 @@
1/*
2 STV0900/0903 Multistandard Broadcast Frontend driver
3 Copyright (C) Manu Abraham <abraham.manu@gmail.com>
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#ifndef __STV090x_H
23#define __STV090x_H
24
/* Demodulator path selector; note numbering starts at 1, not 0 */
enum stv090x_demodulator {
	STV090x_DEMODULATOR_0 = 1,
	STV090x_DEMODULATOR_1
};

/* Supported chip variants (STV0903: single demod, STV0900: dual) */
enum stv090x_device {
	STV0903	=  0,
	STV0900,
};

/* Whether both demodulator paths are in use, or only one */
enum stv090x_mode {
	STV090x_DUAL = 0,
	STV090x_SINGLE
};

/* Transport stream output mode per path */
enum stv090x_tsmode {
	STV090x_TSMODE_SERIAL_PUNCTURED	= 1,
	STV090x_TSMODE_SERIAL_CONTINUOUS,
	STV090x_TSMODE_PARALLEL_PUNCTURED,
	STV090x_TSMODE_DVBCI
};

/* Master clock input source */
enum stv090x_clkmode {
	STV090x_CLK_INT = 0, /* Clk i/p = CLKI */
	STV090x_CLK_EXT = 2 /* Clk i/p = XTALI */
};

/* I2C repeater level setting (written to ENARPT_LEVEL) */
enum stv090x_i2crpt {
	STV090x_RPTLEVEL_256	= 0,
	STV090x_RPTLEVEL_128	= 1,
	STV090x_RPTLEVEL_64	= 2,
	STV090x_RPTLEVEL_32	= 3,
	STV090x_RPTLEVEL_16	= 4,
	STV090x_RPTLEVEL_8	= 5,
	STV090x_RPTLEVEL_4	= 6,
	STV090x_RPTLEVEL_2	= 7,
};
62
/*
 * Per-board configuration handed to stv090x_attach().  The tuner_*
 * callbacks are optional hooks into the board's tuner driver; NULL
 * entries are skipped by the driver.
 */
struct stv090x_config {
	enum stv090x_device	device;		/* chip variant */
	enum stv090x_mode	demod_mode;	/* single or dual demod use */
	enum stv090x_clkmode	clk_mode;	/* CLKI or XTALI input */

	u32 xtal; /* default: 8000000 */
	u8 address; /* default: 0x68 */

	u32 ref_clk; /* default: 16000000 FIXME to tuner config */

	u8 ts1_mode;	/* enum stv090x_tsmode for path 1 */
	u8 ts2_mode;	/* enum stv090x_tsmode for path 2 */

	enum stv090x_i2crpt	repeater_level;

	int (*tuner_init) (struct dvb_frontend *fe);
	int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
	int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
	int (*tuner_get_frequency) (struct dvb_frontend *fe, u32 *frequency);
	int (*tuner_set_bandwidth) (struct dvb_frontend *fe, u32 bandwidth);
	int (*tuner_get_bandwidth) (struct dvb_frontend *fe, u32 *bandwidth);
	int (*tuner_set_bbgain) (struct dvb_frontend *fe, u32 gain);
	int (*tuner_get_bbgain) (struct dvb_frontend *fe, u32 *gain);
	int (*tuner_set_refclk)  (struct dvb_frontend *fe, u32 refclk);
	int (*tuner_get_status) (struct dvb_frontend *fe, u32 *status);
};
89
#if defined(CONFIG_DVB_STV090x) || (defined(CONFIG_DVB_STV090x_MODULE) && defined(MODULE))

/* Attach an STV0900/0903 demodulator; returns NULL on failure. */
extern struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
					   struct i2c_adapter *i2c,
					   enum stv090x_demodulator demod);
#else

/* Stub used when the driver is disabled in Kconfig: warn and return NULL */
static inline struct dvb_frontend *stv090x_attach(const struct stv090x_config *config,
						  struct i2c_adapter *i2c,
						  enum stv090x_demodulator demod)
{
	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
	return NULL;
}
#endif /* CONFIG_DVB_STV090x */
105
106#endif /* __STV090x_H */
diff --git a/drivers/media/dvb/frontends/stv090x_priv.h b/drivers/media/dvb/frontends/stv090x_priv.h
new file mode 100644
index 000000000000..5a4a01740d88
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv090x_priv.h
@@ -0,0 +1,269 @@
1/*
2 STV0900/0903 Multistandard Broadcast Frontend driver
3 Copyright (C) Manu Abraham <abraham.manu@gmail.com>
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#ifndef __STV090x_PRIV_H
23#define __STV090x_PRIV_H
24
25#include "dvb_frontend.h"
26
/* Verbosity levels for the 'verbose' module parameter */
#define FE_ERROR				0
#define FE_NOTICE				1
#define FE_INFO					2
#define FE_DEBUG				3
#define FE_DEBUGREG				4

/*
 * Conditional printk: __y is the message level, __z selects whether a
 * KERN_* prefix and trailing newline are added.
 *
 * NOTE(review): in the __z branch the first test (verbose > FE_ERROR)
 * is the weakest condition, so any message that prints at all prints
 * at KERN_ERR and the NOTICE/INFO/DEBUG branches are unreachable.
 * The comparisons were presumably meant to grade on __y — confirm
 * intent before changing.
 */
#define dprintk(__y, __z, format, arg...) do {						\
	if (__z) {									\
		if	((verbose > FE_ERROR) && (verbose > __y))			\
			printk(KERN_ERR "%s: " format "\n", __func__ , ##arg);		\
		else if	((verbose > FE_NOTICE) && (verbose > __y))			\
			printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg);	\
		else if ((verbose > FE_INFO) && (verbose > __y))			\
			printk(KERN_INFO "%s: " format "\n", __func__ , ##arg);		\
		else if ((verbose > FE_DEBUG) && (verbose > __y))			\
			printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg);	\
	} else {									\
		if (verbose > __y)							\
			printk(format, ##arg);						\
	}										\
} while (0)
48
/*
 * Demod-relative register access: DEMODULATOR_1 uses the P2 register
 * bank, DEMODULATOR_0 (and anything else) the P1 bank.
 */
#define STV090x_READ_DEMOD(__state, __reg) ((			\
	(__state)->demod == STV090x_DEMODULATOR_1)	?	\
	stv090x_read_reg(__state, STV090x_P2_##__reg) :		\
	stv090x_read_reg(__state, STV090x_P1_##__reg))

#define STV090x_WRITE_DEMOD(__state, __reg, __data) ((		\
	(__state)->demod == STV090x_DEMODULATOR_1)	?	\
	stv090x_write_reg(__state, STV090x_P2_##__reg, __data) :\
	stv090x_write_reg(__state, STV090x_P1_##__reg, __data))

/*
 * BUGFIX: this macro previously mapped DEMODULATOR_1 to the P1 address,
 * contradicting STV090x_READ_DEMOD/STV090x_WRITE_DEMOD above.  Made
 * consistent: DEMODULATOR_1 -> P2, otherwise -> P1.
 */
#define STV090x_ADDR_OFFST(__state, __x) ((			\
	(__state->demod) == STV090x_DEMODULATOR_1)	?	\
		STV090x_P2_##__x :				\
		STV090x_P1_##__x)

/* Replace the named bit-field in 'mask' with 'val' (token-pasted
 * OFFST/WIDTH constants must exist for 'bitf'). */
#define STV090x_SETFIELD(mask, bitf, val)	(mask = (mask & (~(((1 << STV090x_WIDTH_##bitf) - 1) <<\
							 STV090x_OFFST_##bitf))) | \
							 (val << STV090x_OFFST_##bitf))

/* Extract the named bit-field from 'val' */
#define STV090x_GETFIELD(val, bitf)		((val >> STV090x_OFFST_##bitf) & ((1 << STV090x_WIDTH_##bitf) - 1))


/* Path-relative (Px-prefixed) variants of the field accessors */
#define STV090x_SETFIELD_Px(mask, bitf, val)	(mask = (mask & (~(((1 << STV090x_WIDTH_Px_##bitf) - 1) <<\
							 STV090x_OFFST_Px_##bitf))) | \
							 (val << STV090x_OFFST_Px_##bitf))

#define STV090x_GETFIELD_Px(val, bitf)		((val >> STV090x_OFFST_Px_##bitf) & ((1 << STV090x_WIDTH_Px_##bitf) - 1))

/* Combine two bytes into a 16-bit word: __a is the high byte */
#define MAKEWORD16(__a, __b)			(((__a) << 8) | (__b))

#define MSB(__x)				((__x >> 8) & 0xff)
#define LSB(__x)				(__x & 0xff)


#define STV090x_IQPOWER_THRESHOLD	  30
#define STV090x_SEARCH_AGC2_TH_CUT20	 700
#define STV090x_SEARCH_AGC2_TH_CUT30	1200

/* AGC2 search threshold, keyed on silicon cut */
#define STV090x_SEARCH_AGC2_TH(__ver)	\
	((__ver <= 0x20) ?		\
	STV090x_SEARCH_AGC2_TH_CUT20 :	\
	STV090x_SEARCH_AGC2_TH_CUT30)
92
/* Acquisition/lock result states reported by the search routines */
enum stv090x_signal_state {
	STV090x_NOCARRIER,
	STV090x_NODATA,
	STV090x_DATAOK,
	STV090x_RANGEOK,
	STV090x_OUTOFRANGE
};

/* DVB-S1/DSS puncture rates */
enum stv090x_fec {
	STV090x_PR12 = 0,
	STV090x_PR23,
	STV090x_PR34,
	STV090x_PR45,
	STV090x_PR56,
	STV090x_PR67,
	STV090x_PR78,
	STV090x_PR89,
	STV090x_PR910,
	STV090x_PRERR
};

/* Detected constellation */
enum stv090x_modulation {
	STV090x_QPSK,
	STV090x_8PSK,
	STV090x_16APSK,
	STV090x_32APSK,
	STV090x_UNKNOWN
};

/* DVB-S2 FEC frame length */
enum stv090x_frame {
	STV090x_LONG_FRAME,
	STV090x_SHORT_FRAME
};

/* DVB-S2 pilot symbols on/off */
enum stv090x_pilot {
	STV090x_PILOTS_OFF,
	STV090x_PILOTS_ON
};

/* Spectral rolloff factor (35/25/20 %) */
enum stv090x_rolloff {
	STV090x_RO_35,
	STV090x_RO_25,
	STV090x_RO_20
};

/* IQ spectrum inversion handling */
enum stv090x_inversion {
	STV090x_IQ_AUTO,
	STV090x_IQ_NORMAL,
	STV090x_IQ_SWAP
};

/* DVB-S2 MODCOD indices (modulation + code rate combinations) */
enum stv090x_modcod {
	STV090x_DUMMY_PLF = 0,
	STV090x_QPSK_14,
	STV090x_QPSK_13,
	STV090x_QPSK_25,
	STV090x_QPSK_12,
	STV090x_QPSK_35,
	STV090x_QPSK_23,
	STV090x_QPSK_34,
	STV090x_QPSK_45,
	STV090x_QPSK_56,
	STV090x_QPSK_89,
	STV090x_QPSK_910,
	STV090x_8PSK_35,
	STV090x_8PSK_23,
	STV090x_8PSK_34,
	STV090x_8PSK_56,
	STV090x_8PSK_89,
	STV090x_8PSK_910,
	STV090x_16APSK_23,
	STV090x_16APSK_34,
	STV090x_16APSK_45,
	STV090x_16APSK_56,
	STV090x_16APSK_89,
	STV090x_16APSK_910,
	STV090x_32APSK_34,
	STV090x_32APSK_45,
	STV090x_32APSK_56,
	STV090x_32APSK_89,
	STV090x_32APSK_910,
	STV090x_MODCODE_UNKNOWN
};

/* Which delivery system(s) to search for */
enum stv090x_search {
	STV090x_SEARCH_DSS = 0,
	STV090x_SEARCH_DVBS1,
	STV090x_SEARCH_DVBS2,
	STV090x_SEARCH_AUTO
};

/* Acquisition algorithm variant */
enum stv090x_algo {
	STV090x_BLIND_SEARCH,
	STV090x_COLD_SEARCH,
	STV090x_WARM_SEARCH
};

/* Detected delivery system */
enum stv090x_delsys {
	STV090x_ERROR = 0,
	STV090x_DVBS1 = 1,
	STV090x_DVBS2,
	STV090x_DSS
};
196
/*
 * Carrier-recovery loop coefficients for DVB-S2 long frames, one row per
 * MODCOD; the crl_* columns are keyed on symbol-rate bands (the numeric
 * suffix presumably denotes MSym/s — see the short-frame table below).
 */
struct stv090x_long_frame_crloop {
	enum stv090x_modcod	modcod;

	u8 crl_pilots_on_2;
	u8 crl_pilots_off_2;
	u8 crl_pilots_on_5;
	u8 crl_pilots_off_5;
	u8 crl_pilots_on_10;
	u8 crl_pilots_off_10;
	u8 crl_pilots_on_20;
	u8 crl_pilots_off_20;
	u8 crl_pilots_on_30;
	u8 crl_pilots_off_30;
};

/* Carrier-recovery loop coefficients for DVB-S2 short frames */
struct stv090x_short_frame_crloop {
	enum stv090x_modulation	modulation;

	u8 crl_2;  /* SR <   3M */
	u8 crl_5;  /*  3 < SR <=  7M */
	u8 crl_10; /*  7 < SR <= 15M */
	u8 crl_20; /* 10 < SR <= 25M */
	u8 crl_30; /* 10 < SR <= 45M */
};

/* One address/value pair of a register initialisation table */
struct stv090x_reg {
	u16 addr;
	u8  data;
};

/* Lookup-table row mapping a register reading to a real-world value */
struct stv090x_tab {
	s32 real;
	s32 read;
};
231
/* Per-instance driver state; embeds the dvb_frontend handed to callers */
struct stv090x_state {
	enum stv090x_device		device;		/* STV0900 or STV0903 */
	enum stv090x_demodulator	demod;		/* path driven by this instance */
	enum stv090x_mode		demod_mode;	/* single or dual operation */
	u32				dev_ver;	/* silicon cut (MID register) */

	struct i2c_adapter		*i2c;
	const struct stv090x_config	*config;	/* board config (not owned) */
	struct dvb_frontend		frontend;

	u32				*verbose; /* Cached module verbosity */

	/* parameters of the currently tuned/detected signal */
	enum stv090x_delsys		delsys;
	enum stv090x_fec		fec;
	enum stv090x_modulation		modulation;
	enum stv090x_modcod		modcod;
	enum stv090x_search		search_mode;
	enum stv090x_frame		frame_len;
	enum stv090x_pilot		pilots;
	enum stv090x_rolloff		rolloff;
	enum stv090x_inversion		inversion;
	enum stv090x_algo		algo;

	u32				frequency;
	u32				srate;

	s32				mclk; /* Masterclock Divider factor */
	s32				tuner_bw;

	u32				tuner_refclk;

	s32				search_range;

	/* acquisition timeouts used by the search state machine */
	s32				DemodTimeout;
	s32				FecTimeout;
};
268
269#endif /* __STV090x_PRIV_H */
diff --git a/drivers/media/dvb/frontends/stv090x_reg.h b/drivers/media/dvb/frontends/stv090x_reg.h
new file mode 100644
index 000000000000..57b6abbbd32d
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv090x_reg.h
@@ -0,0 +1,2373 @@
1/*
2 STV0900/0903 Multistandard Broadcast Frontend driver
3 Copyright (C) Manu Abraham <abraham.manu@gmail.com>
4
5 Copyright (C) ST Microelectronics
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/
21
22#ifndef __STV090x_REG_H
23#define __STV090x_REG_H
24
25#define STV090x_MID 0xf100
26#define STV090x_OFFST_MCHIP_IDENT_FIELD 4
27#define STV090x_WIDTH_MCHIP_IDENT_FIELD 4
28#define STV090x_OFFST_MRELEASE_FIELD 0
29#define STV090x_WIDTH_MRELEASE_FIELD 4
30
31#define STV090x_DACR1 0xf113
32#define STV090x_OFFST_DACR1_MODE_FIELD 5
33#define STV090x_WIDTH_DACR1_MODE_FIELD 3
34#define STV090x_OFFST_DACR1_VALUE_FIELD 0
35#define STV090x_WIDTH_DACR1_VALUE_FIELD 4
36
37#define STV090x_DACR2 0xf114
38#define STV090x_OFFST_DACR2_VALUE_FIELD 0
39#define STV090x_WIDTH_DACR2_VALUE_FIELD 8
40
41#define STV090x_OUTCFG 0xf11c
42#define STV090x_OFFST_OUTSERRS1_HZ_FIELD 6
43#define STV090x_WIDTH_OUTSERRS1_HZ_FIELD 1
44#define STV090x_OFFST_OUTSERRS2_HZ_FIELD 5
45#define STV090x_WIDTH_OUTSERRS2_HZ_FIELD 1
46#define STV090x_OFFST_OUTSERRS3_HZ_FIELD 4
47#define STV090x_WIDTH_OUTPARRS3_HZ_FIELD 1
48#define STV090x_OFFST_OUTPARRS3_HZ_FIELD 3
49#define STV090x_WIDTH_OUTPARRS3_HZ_FIELD 1
50
51#define STV090x_MODECFG 0xf11d
52
53#define STV090x_IRQSTATUS3 0xf120
54#define STV090x_OFFST_SPLL_LOCK_FIELD 5
55#define STV090x_WIDTH_SPLL_LOCK_FIELD 1
56#define STV090x_OFFST_SSTREAM_LCK_3_FIELD 4
57#define STV090x_WIDTH_SSTREAM_LCK_3_FIELD 1
58#define STV090x_OFFST_SSTREAM_LCK_2_FIELD 3
59#define STV090x_WIDTH_SSTREAM_LCK_2_FIELD 1
60#define STV090x_OFFST_SSTREAM_LCK_1_FIELD 2
61#define STV090x_WIDTH_SSTREAM_LCK_1_FIELD 1
62#define STV090x_OFFST_SDVBS1_PRF_2_FIELD 1
63#define STV090x_WIDTH_SDVBS1_PRF_2_FIELD 1
64#define STV090x_OFFST_SDVBS1_PRF_1_FIELD 0
65#define STV090x_WIDTH_SDVBS1_PRF_1_FIELD 1
66
67#define STV090x_IRQSTATUS2 0xf121
68#define STV090x_OFFST_SSPY_ENDSIM_3_FIELD 7
69#define STV090x_WIDTH_SSPY_ENDSIM_3_FIELD 1
70#define STV090x_OFFST_SSPY_ENDSIM_2_FIELD 6
71#define STV090x_WIDTH_SSPY_ENDSIM_2_FIELD 1
72#define STV090x_OFFST_SSPY_ENDSIM_1_FIELD 5
73#define STV090x_WIDTH_SSPY_ENDSIM_1_FIELD 1
74#define STV090x_OFFST_SPKTDEL_ERROR_2_FIELD 4
75#define STV090x_WIDTH_SPKTDEL_ERROR_2_FIELD 1
76#define STV090x_OFFST_SPKTDEL_LOCKB_2_FIELD 3
77#define STV090x_WIDTH_SPKTDEL_LOCKB_2_FIELD 1
78#define STV090x_OFFST_SPKTDEL_LOCK_2_FIELD 2
79#define STV090x_WIDTH_SPKTDEL_LOCK_2_FIELD 1
80#define STV090x_OFFST_SPKTDEL_ERROR_1_FIELD 1
81#define STV090x_WIDTH_SPKTDEL_ERROR_1_FIELD 1
82#define STV090x_OFFST_SPKTDEL_LOCKB_1_FIELD 0
83#define STV090x_WIDTH_SPKTDEL_LOCKB_1_FIELD 1
84
85#define STV090x_IRQSTATUS1 0xf122
86#define STV090x_OFFST_SPKTDEL_LOCK_1_FIELD 7
87#define STV090x_WIDTH_SPKTDEL_LOCK_1_FIELD 1
88#define STV090x_OFFST_SDEMOD_LOCKB_2_FIELD 2
89#define STV090x_WIDTH_SDEMOD_LOCKB_2_FIELD 1
90#define STV090x_OFFST_SDEMOD_LOCK_2_FIELD 1
91#define STV090x_WIDTH_SDEMOD_LOCK_2_FIELD 1
92#define STV090x_OFFST_SDEMOD_IRQ_2_FIELD 0
93#define STV090x_WIDTH_SDEMOD_IRQ_2_FIELD 1
94
95#define STV090x_IRQSTATUS0 0xf123
96#define STV090x_OFFST_SDEMOD_LOCKB_1_FIELD 7
97#define STV090x_WIDTH_SDEMOD_LOCKB_1_FIELD 1
98#define STV090x_OFFST_SDEMOD_LOCK_1_FIELD 6
99#define STV090x_WIDTH_SDEMOD_LOCK_1_FIELD 1
100#define STV090x_OFFST_SDEMOD_IRQ_1_FIELD 5
101#define STV090x_WIDTH_SDEMOD_IRQ_1_FIELD 1
102#define STV090x_OFFST_SBCH_ERRFLAG_FIELD 4
103#define STV090x_WIDTH_SBCH_ERRFLAG_FIELD 1
104#define STV090x_OFFST_SDISEQC2RX_IRQ_FIELD 3
105#define STV090x_WIDTH_SDISEQC2RX_IRQ_FIELD 1
106#define STV090x_OFFST_SDISEQC2TX_IRQ_FIELD 2
107#define STV090x_WIDTH_SDISEQC2TX_IRQ_FIELD 1
108#define STV090x_OFFST_SDISEQC1RX_IRQ_FIELD 1
109#define STV090x_WIDTH_SDISEQC1RX_IRQ_FIELD 1
110#define STV090x_OFFST_SDISEQC1TX_IRQ_FIELD 0
111#define STV090x_WIDTH_SDISEQC1TX_IRQ_FIELD 1
112
113#define STV090x_IRQMASK3 0xf124
114#define STV090x_OFFST_MPLL_LOCK_FIELD 5
115#define STV090x_WIDTH_MPLL_LOCK_FIELD 1
116#define STV090x_OFFST_MSTREAM_LCK_3_FIELD 2
117#define STV090x_WIDTH_MSTREAM_LCK_3_FIELD 3
118#define STV090x_OFFST_MSTREAM_LCK_2_FIELD 2
119#define STV090x_WIDTH_MSTREAM_LCK_2_FIELD 3
120#define STV090x_OFFST_MSTREAM_LCK_1_FIELD 2
121#define STV090x_WIDTH_MSTREAM_LCK_1_FIELD 3
122#define STV090x_OFFST_MDVBS1_PRF_2_FIELD 1
123#define STV090x_WIDTH_MDVBS1_PRF_2_FIELD 1
124#define STV090x_OFFST_MDVBS1_PRF_1_FIELD 0
125#define STV090x_WIDTH_MDVBS1_PRF_1_FIELD 1
126
127#define STV090x_IRQMASK2 0xf125
128#define STV090x_OFFST_MSPY_ENDSIM_3_FIELD 5
129#define STV090x_WIDTH_MSPY_ENDSIM_3_FIELD 3
130#define STV090x_OFFST_MSPY_ENDSIM_2_FIELD 5
131#define STV090x_WIDTH_MSPY_ENDSIM_2_FIELD 3
132#define STV090x_OFFST_MSPY_ENDSIM_1_FIELD 5
133#define STV090x_WIDTH_MSPY_ENDSIM_1_FIELD 3
134#define STV090x_OFFST_MPKTDEL_ERROR_2_FIELD 4
135#define STV090x_WIDTH_MPKTDEL_ERROR_2_FIELD 1
136#define STV090x_OFFST_MPKTDEL_LOCKB_2_FIELD 3
137#define STV090x_WIDTH_MPKTDEL_LOCKB_2_FIELD 1
138#define STV090x_OFFST_MPKTDEL_LOCK_2_FIELD 2
139#define STV090x_WIDTH_MPKTDEL_LOCK_2_FIELD 1
140#define STV090x_OFFST_MPKTDEL_ERROR_1_FIELD 1
141#define STV090x_WIDTH_MPKTDEL_ERROR_1_FIELD 1
142#define STV090x_OFFST_MPKTDEL_LOCKB_1_FIELD 0
143#define STV090x_WIDTH_MPKTDEL_LOCKB_1_FIELD 1
144
145#define STV090x_IRQMASK1 0xf126
146#define STV090x_OFFST_MPKTDEL_LOCK_1_FIELD 7
147#define STV090x_WIDTH_MPKTDEL_LOCK_1_FIELD 1
148#define STV090x_OFFST_MEXTPINB2_FIELD 6
149#define STV090x_WIDTH_MEXTPINB2_FIELD 1
150#define STV090x_OFFST_MEXTPIN2_FIELD 5
151#define STV090x_WIDTH_MEXTPIN2_FIELD 1
152#define STV090x_OFFST_MEXTPINB1_FIELD 4
153#define STV090x_WIDTH_MEXTPINB1_FIELD 1
154#define STV090x_OFFST_MEXTPIN1_FIELD 3
155#define STV090x_WIDTH_MEXTPIN1_FIELD 1
156#define STV090x_OFFST_MDEMOD_LOCKB_2_FIELD 2
157#define STV090x_WIDTH_MDEMOD_LOCKB_2_FIELD 1
158#define STV090x_OFFST_MDEMOD_LOCK_2_FIELD 1
159#define STV090x_WIDTH_MDEMOD_LOCK_2_FIELD 1
160#define STV090x_OFFST_MDEMOD_IRQ_2_FIELD 0
161#define STV090x_WIDTH_MDEMOD_IRQ_2_FIELD 1
162
163#define STV090x_IRQMASK0 0xf127
164#define STV090x_OFFST_MDEMOD_LOCKB_1_FIELD 7
165#define STV090x_WIDTH_MDEMOD_LOCKB_1_FIELD 1
166#define STV090x_OFFST_MDEMOD_LOCK_1_FIELD 6
167#define STV090x_WIDTH_MDEMOD_LOCK_1_FIELD 1
168#define STV090x_OFFST_MDEMOD_IRQ_1_FIELD 5
169#define STV090x_WIDTH_MDEMOD_IRQ_1_FIELD 1
170#define STV090x_OFFST_MBCH_ERRFLAG_FIELD 4
171#define STV090x_WIDTH_MBCH_ERRFLAG_FIELD 1
172#define STV090x_OFFST_MDISEQC2RX_IRQ_FIELD 3
173#define STV090x_WIDTH_MDISEQC2RX_IRQ_FIELD 1
174#define STV090x_OFFST_MDISEQC2TX_IRQ_FIELD 2
175#define STV090x_WIDTH_MDISEQC2TX_IRQ_FIELD 1
176#define STV090x_OFFST_MDISEQC1RX_IRQ_FIELD 1
177#define STV090x_WIDTH_MDISEQC1RX_IRQ_FIELD 1
178#define STV090x_OFFST_MDISEQC1TX_IRQ_FIELD 0
179#define STV090x_WIDTH_MDISEQC1TX_IRQ_FIELD 1
180
181#define STV090x_I2CCFG 0xf129
182#define STV090x_OFFST_12C_FASTMODE_FIELD 3
183#define STV090x_WIDTH_12C_FASTMODE_FIELD 1
184#define STV090x_OFFST_12CADDR_INC_FIELD 0
185#define STV090x_WIDTH_12CADDR_INC_FIELD 2
186
187#define STV090x_Px_I2CRPT(__x) (0xf12a + (__x - 1) * 0x1)
188#define STV090x_P1_I2CRPT STV090x_Px_I2CRPT(1)
189#define STV090x_P2_I2CRPT STV090x_Px_I2CRPT(2)
190#define STV090x_OFFST_Px_I2CT_ON_FIELD 7
191#define STV090x_WIDTH_Px_I2CT_ON_FIELD 1
192#define STV090x_OFFST_Px_ENARPT_LEVEL_FIELD 4
193#define STV090x_WIDTH_Px_ENARPT_LEVEL_FIELD 3
194#define STV090x_OFFST_Px_SCLT_DELAY_FIELD 3
195#define STV090x_WIDTH_Px_SCLT_DELAY_FIELD 1
196#define STV090x_OFFST_Px_STOP_ENABLE_FIELD 2
197#define STV090x_WIDTH_Px_STOP_ENABLE_FIELD 1
198#define STV090x_OFFST_Px_STOP_SDAT2SDA_FIELD 1
199#define STV090x_WIDTH_Px_STOP_SDAT2SDA_FIELD 1
200
201#define STV090x_CLKI2CFG 0xf140
202#define STV090x_OFFST_CLKI2_OPD_FIELD 7
203#define STV090x_WIDTH_CLKI2_OPD_FIELD 1
204#define STV090x_OFFST_CLKI2_CONFIG_FIELD 1
205#define STV090x_WIDTH_CLKI2_CONFIG_FIELD 6
206#define STV090x_OFFST_CLKI2_XOR_FIELD 0
207#define STV090x_WIDTH_CLKI2_XOR_FIELD 1
208
209#define STV090x_GPIOxCFG(__x) (0xf141 + (__x - 1))
210#define STV090x_GPIO1CFG STV090x_GPIOxCFG(1)
211#define STV090x_GPIO2CFG STV090x_GPIOxCFG(2)
212#define STV090x_GPIO3CFG STV090x_GPIOxCFG(3)
213#define STV090x_GPIO4CFG STV090x_GPIOxCFG(4)
214#define STV090x_GPIO5CFG STV090x_GPIOxCFG(5)
215#define STV090x_GPIO6CFG STV090x_GPIOxCFG(6)
216#define STV090x_GPIO7CFG STV090x_GPIOxCFG(7)
217#define STV090x_GPIO8CFG STV090x_GPIOxCFG(8)
218#define STV090x_GPIO9CFG STV090x_GPIOxCFG(9)
219#define STV090x_GPIO10CFG STV090x_GPIOxCFG(10)
220#define STV090x_GPIO11CFG STV090x_GPIOxCFG(11)
221#define STV090x_GPIO12CFG STV090x_GPIOxCFG(12)
222#define STV090x_GPIO13CFG STV090x_GPIOxCFG(13)
223#define STV090x_OFFST_GPIOx_OPD_FIELD 7
224#define STV090x_WIDTH_GPIOx_OPD_FIELD 1
225#define STV090x_OFFST_GPIOx_CONFIG_FIELD 1
226#define STV090x_WIDTH_GPIOx_CONFIG_FIELD 6
227#define STV090x_OFFST_GPIOx_XOR_FIELD 0
228#define STV090x_WIDTH_GPIOx_XOR_FIELD 1
229
230#define STV090x_CSxCFG(__x) (0xf14e + __x * 0x1)
231#define STV090x_CS0CFG STV090x_CSxCFG(0)
232#define STV090x_CS1CFG STV090x_CSxCFG(1)
233#define STV090x_OFFST_CSX_OPD_FIELD 7
234#define STV090x_WIDTH_CSX_OPD_FIELD 1
235#define STV090x_OFFST_CSX_CONFIG_FIELD 1
236#define STV090x_WIDTH_CSX_CONFIG_FIELD 6
237#define STV090x_OFFST_CSX_XOR_FIELD 0
238#define STV090x_WIDTH_CSX_XOR_FIELD 1
239
240
241#define STV090x_STDBYCFG 0xf150
242#define STV090x_OFFST_STDBY_OPD_FIELD 7
243#define STV090x_WIDTH_STDBY_OPD_FIELD 1
244#define STV090x_OFFST_STDBY_CONFIG_FIELD 1
245#define STV090x_WIDTH_STDBY_CONFIG_FIELD 6
246#define STV090x_OFFST_STDBY_XOR_FIELD 0
247#define STV090x_WIDTH_STDBY_XOR_FIELD 1
248
249#define STV090x_DIRCLKCFG 0xf151
250#define STV090x_OFFST_DIRCLK_OPD_FIELD 7
251#define STV090x_WIDTH_DIRCLK_OPD_FIELD 1
252#define STV090x_OFFST_DIRCLK_CONFIG_FIELD 1
253#define STV090x_WIDTH_DIRCLK_CONFIG_FIELD 6
254#define STV090x_OFFST_DIRCLK_XOR_FIELD 0
255#define STV090x_WIDTH_DIRCLK_XOR_FIELD 1
256
257
258#define STV090x_AGCRFxCFG(__x) (0xf152 + (__x - 1) * 0x4)
259#define STV090x_AGCRF1CFG STV090x_AGCRFxCFG(1)
260#define STV090x_AGCRF2CFG STV090x_AGCRFxCFG(2)
261#define STV090x_OFFST_AGCRFx_OPD_FIELD 7
262#define STV090x_WIDTH_AGCRFx_OPD_FIELD 1
263#define STV090x_OFFST_AGCRFx_CONFIG_FIELD 1
264#define STV090x_WIDTH_AGCRFx_CONFIG_FIELD 6
265#define STV090x_OFFST_AGCRFx_XOR_FIELD 0
266#define STV090x_WIDTH_AGCRFx_XOR_FIELD 1
267
268#define STV090x_SDATxCFG(__x) (0xf153 + (__x - 1) * 0x4)
269#define STV090x_SDAT1CFG STV090x_SDATxCFG(1)
270#define STV090x_SDAT2CFG STV090x_SDATxCFG(2)
271#define STV090x_OFFST_SDATx_OPD_FIELD 7
272#define STV090x_WIDTH_SDATx_OPD_FIELD 1
273#define STV090x_OFFST_SDATx_CONFIG_FIELD 1
274#define STV090x_WIDTH_SDATx_CONFIG_FIELD 6
275#define STV090x_OFFST_SDATx_XOR_FIELD 0
276#define STV090x_WIDTH_SDATx_XOR_FIELD 1
277
278#define STV090x_SCLTxCFG(__x) (0xf154 + (__x - 1) * 0x4)
279#define STV090x_SCLT1CFG STV090x_SCLTxCFG(1)
280#define STV090x_SCLT2CFG STV090x_SCLTxCFG(2)
281#define STV090x_OFFST_SCLTx_OPD_FIELD 7
282#define STV090x_WIDTH_SCLTx_OPD_FIELD 1
283#define STV090x_OFFST_SCLTx_CONFIG_FIELD 1
284#define STV090x_WIDTH_SCLTx_CONFIG_FIELD 6
285#define STV090x_OFFST_SCLTx_XOR_FIELD 0
286#define STV090x_WIDTH_SCLTx_XOR_FIELD 1
287
288#define STV090x_DISEQCOxCFG(__x) (0xf155 + (__x - 1) * 0x4)
289#define STV090x_DISEQCO1CFG STV090x_DISEQCOxCFG(1)
290#define STV090x_DISEQCO2CFG STV090x_DISEQCOxCFG(2)
291#define STV090x_OFFST_DISEQCOx_OPD_FIELD 7
292#define STV090x_WIDTH_DISEQCOx_OPD_FIELD 1
293#define STV090x_OFFST_DISEQCOx_CONFIG_FIELD 1
294#define STV090x_WIDTH_DISEQCOx_CONFIG_FIELD 6
295#define STV090x_OFFST_DISEQCOx_XOR_FIELD 0
296#define STV090x_WIDTH_DISEQCOx_XOR_FIELD 1
297
298#define STV090x_CLKOUT27CFG 0xf15a
299#define STV090x_OFFST_CLKOUT27_OPD_FIELD 7
300#define STV090x_WIDTH_CLKOUT27_OPD_FIELD 1
301#define STV090x_OFFST_CLKOUT27_CONFIG_FIELD 1
302#define STV090x_WIDTH_CLKOUT27_CONFIG_FIELD 6
303#define STV090x_OFFST_CLKOUT27_XOR_FIELD 0
304#define STV090x_WIDTH_CLKOUT27_XOR_FIELD 1
305
306#define STV090x_ERRORxCFG(__x) (0xf15b + (__x - 1) * 0x5)
307#define STV090x_ERROR1CFG STV090x_ERRORxCFG(1)
308#define STV090x_ERROR2CFG STV090x_ERRORxCFG(2)
309#define STV090x_ERROR3CFG STV090x_ERRORxCFG(3)
310#define STV090x_OFFST_ERRORx_OPD_FIELD 7
311#define STV090x_WIDTH_ERRORx_OPD_FIELD 1
312#define STV090x_OFFST_ERRORx_CONFIG_FIELD 1
313#define STV090x_WIDTH_ERRORx_CONFIG_FIELD 6
314#define STV090x_OFFST_ERRORx_XOR_FIELD 0
315#define STV090x_WIDTH_ERRORx_XOR_FIELD 1
316
317#define STV090x_DPNxCFG(__x) (0xf15c + (__x - 1) * 0x5)
318#define STV090x_DPN1CFG STV090x_DPNxCFG(1)
319#define STV090x_DPN2CFG STV090x_DPNxCFG(2)
320#define STV090x_DPN3CFG STV090x_DPNxCFG(3)
321#define STV090x_OFFST_DPNx_OPD_FIELD 7
322#define STV090x_WIDTH_DPNx_OPD_FIELD 1
323#define STV090x_OFFST_DPNx_CONFIG_FIELD 1
324#define STV090x_WIDTH_DPNx_CONFIG_FIELD 6
325#define STV090x_OFFST_DPNx_XOR_FIELD 0
326#define STV090x_WIDTH_DPNx_XOR_FIELD 1
327
328#define STV090x_STROUTxCFG(__x) (0xf15d + (__x - 1) * 0x5)
329#define STV090x_STROUT1CFG STV090x_STROUTxCFG(1)
330#define STV090x_STROUT2CFG STV090x_STROUTxCFG(2)
331#define STV090x_STROUT3CFG STV090x_STROUTxCFG(3)
332#define STV090x_OFFST_STROUTx_OPD_FIELD 7
333#define STV090x_WIDTH_STROUTx_OPD_FIELD 1
334#define STV090x_OFFST_STROUTx_CONFIG_FIELD 1
335#define STV090x_WIDTH_STROUTx_CONFIG_FIELD 6
336#define STV090x_OFFST_STROUTx_XOR_FIELD 0
337#define STV090x_WIDTH_STROUTx_XOR_FIELD 1
338
339#define STV090x_CLKOUTxCFG(__x) (0xf15e + (__x - 1) * 0x5)
340#define STV090x_CLKOUT1CFG STV090x_CLKOUTxCFG(1)
341#define STV090x_CLKOUT2CFG STV090x_CLKOUTxCFG(2)
342#define STV090x_CLKOUT3CFG STV090x_CLKOUTxCFG(3)
343#define STV090x_OFFST_CLKOUTx_OPD_FIELD 7
344#define STV090x_WIDTH_CLKOUTx_OPD_FIELD 1
345#define STV090x_OFFST_CLKOUTx_CONFIG_FIELD 1
346#define STV090x_WIDTH_CLKOUTx_CONFIG_FIELD 6
347#define STV090x_OFFST_CLKOUTx_XOR_FIELD 0
348#define STV090x_WIDTH_CLKOUTx_XOR_FIELD 1
349
/* DATAxCFG: data pin configuration. Unlike the neighboring groups whose
 * parameter is an ordinal starting at 1, __x here is the pin number itself
 * (71..73), so the base address is rebased by subtracting 71:
 * DATA71CFG = 0xf15f, with consecutive pins 0x5 apart. */
#define STV090x_DATAxCFG(__x) (0xf15f + (__x - 71) * 0x5)
#define STV090x_DATA71CFG STV090x_DATAxCFG(71)
#define STV090x_DATA72CFG STV090x_DATAxCFG(72)
#define STV090x_DATA73CFG STV090x_DATAxCFG(73)
#define STV090x_OFFST_DATAx_OPD_FIELD 7
#define STV090x_WIDTH_DATAx_OPD_FIELD 1
#define STV090x_OFFST_DATAx_CONFIG_FIELD 1
#define STV090x_WIDTH_DATAx_CONFIG_FIELD 6
#define STV090x_OFFST_DATAx_XOR_FIELD 0
#define STV090x_WIDTH_DATAx_XOR_FIELD 1
360
361#define STV090x_NCOARSE 0xf1b3
362#define STV090x_OFFST_M_DIV_FIELD 0
363#define STV090x_WIDTH_M_DIV_FIELD 8
364
/* SYNTCTRL: synthesizer/PLL control register.
 * Fix: the original defined STV090x_WIDTH_SELX1RATIO_FIELD twice and never
 * defined a width for the STOP_PLL field (offset 3); the second definition
 * is corrected to STV090x_WIDTH_STOP_PLL_FIELD. All previously existing
 * macro names remain defined with unchanged values. */
#define STV090x_SYNTCTRL 0xf1b6
#define STV090x_OFFST_STANDBY_FIELD 7
#define STV090x_WIDTH_STANDBY_FIELD 1
#define STV090x_OFFST_BYPASSPLLCORE_FIELD 6
#define STV090x_WIDTH_BYPASSPLLCORE_FIELD 1
#define STV090x_OFFST_SELX1RATIO_FIELD 5
#define STV090x_WIDTH_SELX1RATIO_FIELD 1
#define STV090x_OFFST_STOP_PLL_FIELD 3
#define STV090x_WIDTH_STOP_PLL_FIELD 1
#define STV090x_OFFST_BYPASSPLLFSK_FIELD 2
#define STV090x_WIDTH_BYPASSPLLFSK_FIELD 1
#define STV090x_OFFST_SELOSCI_FIELD 1
#define STV090x_WIDTH_SELOSCI_FIELD 1
#define STV090x_OFFST_BYPASSPLLADC_FIELD 0
#define STV090x_WIDTH_BYPASSPLLADC_FIELD 1
380
381#define STV090x_FILTCTRL 0xf1b7
382#define STV090x_OFFST_INV_CLK135_FIELD 7
383#define STV090x_WIDTH_INV_CLK135_FIELD 1
384#define STV090x_OFFST_SEL_FSKCKDIV_FIELD 2
385#define STV090x_WIDTH_SEL_FSKCKDIV_FIELD 1
386#define STV090x_OFFST_INV_CLKFSK_FIELD 1
387#define STV090x_WIDTH_INV_CLKFSK_FIELD 1
388#define STV090x_OFFST_BYPASS_APPLI_FIELD 0
389#define STV090x_WIDTH_BYPASS_APPLI_FIELD 1
390
391#define STV090x_PLLSTAT 0xf1b8
392#define STV090x_OFFST_PLLLOCK_FIELD 0
393#define STV090x_WIDTH_PLLLOCK_FIELD 1
394
395#define STV090x_STOPCLK1 0xf1c2
396#define STV090x_OFFST_STOP_CLKPKDT2_FIELD 6
397#define STV090x_WIDTH_STOP_CLKPKDT2_FIELD 1
398#define STV090x_OFFST_STOP_CLKPKDT1_FIELD 5
399#define STV090x_WIDTH_STOP_CLKPKDT1_FIELD 1
400#define STV090x_OFFST_STOP_CLKFEC_FIELD 4
401#define STV090x_WIDTH_STOP_CLKFEC_FIELD 1
402#define STV090x_OFFST_STOP_CLKADCI2_FIELD 3
403#define STV090x_WIDTH_STOP_CLKADCI2_FIELD 1
404#define STV090x_OFFST_INV_CLKADCI2_FIELD 2
405#define STV090x_WIDTH_INV_CLKADCI2_FIELD 1
406#define STV090x_OFFST_STOP_CLKADCI1_FIELD 1
407#define STV090x_WIDTH_STOP_CLKADCI1_FIELD 1
408#define STV090x_OFFST_INV_CLKADCI1_FIELD 0
409#define STV090x_WIDTH_INV_CLKADCI1_FIELD 1
410
411#define STV090x_STOPCLK2 0xf1c3
412#define STV090x_OFFST_STOP_CLKSAMP2_FIELD 4
413#define STV090x_WIDTH_STOP_CLKSAMP2_FIELD 1
414#define STV090x_OFFST_STOP_CLKSAMP1_FIELD 3
415#define STV090x_WIDTH_STOP_CLKSAMP1_FIELD 1
416#define STV090x_OFFST_STOP_CLKVIT2_FIELD 2
417#define STV090x_WIDTH_STOP_CLKVIT2_FIELD 1
418#define STV090x_OFFST_STOP_CLKVIT1_FIELD 1
419#define STV090x_WIDTH_STOP_CLKVIT1_FIELD 1
420#define STV090x_OFFST_STOP_CLKTS_FIELD 0
421#define STV090x_WIDTH_STOP_CLKTS_FIELD 1
422
423#define STV090x_TSTTNR0 0xf1df
424#define STV090x_OFFST_SEL_FSK_FIELD 7
425#define STV090x_WIDTH_SEL_FSK_FIELD 1
426#define STV090x_OFFST_FSK_PON_FIELD 2
427#define STV090x_WIDTH_FSK_PON_FIELD 1
428
429#define STV090x_TSTTNR1 0xf1e0
430#define STV090x_OFFST_ADC1_PON_FIELD 1
431#define STV090x_WIDTH_ADC1_PON_FIELD 1
432#define STV090x_OFFST_ADC1_INMODE_FIELD 0
433#define STV090x_WIDTH_ADC1_INMODE_FIELD 1
434
435#define STV090x_TSTTNR2 0xf1e1
436#define STV090x_OFFST_DISEQC1_PON_FIELD 5
437#define STV090x_WIDTH_DISEQC1_PON_FIELD 1
438
439#define STV090x_TSTTNR3 0xf1e2
440#define STV090x_OFFST_ADC2_PON_FIELD 1
441#define STV090x_WIDTH_ADC2_PON_FIELD 1
442#define STV090x_OFFST_ADC2_INMODE_FIELD 0
443#define STV090x_WIDTH_ADC2_INMODE_FIELD 1
444
445#define STV090x_TSTTNR4 0xf1e3
446#define STV090x_OFFST_DISEQC2_PON_FIELD 5
447#define STV090x_WIDTH_DISEQC2_PON_FIELD 1
448
449#define STV090x_FSKTFC2 0xf170
450#define STV090x_OFFST_FSKT_KMOD_FIELD 2
451#define STV090x_WIDTH_FSKT_KMOD_FIELD 6
452#define STV090x_OFFST_FSKT_CAR_FIELD 0
453#define STV090x_WIDTH_FSKT_CAR_FIELD 2
454
455#define STV090x_FSKTFC1 0xf171
456#define STV090x_OFFST_FSKTC1_CAR_FIELD 0
457#define STV090x_WIDTH_FSKTC1_CAR_FIELD 8
458
459#define STV090x_FSKTFC0 0xf172
460#define STV090x_OFFST_FSKTC0_CAR_FIELD 0
461#define STV090x_WIDTH_FSKTC0_CAR_FIELD 8
462
463#define STV090x_FSKTDELTAF1 0xf173
464#define STV090x_OFFST_FSKTF1_DELTAF_FIELD 0
465#define STV090x_WIDTH_FSKTF1_DELTAF_FIELD 4
466
467#define STV090x_FSKTDELTAF0 0xf174
468#define STV090x_OFFST_FSKTF0_DELTAF_FIELD 0
469#define STV090x_WIDTH_FSKTF0_DELTAF_FIELD 8
470
471#define STV090x_FSKTCTRL 0xf175
472#define STV090x_OFFST_FSKT_EN_SGN_FIELD 6
473#define STV090x_WIDTH_FSKT_EN_SGN_FIELD 1
474#define STV090x_OFFST_FSKT_MOD_SGN_FIELD 5
475#define STV090x_WIDTH_FSKT_MOD_SGN_FIELD 1
476#define STV090x_OFFST_FSKT_MOD_EN_FIELD 2
477#define STV090x_WIDTH_FSKT_MOD_EN_FIELD 3
478#define STV090x_OFFST_FSKT_DACMODE_FIELD 0
479#define STV090x_WIDTH_FSKT_DACMODE_FIELD 2
480
481#define STV090x_FSKRFC2 0xf176
482#define STV090x_OFFST_FSKRC2_DETSGN_FIELD 6
483#define STV090x_WIDTH_FSKRC2_DETSGN_FIELD 1
484#define STV090x_OFFST_FSKRC2_OUTSGN_FIELD 5
485#define STV090x_WIDTH_FSKRC2_OUTSGN_FIELD 1
486#define STV090x_OFFST_FSKRC2_KAGC_FIELD 2
487#define STV090x_WIDTH_FSKRC2_KAGC_FIELD 3
488#define STV090x_OFFST_FSKRC2_CAR_FIELD 0
489#define STV090x_WIDTH_FSKRC2_CAR_FIELD 2
490
491#define STV090x_FSKRFC1 0xf177
492#define STV090x_OFFST_FSKRC1_CAR_FIELD 0
493#define STV090x_WIDTH_FSKRC1_CAR_FIELD 8
494
495#define STV090x_FSKRFC0 0xf178
496#define STV090x_OFFST_FSKRC0_CAR_FIELD 0
497#define STV090x_WIDTH_FSKRC0_CAR_FIELD 8
498
499#define STV090x_FSKRK1 0xf179
500#define STV090x_OFFST_FSKR_K1_EXP_FIELD 5
501#define STV090x_WIDTH_FSKR_K1_EXP_FIELD 3
502#define STV090x_OFFST_FSKR_K1_MANT_FIELD 0
503#define STV090x_WIDTH_FSKR_K1_MANT_FIELD 5
504
505#define STV090x_FSKRK2 0xf17a
506#define STV090x_OFFST_FSKR_K2_EXP_FIELD 5
507#define STV090x_WIDTH_FSKR_K2_EXP_FIELD 3
508#define STV090x_OFFST_FSKR_K2_MANT_FIELD 0
509#define STV090x_WIDTH_FSKR_K2_MANT_FIELD 5
510
511#define STV090x_FSKRAGCR 0xf17b
512#define STV090x_OFFST_FSKR_OUTCTL_FIELD 6
513#define STV090x_WIDTH_FSKR_OUTCTL_FIELD 2
514#define STV090x_OFFST_FSKR_AGC_REF_FIELD 0
515#define STV090x_WIDTH_FSKR_AGC_REF_FIELD 6
516
517#define STV090x_FSKRAGC 0xf17c
518#define STV090x_OFFST_FSKR_AGC_ACCU_FIELD 0
519#define STV090x_WIDTH_FSKR_AGC_ACCU_FIELD 8
520
521#define STV090x_FSKRALPHA 0xf17d
522#define STV090x_OFFST_FSKR_ALPHA_EXP_FIELD 2
523#define STV090x_WIDTH_FSKR_ALPHA_EXP_FIELD 3
524#define STV090x_OFFST_FSKR_ALPHA_M_FIELD 0
525#define STV090x_WIDTH_FSKR_ALPHA_M_FIELD 2
526
527#define STV090x_FSKRPLTH1 0xf17e
528#define STV090x_OFFST_FSKR_BETA_FIELD 4
529#define STV090x_WIDTH_FSKR_BETA_FIELD 4
530#define STV090x_OFFST_FSKR_PLL_TRESH1_FIELD 0
531#define STV090x_WIDTH_FSKR_PLL_TRESH1_FIELD 4
532
533#define STV090x_FSKRPLTH0 0xf17f
534#define STV090x_OFFST_FSKR_PLL_TRESH0_FIELD 0
535#define STV090x_WIDTH_FSKR_PLL_TRESH0_FIELD 8
536
537#define STV090x_FSKRDF1 0xf180
538#define STV090x_OFFST_FSKR_DELTAF1_FIELD 0
539#define STV090x_WIDTH_FSKR_DELTAF1_FIELD 5
540
541#define STV090x_FSKRDF0 0xf181
542#define STV090x_OFFST_FSKR_DELTAF0_FIELD 0
543#define STV090x_WIDTH_FSKR_DELTAF0_FIELD 8
544
545#define STV090x_FSKRSTEPP 0xf182
546#define STV090x_OFFST_FSKR_STEP_PLUS_FIELD 0
547#define STV090x_WIDTH_FSKR_STEP_PLUS_FIELD 8
548
549#define STV090x_FSKRSTEPM 0xf183
550#define STV090x_OFFST_FSKR_STEP_MINUS_FIELD 0
551#define STV090x_WIDTH_FSKR_STEP_MINUS_FIELD 8
552
553#define STV090x_FSKRDET1 0xf184
554#define STV090x_OFFST_FSKR_CARDET1_ACCU_FIELD 0
555#define STV090x_WIDTH_FSKR_CARDET1_ACCU_FIELD 4
556
557#define STV090x_FSKRDET0 0xf185
558#define STV090x_OFFST_FSKR_CARDET0_ACCU_FIELD 0
559#define STV090x_WIDTH_FSKR_CARDET0_ACCU_FIELD 8
560
561#define STV090x_FSKRDTH1 0xf186
562#define STV090x_OFFST_FSKR_CARLOSS_THRESH1_FIELD 4
563#define STV090x_WIDTH_FSKR_CARLOSS_THRESH1_FIELD 4
564#define STV090x_OFFST_FSKR_CARDET_THRESH1_FIELD 0
565#define STV090x_WIDTH_FSKR_CARDET_THRESH1_FIELD 4
566
567#define STV090x_FSKRDTH0 0xf187
568#define STV090x_OFFST_FSKR_CARDET_THRESH0_FIELD 0
569#define STV090x_WIDTH_FSKR_CARDET_THRESH0_FIELD 8
570
571#define STV090x_FSKRLOSS 0xf188
572#define STV090x_OFFST_FSKR_CARLOSS_THRESH_FIELD 0
573#define STV090x_WIDTH_FSKR_CARLOSS_THRESH_FIELD 8
574
/* DISTXCTL: DiSEqC transmit control, one instance per path.
 * Note the per-path stride is SUBTRACTED here (and in the following DiSEqC
 * registers): path 2's bank sits 0x10 below path 1's, so P1 = 0xf1a0 and
 * P2 = 0xf190. The minus sign is intentional -- do not "fix" it to +. */
#define STV090x_Px_DISTXCTL(__x) (0xF1A0 - (__x - 1) * 0x10)
#define STV090x_P1_DISTXCTL STV090x_Px_DISTXCTL(1)
#define STV090x_P2_DISTXCTL STV090x_Px_DISTXCTL(2)
#define STV090x_OFFST_Px_TIM_OFF_FIELD 7
#define STV090x_WIDTH_Px_TIM_OFF_FIELD 1
#define STV090x_OFFST_Px_DISEQC_RESET_FIELD 6
#define STV090x_WIDTH_Px_DISEQC_RESET_FIELD 1
#define STV090x_OFFST_Px_TIM_CMD_FIELD 4
#define STV090x_WIDTH_Px_TIM_CMD_FIELD 2
#define STV090x_OFFST_Px_DIS_PRECHARGE_FIELD 3
#define STV090x_WIDTH_Px_DIS_PRECHARGE_FIELD 1
#define STV090x_OFFST_Px_DISTX_MODE_FIELD 0
#define STV090x_WIDTH_Px_DISTX_MODE_FIELD 3
588
589#define STV090x_Px_DISRXCTL(__x) (0xf1a1 - (__x - 1) * 0x10)
590#define STV090x_P1_DISRXCTL STV090x_Px_DISRXCTL(1)
591#define STV090x_P2_DISRXCTL STV090x_Px_DISRXCTL(2)
592#define STV090x_OFFST_Px_RECEIVER_ON_FIELD 7
593#define STV090x_WIDTH_Px_RECEIVER_ON_FIELD 1
594#define STV090x_OFFST_Px_IGNO_SHORT22K_FIELD 6
595#define STV090x_WIDTH_Px_IGNO_SHORT22K_FIELD 1
596#define STV090x_OFFST_Px_ONECHIP_TRX_FIELD 5
597#define STV090x_WIDTH_Px_ONECHIP_TRX_FIELD 1
598#define STV090x_OFFST_Px_EXT_ENVELOP_FIELD 4
599#define STV090x_WIDTH_Px_EXT_ENVELOP_FIELD 1
600#define STV090x_OFFST_Px_PIN_SELECT_FIELD 2
601#define STV090x_WIDTH_Px_PIN_SELECT_FIELD 2
602#define STV090x_OFFST_Px_IRQ_RXEND_FIELD 1
603#define STV090x_WIDTH_Px_IRQ_RXEND_FIELD 1
604#define STV090x_OFFST_Px_IRQ_4NBYTES_FIELD 0
605#define STV090x_WIDTH_Px_IRQ_4NBYTES_FIELD 1
606
607#define STV090x_Px_DISRX_ST0(__x) (0xf1a4 - (__x - 1) * 0x10)
608#define STV090x_P1_DISRX_ST0 STV090x_Px_DISRX_ST0(1)
609#define STV090x_P2_DISRX_ST0 STV090x_Px_DISRX_ST0(2)
610#define STV090x_OFFST_Px_RX_END_FIELD 7
611#define STV090x_WIDTH_Px_RX_END_FIELD 1
612#define STV090x_OFFST_Px_RX_ACTIVE_FIELD 6
613#define STV090x_WIDTH_Px_RX_ACTIVE_FIELD 1
614#define STV090x_OFFST_Px_SHORT_22KHZ_FIELD 5
615#define STV090x_WIDTH_Px_SHORT_22KHZ_FIELD 1
616#define STV090x_OFFST_Px_CONT_TONE_FIELD 4
617#define STV090x_WIDTH_Px_CONT_TONE_FIELD 1
618#define STV090x_OFFST_Px_FIFO_4BREADY_FIELD 3
619#define STV090x_WIDTH_Px_FIFO_4BREADY_FIELD 2
620#define STV090x_OFFST_Px_FIFO_EMPTY_FIELD 2
621#define STV090x_WIDTH_Px_FIFO_EMPTY_FIELD 1
622#define STV090x_OFFST_Px_ABORT_DISRX_FIELD 0
623#define STV090x_WIDTH_Px_ABORT_DISRX_FIELD 1
624
625#define STV090x_Px_DISRX_ST1(__x) (0xf1a5 - (__x - 1) * 0x10)
626#define STV090x_P1_DISRX_ST1 STV090x_Px_DISRX_ST1(1)
627#define STV090x_P2_DISRX_ST1 STV090x_Px_DISRX_ST1(2)
628#define STV090x_OFFST_Px_RX_FAIL_FIELD 7
629#define STV090x_WIDTH_Px_RX_FAIL_FIELD 1
630#define STV090x_OFFST_Px_FIFO_PARITYFAIL_FIELD 6
631#define STV090x_WIDTH_Px_FIFO_PARITYFAIL_FIELD 1
632#define STV090x_OFFST_Px_RX_NONBYTE_FIELD 5
633#define STV090x_WIDTH_Px_RX_NONBYTE_FIELD 1
634#define STV090x_OFFST_Px_FIFO_OVERFLOW_FIELD 4
635#define STV090x_WIDTH_Px_FIFO_OVERFLOW_FIELD 1
636#define STV090x_OFFST_Px_FIFO_BYTENBR_FIELD 0
637#define STV090x_WIDTH_Px_FIFO_BYTENBR_FIELD 4
638
639#define STV090x_Px_DISRXDATA(__x) (0xf1a6 - (__x - 1) * 0x10)
640#define STV090x_P1_DISRXDATA STV090x_Px_DISRXDATA(1)
641#define STV090x_P2_DISRXDATA STV090x_Px_DISRXDATA(2)
642#define STV090x_OFFST_Px_DISRX_DATA_FIELD 0
643#define STV090x_WIDTH_Px_DISRX_DATA_FIELD 8
644
645#define STV090x_Px_DISTXDATA(__x) (0xf1a7 - (__x - 1) * 0x10)
646#define STV090x_P1_DISTXDATA STV090x_Px_DISTXDATA(1)
647#define STV090x_P2_DISTXDATA STV090x_Px_DISTXDATA(2)
648#define STV090x_OFFST_Px_DISEQC_FIFO_FIELD 0
649#define STV090x_WIDTH_Px_DISEQC_FIFO_FIELD 8
650
651#define STV090x_Px_DISTXSTATUS(__x) (0xf1a8 - (__x - 1) * 0x10)
652#define STV090x_P1_DISTXSTATUS STV090x_Px_DISTXSTATUS(1)
653#define STV090x_P2_DISTXSTATUS STV090x_Px_DISTXSTATUS(2)
654#define STV090x_OFFST_Px_TX_FAIL_FIELD 7
655#define STV090x_WIDTH_Px_TX_FAIL_FIELD 1
656#define STV090x_OFFST_Px_FIFO_FULL_FIELD 6
657#define STV090x_WIDTH_Px_FIFO_FULL_FIELD 1
658#define STV090x_OFFST_Px_TX_IDLE_FIELD 5
659#define STV090x_WIDTH_Px_TX_IDLE_FIELD 1
660#define STV090x_OFFST_Px_GAP_BURST_FIELD 4
661#define STV090x_WIDTH_Px_GAP_BURST_FIELD 1
662#define STV090x_OFFST_Px_TXFIFO_BYTES_FIELD 0
663#define STV090x_WIDTH_Px_TXFIFO_BYTES_FIELD 4
664
665#define STV090x_Px_F22TX(__x) (0xf1a9 - (__x - 1) * 0x10)
666#define STV090x_P1_F22TX STV090x_Px_F22TX(1)
667#define STV090x_P2_F22TX STV090x_Px_F22TX(2)
668#define STV090x_OFFST_Px_F22_REG_FIELD 0
669#define STV090x_WIDTH_Px_F22_REG_FIELD 8
670
671#define STV090x_Px_F22RX(__x) (0xf1aa - (__x - 1) * 0x10)
672#define STV090x_P1_F22RX STV090x_Px_F22RX(1)
673#define STV090x_P2_F22RX STV090x_Px_F22RX(2)
674#define STV090x_OFFST_Px_F22RX_REG_FIELD 0
675#define STV090x_WIDTH_Px_F22RX_REG_FIELD 8
676
677#define STV090x_Px_ACRPRESC(__x) (0xf1ac - (__x - 1) * 0x10)
678#define STV090x_P1_ACRPRESC STV090x_Px_ACRPRESC(1)
679#define STV090x_P2_ACRPRESC STV090x_Px_ACRPRESC(2)
680#define STV090x_OFFST_Px_ACR_PRESC_FIELD 0
681#define STV090x_WIDTH_Px_ACR_PRESC_FIELD 3
682
683#define STV090x_Px_ACRDIV(__x) (0xf1ad - (__x - 1) * 0x10)
684#define STV090x_P1_ACRDIV STV090x_Px_ACRDIV(1)
685#define STV090x_P2_ACRDIV STV090x_Px_ACRDIV(2)
686#define STV090x_OFFST_Px_ACR_DIV_FIELD 0
687#define STV090x_WIDTH_Px_ACR_DIV_FIELD 8
688
689#define STV090x_Px_IQCONST(__x) (0xF400 - (__x - 1) * 0x200)
690#define STV090x_P1_IQCONST STV090x_Px_IQCONST(1)
691#define STV090x_P2_IQCONST STV090x_Px_IQCONST(2)
692#define STV090x_OFFST_Px_CONSTEL_SELECT_FIELD 5
693#define STV090x_WIDTH_Px_CONSTEL_SELECT_FIELD 2
694
695#define STV090x_Px_NOSCFG(__x) (0xF401 - (__x - 1) * 0x200)
696#define STV090x_P1_NOSCFG STV090x_Px_NOSCFG(1)
697#define STV090x_P2_NOSCFG STV090x_Px_NOSCFG(2)
698#define STV090x_OFFST_Px_NOSPLH_BETA_FIELD 3
699#define STV090x_WIDTH_Px_NOSPLH_BETA_FIELD 2
700#define STV090x_OFFST_Px_NOSDATA_BETA_FIELD 0
701#define STV090x_WIDTH_Px_NOSDATA_BETA_FIELD 3
702
703#define STV090x_Px_ISYMB(__x) (0xF402 - (__x - 1) * 0x200)
704#define STV090x_P1_ISYMB STV090x_Px_ISYMB(1)
705#define STV090x_P2_ISYMB STV090x_Px_ISYMB(2)
706#define STV090x_OFFST_Px_I_SYMBOL_FIELD 0
707#define STV090x_WIDTH_Px_I_SYMBOL_FIELD 8
708
709#define STV090x_Px_QSYMB(__x) (0xF403 - (__x - 1) * 0x200)
710#define STV090x_P1_QSYMB STV090x_Px_QSYMB(1)
711#define STV090x_P2_QSYMB STV090x_Px_QSYMB(2)
712#define STV090x_OFFST_Px_Q_SYMBOL_FIELD 0
713#define STV090x_WIDTH_Px_Q_SYMBOL_FIELD 8
714
715#define STV090x_Px_AGC1CFG(__x) (0xF404 - (__x - 1) * 0x200)
716#define STV090x_P1_AGC1CFG STV090x_Px_AGC1CFG(1)
717#define STV090x_P2_AGC1CFG STV090x_Px_AGC1CFG(2)
718#define STV090x_OFFST_Px_DC_FROZEN_FIELD 7
719#define STV090x_WIDTH_Px_DC_FROZEN_FIELD 1
720#define STV090x_OFFST_Px_DC_CORRECT_FIELD 6
721#define STV090x_WIDTH_Px_DC_CORRECT_FIELD 1
722#define STV090x_OFFST_Px_AMM_FROZEN_FIELD 5
723#define STV090x_WIDTH_Px_AMM_FROZEN_FIELD 1
724#define STV090x_OFFST_Px_AMM_CORRECT_FIELD 4
725#define STV090x_WIDTH_Px_AMM_CORRECT_FIELD 1
726#define STV090x_OFFST_Px_QUAD_FROZEN_FIELD 3
727#define STV090x_WIDTH_Px_QUAD_FROZEN_FIELD 1
728#define STV090x_OFFST_Px_QUAD_CORRECT_FIELD 2
729#define STV090x_WIDTH_Px_QUAD_CORRECT_FIELD 1
730
/* AGC1CN: AGC1 control register, one instance per demodulator path
 * (path 1 at 0xF406, path 2 at 0xF206 -- each path's register bank sits
 * 0x200 below the previous one).
 * Fix: OFFST/WIDTH for the AGC1_LOCKED flag were swapped in the original
 * (WIDTH 7 / OFFST 1); a 1-bit lock flag at bit 7 must be OFFST 7,
 * WIDTH 1, matching every other status-flag field in this header. */
#define STV090x_Px_AGC1CN(__x) (0xF406 - (__x - 1) * 0x200)
#define STV090x_P1_AGC1CN STV090x_Px_AGC1CN(1)
#define STV090x_P2_AGC1CN STV090x_Px_AGC1CN(2)
#define STV090x_OFFST_Px_AGC1_LOCKED_FIELD 7
#define STV090x_WIDTH_Px_AGC1_LOCKED_FIELD 1
#define STV090x_OFFST_Px_AGC1_MINPOWER_FIELD 4
#define STV090x_WIDTH_Px_AGC1_MINPOWER_FIELD 1
#define STV090x_OFFST_Px_AGCOUT_FAST_FIELD 3
#define STV090x_WIDTH_Px_AGCOUT_FAST_FIELD 1
#define STV090x_OFFST_Px_AGCIQ_BETA_FIELD 0
#define STV090x_WIDTH_Px_AGCIQ_BETA_FIELD 3
742
743#define STV090x_Px_AGC1REF(__x) (0xF407 - (__x - 1) * 0x200)
744#define STV090x_P1_AGC1REF STV090x_Px_AGC1REF(1)
745#define STV090x_P2_AGC1REF STV090x_Px_AGC1REF(2)
746#define STV090x_OFFST_Px_AGCIQ_REF_FIELD 0
747#define STV090x_WIDTH_Px_AGCIQ_REF_FIELD 8
748
749#define STV090x_Px_IDCCOMP(__x) (0xF408 - (__x - 1) * 0x200)
750#define STV090x_P1_IDCCOMP STV090x_Px_IDCCOMP(1)
751#define STV090x_P2_IDCCOMP STV090x_Px_IDCCOMP(2)
752#define STV090x_OFFST_Px_IAVERAGE_ADJ_FIELD 0
753#define STV090x_WIDTH_Px_IAVERAGE_ADJ_FIELD 8
754
755#define STV090x_Px_QDCCOMP(__x) (0xF409 - (__x - 1) * 0x200)
756#define STV090x_P1_QDCCOMP STV090x_Px_QDCCOMP(1)
757#define STV090x_P2_QDCCOMP STV090x_Px_QDCCOMP(2)
758#define STV090x_OFFST_Px_QAVERAGE_ADJ_FIELD 0
759#define STV090x_WIDTH_Px_QAVERAGE_ADJ_FIELD 8
760
761#define STV090x_Px_POWERI(__x) (0xF40A - (__x - 1) * 0x200)
762#define STV090x_P1_POWERI STV090x_Px_POWERI(1)
763#define STV090x_P2_POWERI STV090x_Px_POWERI(2)
764#define STV090x_OFFST_Px_POWER_I_FIELD 0
765#define STV090x_WIDTH_Px_POWER_I_FIELD 8
766
767#define STV090x_Px_POWERQ(__x) (0xF40B - (__x - 1) * 0x200)
768#define STV090x_P1_POWERQ STV090x_Px_POWERQ(1)
769#define STV090x_P2_POWERQ STV090x_Px_POWERQ(2)
770#define STV090x_OFFST_Px_POWER_Q_FIELD 0
771#define STV090x_WIDTH_Px_POWER_Q_FIELD 8
772
773#define STV090x_Px_AGC1AMM(__x) (0xF40C - (__x - 1) * 0x200)
774#define STV090x_P1_AGC1AMM STV090x_Px_AGC1AMM(1)
775#define STV090x_P2_AGC1AMM STV090x_Px_AGC1AMM(2)
776#define STV090x_OFFST_Px_AMM_VALUE_FIELD 0
777#define STV090x_WIDTH_Px_AMM_VALUE_FIELD 8
778
779#define STV090x_Px_AGC1QUAD(__x) (0xF40D - (__x - 1) * 0x200)
780#define STV090x_P1_AGC1QUAD STV090x_Px_AGC1QUAD(1)
781#define STV090x_P2_AGC1QUAD STV090x_Px_AGC1QUAD(2)
782#define STV090x_OFFST_Px_QUAD_VALUE_FIELD 0
783#define STV090x_WIDTH_Px_QUAD_VALUE_FIELD 8
784
785#define STV090x_Px_AGCIQINy(__x, __y) (0xF40F - (__x-1) * 0x200 - __y * 0x1)
786#define STV090x_P1_AGCIQIN0 STV090x_Px_AGCIQINy(1, 0)
787#define STV090x_P1_AGCIQIN1 STV090x_Px_AGCIQINy(1, 1)
788#define STV090x_P2_AGCIQIN0 STV090x_Px_AGCIQINy(2, 0)
789#define STV090x_P2_AGCIQIN1 STV090x_Px_AGCIQINy(2, 1)
790#define STV090x_OFFST_Px_AGCIQ_VALUE_FIELD 0
791#define STV090x_WIDTH_Px_AGCIQ_VALUE_FIELD 8
792
793#define STV090x_Px_DEMOD(__x) (0xF410 - (__x - 1) * 0x200)
794#define STV090x_P1_DEMOD STV090x_Px_DEMOD(1)
795#define STV090x_P2_DEMOD STV090x_Px_DEMOD(2)
796#define STV090x_OFFST_Px_MANUAL_S2ROLLOFF_FIELD 7
797#define STV090x_WIDTH_Px_MANUAL_S2ROLLOFF_FIELD 1
798#define STV090x_OFFST_Px_DEMOD_STOP_FIELD 6
799#define STV090x_WIDTH_Px_DEMOD_STOP_FIELD 1
800#define STV090x_OFFST_Px_SPECINV_CONTROL_FIELD 4
801#define STV090x_WIDTH_Px_SPECINV_CONTROL_FIELD 2
802#define STV090x_OFFST_Px_FORCE_ENASAMP_FIELD 3
803#define STV090x_WIDTH_Px_FORCE_ENASAMP_FIELD 1
804#define STV090x_OFFST_Px_MANUAL_SXROLLOFF_FIELD 2
805#define STV090x_WIDTH_Px_MANUAL_SXROLLOFF_FIELD 1
806#define STV090x_OFFST_Px_ROLLOFF_CONTROL_FIELD 0
807#define STV090x_WIDTH_Px_ROLLOFF_CONTROL_FIELD 2
808
809#define STV090x_Px_DMDMODCOD(__x) (0xF411 - (__x - 1) * 0x200)
810#define STV090x_P1_DMDMODCOD STV090x_Px_DMDMODCOD(1)
811#define STV090x_P2_DMDMODCOD STV090x_Px_DMDMODCOD(2)
812#define STV090x_OFFST_Px_MANUAL_MODCOD_FIELD 7
813#define STV090x_WIDTH_Px_MANUAL_MODCOD_FIELD 1
814#define STV090x_OFFST_Px_DEMOD_MODCOD_FIELD 2
815#define STV090x_WIDTH_Px_DEMOD_MODCOD_FIELD 5
816#define STV090x_OFFST_Px_DEMOD_TYPE_FIELD 0
817#define STV090x_WIDTH_Px_DEMOD_TYPE_FIELD 2
818
819#define STV090x_Px_DSTATUS(__x) (0xF412 - (__x - 1) * 0x200)
820#define STV090x_P1_DSTATUS STV090x_Px_DSTATUS(1)
821#define STV090x_P2_DSTATUS STV090x_Px_DSTATUS(2)
822#define STV090x_OFFST_Px_CAR_LOCK_FIELD 7
823#define STV090x_WIDTH_Px_CAR_LOCK_FIELD 1
824#define STV090x_OFFST_Px_TMGLOCK_QUALITY_FIELD 5
825#define STV090x_WIDTH_Px_TMGLOCK_QUALITY_FIELD 2
826#define STV090x_OFFST_Px_LOCK_DEFINITIF_FIELD 3
827#define STV090x_WIDTH_Px_LOCK_DEFINITIF_FIELD 1
828
829#define STV090x_Px_DSTATUS2(__x) (0xF413 - (__x - 1) * 0x200)
830#define STV090x_P1_DSTATUS2 STV090x_Px_DSTATUS2(1)
831#define STV090x_P2_DSTATUS2 STV090x_Px_DSTATUS2(2)
832#define STV090x_OFFST_Px_DEMOD_DELOCK_FIELD 7
833#define STV090x_WIDTH_Px_DEMOD_DELOCK_FIELD 1
834#define STV090x_OFFST_Px_AGC1_NOSIGNALACK_FIELD 3
835#define STV090x_WIDTH_Px_AGC1_NOSIGNALACK_FIELD 1
836#define STV090x_OFFST_Px_AGC2_OVERFLOW_FIELD 2
837#define STV090x_WIDTH_Px_AGC2_OVERFLOW_FIELD 1
838#define STV090x_OFFST_Px_CFR_OVERFLOW_FIELD 1
839#define STV090x_WIDTH_Px_CFR_OVERFLOW_FIELD 1
840#define STV090x_OFFST_Px_GAMMA_OVERUNDER_FIELD 0
841#define STV090x_WIDTH_Px_GAMMA_OVERUNDER_FIELD 1
842
843#define STV090x_Px_DMDCFGMD(__x) (0xF414 - (__x - 1) * 0x200)
844#define STV090x_P1_DMDCFGMD STV090x_Px_DMDCFGMD(1)
845#define STV090x_P2_DMDCFGMD STV090x_Px_DMDCFGMD(2)
846#define STV090x_OFFST_Px_DVBS2_ENABLE_FIELD 7
847#define STV090x_WIDTH_Px_DVBS2_ENABLE_FIELD 1
848#define STV090x_OFFST_Px_DVBS1_ENABLE_FIELD 6
849#define STV090x_WIDTH_Px_DVBS1_ENABLE_FIELD 1
850#define STV090x_OFFST_Px_CFR_AUTOSCAN_FIELD 5 /* check */
851#define STV090x_WIDTH_Px_CFR_AUTOSCAN_FIELD 1
852#define STV090x_OFFST_Px_SCAN_ENABLE_FIELD 4 /* check */
853#define STV090x_WIDTH_Px_SCAN_ENABLE_FIELD 1
854#define STV090x_OFFST_Px_TUN_AUTOSCAN_FIELD 3
855#define STV090x_WIDTH_Px_TUN_AUTOSCAN_FIELD 1
856#define STV090x_OFFST_Px_NOFORCE_RELOCK_FIELD 2
857#define STV090x_WIDTH_Px_NOFORCE_RELOCK_FIELD 1
858#define STV090x_OFFST_Px_TUN_RNG_FIELD 0
859#define STV090x_WIDTH_Px_TUN_RNG_FIELD 2
860
861#define STV090x_Px_DMDCFG2(__x) (0xF415 - (__x - 1) * 0x200)
862#define STV090x_P1_DMDCFG2 STV090x_Px_DMDCFG2(1)
863#define STV090x_P2_DMDCFG2 STV090x_Px_DMDCFG2(2)
864#define STV090x_OFFST_Px_S1S2_SEQUENTIAL_FIELD 6
865#define STV090x_WIDTH_Px_S1S2_SEQUENTIAL_FIELD 1
866
867#define STV090x_Px_DMDISTATE(__x) (0xF416 - (__x - 1) * 0x200)
868#define STV090x_P1_DMDISTATE STV090x_Px_DMDISTATE(1)
869#define STV090x_P2_DMDISTATE STV090x_Px_DMDISTATE(2)
870#define STV090x_OFFST_Px_I2C_DEMOD_MODE_FIELD 0
871#define STV090x_WIDTH_Px_I2C_DEMOD_MODE_FIELD 5
872
873#define STV090x_Px_DMDTOM(__x) (0xF417 - (__x - 1) * 0x200) /* check */
874#define STV090x_P1_DMDTOM STV090x_Px_DMDTOM(1)
875#define STV090x_P2_DMDTOM STV090x_Px_DMDTOM(2)
876
877#define STV090x_Px_DMDSTATE(__x) (0xF41B - (__x - 1) * 0x200)
878#define STV090x_P1_DMDSTATE STV090x_Px_DMDSTATE(1)
879#define STV090x_P2_DMDSTATE STV090x_Px_DMDSTATE(2)
880#define STV090x_OFFST_Px_HEADER_MODE_FIELD 5
881#define STV090x_WIDTH_Px_HEADER_MODE_FIELD 2
882
883#define STV090x_Px_DMDFLYW(__x) (0xF41C - (__x - 1) * 0x200)
884#define STV090x_P1_DMDFLYW STV090x_Px_DMDFLYW(1)
885#define STV090x_P2_DMDFLYW STV090x_Px_DMDFLYW(2)
886#define STV090x_OFFST_Px_I2C_IRQVAL_FIELD 4
887#define STV090x_WIDTH_Px_I2C_IRQVAL_FIELD 4
888#define STV090x_OFFST_Px_FLYWHEEL_CPT_FIELD 0 /* check */
889#define STV090x_WIDTH_Px_FLYWHEEL_CPT_FIELD 4
890
891#define STV090x_Px_DSTATUS3(__x) (0xF41D - (__x - 1) * 0x200)
892#define STV090x_P1_DSTATUS3 STV090x_Px_DSTATUS3(1)
893#define STV090x_P2_DSTATUS3 STV090x_Px_DSTATUS3(2)
894#define STV090x_OFFST_Px_DEMOD_CFGMODE_FIELD 5
895#define STV090x_WIDTH_Px_DEMOD_CFGMODE_FIELD 2
896
897#define STV090x_Px_DMDCFG3(__x) (0xF41E - (__x - 1) * 0x200)
898#define STV090x_P1_DMDCFG3 STV090x_Px_DMDCFG3(1)
899#define STV090x_P2_DMDCFG3 STV090x_Px_DMDCFG3(2)
900#define STV090x_OFFST_Px_NOSTOP_FIFOFULL_FIELD 3
901#define STV090x_WIDTH_Px_NOSTOP_FIFOFULL_FIELD 1
902
903#define STV090x_Px_DMDCFG4(__x) (0xf41f - (__x - 1) * 0x200)
904#define STV090x_P1_DMDCFG4 STV090x_Px_DMDCFG4(1)
905#define STV090x_P2_DMDCFG4 STV090x_Px_DMDCFG4(2)
906
907#define STV090x_Px_CORRELMANT(__x) (0xF420 - (__x - 1) * 0x200)
908#define STV090x_P1_CORRELMANT STV090x_Px_CORRELMANT(1)
909#define STV090x_P2_CORRELMANT STV090x_Px_CORRELMANT(2)
910#define STV090x_OFFST_Px_CORREL_MANT_FIELD 0
911#define STV090x_WIDTH_Px_CORREL_MANT_FIELD 8
912
913#define STV090x_Px_CORRELABS(__x) (0xF421 - (__x - 1) * 0x200)
914#define STV090x_P1_CORRELABS STV090x_Px_CORRELABS(1)
915#define STV090x_P2_CORRELABS STV090x_Px_CORRELABS(2)
916#define STV090x_OFFST_Px_CORREL_ABS_FIELD 0
917#define STV090x_WIDTH_Px_CORREL_ABS_FIELD 8
918
919#define STV090x_Px_CORRELEXP(__x) (0xF422 - (__x - 1) * 0x200)
920#define STV090x_P1_CORRELEXP STV090x_Px_CORRELEXP(1)
921#define STV090x_P2_CORRELEXP STV090x_Px_CORRELEXP(2)
922#define STV090x_OFFST_Px_CORREL_ABSEXP_FIELD 4
923#define STV090x_WIDTH_Px_CORREL_ABSEXP_FIELD 4
924#define STV090x_OFFST_Px_CORREL_EXP_FIELD 0
925#define STV090x_WIDTH_Px_CORREL_EXP_FIELD 4
926
927#define STV090x_Px_PLHMODCOD(__x) (0xF424 - (__x - 1) * 0x200)
928#define STV090x_P1_PLHMODCOD STV090x_Px_PLHMODCOD(1)
929#define STV090x_P2_PLHMODCOD STV090x_Px_PLHMODCOD(2)
930#define STV090x_OFFST_Px_SPECINV_DEMOD_FIELD 7
931#define STV090x_WIDTH_Px_SPECINV_DEMOD_FIELD 1
932#define STV090x_OFFST_Px_PLH_MODCOD_FIELD 2
933#define STV090x_WIDTH_Px_PLH_MODCOD_FIELD 5
934#define STV090x_OFFST_Px_PLH_TYPE_FIELD 0
935#define STV090x_WIDTH_Px_PLH_TYPE_FIELD 2
936
937#define STV090x_Px_AGCK32(__x) (0xf42b - (__x - 1) * 0x200)
938#define STV090x_P1_AGCK32 STV090x_Px_AGCK32(1)
939#define STV090x_P2_AGCK32 STV090x_Px_AGCK32(2)
940
941#define STV090x_Px_AGC2O(__x) (0xF42C - (__x - 1) * 0x200)
942#define STV090x_P1_AGC2O STV090x_Px_AGC2O(1)
943#define STV090x_P2_AGC2O STV090x_Px_AGC2O(2)
944
945#define STV090x_Px_AGC2REF(__x) (0xF42D - (__x - 1) * 0x200)
946#define STV090x_P1_AGC2REF STV090x_Px_AGC2REF(1)
947#define STV090x_P2_AGC2REF STV090x_Px_AGC2REF(2)
948#define STV090x_OFFST_Px_AGC2_REF_FIELD 0
949#define STV090x_WIDTH_Px_AGC2_REF_FIELD 8
950
951#define STV090x_Px_AGC1ADJ(__x) (0xF42E - (__x - 1) * 0x200)
952#define STV090x_P1_AGC1ADJ STV090x_Px_AGC1ADJ(1)
953#define STV090x_P2_AGC1ADJ STV090x_Px_AGC1ADJ(2)
954#define STV090x_OFFST_Px_AGC1_ADJUSTED_FIELD 0
955#define STV090x_WIDTH_Px_AGC1_ADJUSTED_FIELD 7
956
957#define STV090x_Px_AGC2Iy(__x, __y) (0xF437 - (__x - 1) * 0x200 - __y * 0x1)
958#define STV090x_P1_AGC2I0 STV090x_Px_AGC2Iy(1, 0)
959#define STV090x_P1_AGC2I1 STV090x_Px_AGC2Iy(1, 1)
960#define STV090x_P2_AGC2I0 STV090x_Px_AGC2Iy(2, 0)
961#define STV090x_P2_AGC2I1 STV090x_Px_AGC2Iy(2, 1)
962#define STV090x_OFFST_Px_AGC2_INTEGRATOR_FIELD 0
963#define STV090x_WIDTH_Px_AGC2_INTEGRATOR_FIELD 8
964
965#define STV090x_Px_CARCFG(__x) (0xF438 - (__x - 1) * 0x200)
966#define STV090x_P1_CARCFG STV090x_Px_CARCFG(1)
967#define STV090x_P2_CARCFG STV090x_Px_CARCFG(2)
968#define STV090x_OFFST_Px_EN_CAR2CENTER_FIELD 5
969#define STV090x_WIDTH_Px_EN_CAR2CENTER_FIELD 1
970#define STV090x_OFFST_Px_ROTATON_FIELD 2
971#define STV090x_WIDTH_Px_ROTATON_FIELD 1
972#define STV090x_OFFST_Px_PH_DET_ALGO_FIELD 0
973#define STV090x_WIDTH_Px_PH_DET_ALGO_FIELD 2
974
975#define STV090x_Px_ACLC(__x) (0xF439 - (__x - 1) * 0x200)
976#define STV090x_P1_ACLC STV090x_Px_ACLC(1)
977#define STV090x_P2_ACLC STV090x_Px_ACLC(2)
978#define STV090x_OFFST_Px_CAR_ALPHA_MANT_FIELD 4
979#define STV090x_WIDTH_Px_CAR_ALPHA_MANT_FIELD 2
980#define STV090x_OFFST_Px_CAR_ALPHA_EXP_FIELD 0
981#define STV090x_WIDTH_Px_CAR_ALPHA_EXP_FIELD 4
982
983#define STV090x_Px_BCLC(__x) (0xF43A - (__x - 1) * 0x200)
984#define STV090x_P1_BCLC STV090x_Px_BCLC(1)
985#define STV090x_P2_BCLC STV090x_Px_BCLC(2)
986#define STV090x_OFFST_Px_CAR_BETA_MANT_FIELD 4
987#define STV090x_WIDTH_Px_CAR_BETA_MANT_FIELD 2
988#define STV090x_OFFST_Px_CAR_BETA_EXP_FIELD 0
989#define STV090x_WIDTH_Px_CAR_BETA_EXP_FIELD 4
990
991#define STV090x_Px_CARFREQ(__x) (0xF43D - (__x - 1) * 0x200)
992#define STV090x_P1_CARFREQ STV090x_Px_CARFREQ(1)
993#define STV090x_P2_CARFREQ STV090x_Px_CARFREQ(2)
994#define STV090x_OFFST_Px_KC_COARSE_EXP_FIELD 4
995#define STV090x_WIDTH_Px_KC_COARSE_EXP_FIELD 4
996#define STV090x_OFFST_Px_BETA_FREQ_FIELD 0
997#define STV090x_WIDTH_Px_BETA_FREQ_FIELD 4
998
999#define STV090x_Px_CARHDR(__x) (0xF43E - (__x - 1) * 0x200)
1000#define STV090x_P1_CARHDR STV090x_Px_CARHDR(1)
1001#define STV090x_P2_CARHDR STV090x_Px_CARHDR(2)
1002#define STV090x_OFFST_Px_FREQ_HDR_FIELD 0
1003#define STV090x_WIDTH_Px_FREQ_HDR_FIELD 8
1004
1005#define STV090x_Px_LDT(__x) (0xF43F - (__x - 1) * 0x200)
1006#define STV090x_P1_LDT STV090x_Px_LDT(1)
1007#define STV090x_P2_LDT STV090x_Px_LDT(2)
1008#define STV090x_OFFST_Px_CARLOCK_THRES_FIELD 0
1009#define STV090x_WIDTH_Px_CARLOCK_THRES_FIELD 8
1010
1011#define STV090x_Px_LDT2(__x) (0xF440 - (__x - 1) * 0x200)
1012#define STV090x_P1_LDT2 STV090x_Px_LDT2(1)
1013#define STV090x_P2_LDT2 STV090x_Px_LDT2(2)
1014#define STV090x_OFFST_Px_CARLOCK_THRES2_FIELD 0
1015#define STV090x_WIDTH_Px_CARLOCK_THRES2_FIELD 8
1016
1017#define STV090x_Px_CFRICFG(__x) (0xF441 - (__x - 1) * 0x200)
1018#define STV090x_P1_CFRICFG STV090x_Px_CFRICFG(1)
1019#define STV090x_P2_CFRICFG STV090x_Px_CFRICFG(2)
1020#define STV090x_OFFST_Px_NEG_CFRSTEP_FIELD 0
1021#define STV090x_WIDTH_Px_NEG_CFRSTEP_FIELD 1
1022
1023#define STV090x_Pn_CFRUPy(__x, __y) (0xF443 - (__x - 1) * 0x200 - __y * 0x1)
1024#define STV090x_P1_CFRUP0 STV090x_Pn_CFRUPy(1, 0)
1025#define STV090x_P1_CFRUP1 STV090x_Pn_CFRUPy(1, 1)
1026#define STV090x_P2_CFRUP0 STV090x_Pn_CFRUPy(2, 0)
1027#define STV090x_P2_CFRUP1 STV090x_Pn_CFRUPy(2, 1)
1028#define STV090x_OFFST_Px_CFR_UP_FIELD 0
1029#define STV090x_WIDTH_Px_CFR_UP_FIELD 8
1030
1031#define STV090x_Pn_CFRLOWy(__x, __y) (0xF447 - (__x - 1) * 0x200 - __y * 0x1)
1032#define STV090x_P1_CFRLOW0 STV090x_Pn_CFRLOWy(1, 0)
1033#define STV090x_P1_CFRLOW1 STV090x_Pn_CFRLOWy(1, 1)
1034#define STV090x_P2_CFRLOW0 STV090x_Pn_CFRLOWy(2, 0)
1035#define STV090x_P2_CFRLOW1 STV090x_Pn_CFRLOWy(2, 1)
1036#define STV090x_OFFST_Px_CFR_LOW_FIELD 0
1037#define STV090x_WIDTH_Px_CFR_LOW_FIELD 8
1038
1039#define STV090x_Pn_CFRINITy(__x, __y) (0xF449 - (__x - 1) * 0x200 - __y * 0x1)
1040#define STV090x_P1_CFRINIT0 STV090x_Pn_CFRINITy(1, 0)
1041#define STV090x_P1_CFRINIT1 STV090x_Pn_CFRINITy(1, 1)
1042#define STV090x_P2_CFRINIT0 STV090x_Pn_CFRINITy(2, 0)
1043#define STV090x_P2_CFRINIT1 STV090x_Pn_CFRINITy(2, 1)
1044#define STV090x_OFFST_Px_CFR_INIT_FIELD 0
1045#define STV090x_WIDTH_Px_CFR_INIT_FIELD 8
1046
1047#define STV090x_Px_CFRINC1(__x) (0xF44A - (__x - 1) * 0x200)
1048#define STV090x_P1_CFRINC1 STV090x_Px_CFRINC1(1)
1049#define STV090x_P2_CFRINC1 STV090x_Px_CFRINC1(2)
1050#define STV090x_OFFST_Px_CFR_INC1_FIELD 0
1051#define STV090x_WIDTH_Px_CFR_INC1_FIELD 7
1052
1053#define STV090x_Px_CFRINC0(__x) (0xF44B - (__x - 1) * 0x200)
1054#define STV090x_P1_CFRINC0 STV090x_Px_CFRINC0(1)
1055#define STV090x_P2_CFRINC0 STV090x_Px_CFRINC0(2)
1056#define STV090x_OFFST_Px_CFR_INC0_FIELD 4
1057#define STV090x_WIDTH_Px_CFR_INC0_FIELD 4
1058
1059#define STV090x_Pn_CFRy(__x, __y) (0xF44E - (__x - 1) * 0x200 - __y * 0x1)
1060#define STV090x_P1_CFR0 STV090x_Pn_CFRy(1, 0)
1061#define STV090x_P1_CFR1 STV090x_Pn_CFRy(1, 1)
1062#define STV090x_P1_CFR2 STV090x_Pn_CFRy(1, 2)
1063#define STV090x_P2_CFR0 STV090x_Pn_CFRy(2, 0)
1064#define STV090x_P2_CFR1 STV090x_Pn_CFRy(2, 1)
1065#define STV090x_P2_CFR2 STV090x_Pn_CFRy(2, 2)
1066#define STV090x_OFFST_Px_CAR_FREQ_FIELD 0
1067#define STV090x_WIDTH_Px_CAR_FREQ_FIELD 8
1068
1069#define STV090x_Px_LDI(__x) (0xF44F - (__x - 1) * 0x200)
1070#define STV090x_P1_LDI STV090x_Px_LDI(1)
1071#define STV090x_P2_LDI STV090x_Px_LDI(2)
1072#define STV090x_OFFST_Px_LOCK_DET_INTEGR_FIELD 0
1073#define STV090x_WIDTH_Px_LOCK_DET_INTEGR_FIELD 8
1074
1075#define STV090x_Px_TMGCFG(__x) (0xF450 - (__x - 1) * 0x200)
1076#define STV090x_P1_TMGCFG STV090x_Px_TMGCFG(1)
1077#define STV090x_P2_TMGCFG STV090x_Px_TMGCFG(2)
1078#define STV090x_OFFST_Px_TMGLOCK_BETA_FIELD 6
1079#define STV090x_WIDTH_Px_TMGLOCK_BETA_FIELD 2
1080#define STV090x_OFFST_Px_DO_TIMING_FIELD 4
1081#define STV090x_WIDTH_Px_DO_TIMING_FIELD 1
1082#define STV090x_OFFST_Px_TMG_MINFREQ_FIELD 0
1083#define STV090x_WIDTH_Px_TMG_MINFREQ_FIELD 2
1084
1085#define STV090x_Px_RTC(__x) (0xF451 - (__x - 1) * 0x200)
1086#define STV090x_P1_RTC STV090x_Px_RTC(1)
1087#define STV090x_P2_RTC STV090x_Px_RTC(2)
1088#define STV090x_OFFST_Px_TMGALPHA_EXP_FIELD 4
1089#define STV090x_WIDTH_Px_TMGALPHA_EXP_FIELD 4
1090#define STV090x_OFFST_Px_TMGBETA_EXP_FIELD 0
1091#define STV090x_WIDTH_Px_TMGBETA_EXP_FIELD 4
1092
1093#define STV090x_Px_RTCS2(__x) (0xF452 - (__x - 1) * 0x200)
1094#define STV090x_P1_RTCS2 STV090x_Px_RTCS2(1)
1095#define STV090x_P2_RTCS2 STV090x_Px_RTCS2(2)
1096#define STV090x_OFFST_Px_TMGALPHAS2_EXP_FIELD 4
1097#define STV090x_WIDTH_Px_TMGALPHAS2_EXP_FIELD 4
1098#define STV090x_OFFST_Px_TMGBETAS2_EXP_FIELD 0
1099#define STV090x_WIDTH_Px_TMGBETAS2_EXP_FIELD 4
1100
1101#define STV090x_Px_TMGTHRISE(__x) (0xF453 - (__x - 1) * 0x200)
1102#define STV090x_P1_TMGTHRISE STV090x_Px_TMGTHRISE(1)
1103#define STV090x_P2_TMGTHRISE STV090x_Px_TMGTHRISE(2)
1104#define STV090x_OFFST_Px_TMGLOCK_THRISE_FIELD 0
1105#define STV090x_WIDTH_Px_TMGLOCK_THRISE_FIELD 8
1106
1107#define STV090x_Px_TMGTHFALL(__x) (0xF454 - (__x - 1) * 0x200)
1108#define STV090x_P1_TMGTHFALL STV090x_Px_TMGTHFALL(1)
1109#define STV090x_P2_TMGTHFALL STV090x_Px_TMGTHFALL(2)
1110#define STV090x_OFFST_Px_TMGLOCK_THFALL_FIELD 0
1111#define STV090x_WIDTH_Px_TMGLOCK_THFALL_FIELD 8
1112
1113#define STV090x_Px_SFRUPRATIO(__x) (0xF455 - (__x - 1) * 0x200)
1114#define STV090x_P1_SFRUPRATIO STV090x_Px_SFRUPRATIO(1)
1115#define STV090x_P2_SFRUPRATIO STV090x_Px_SFRUPRATIO(2)
1116#define STV090x_OFFST_Px_SFR_UPRATIO_FIELD 0
1117#define STV090x_WIDTH_Px_SFR_UPRATIO_FIELD 8
1118
1119#define STV090x_Px_SFRLOWRATIO(__x) (0xF456 - (__x - 1) * 0x200)
1120#define STV090x_P1_SFRLOWRATIO STV090x_Px_SFRLOWRATIO(1)
1121#define STV090x_P2_SFRLOWRATIO STV090x_Px_SFRLOWRATIO(2)
1122#define STV090x_OFFST_Px_SFR_LOWRATIO_FIELD 0
1123#define STV090x_WIDTH_Px_SFR_LOWRATIO_FIELD 8
1124
1125#define STV090x_Px_KREFTMG(__x) (0xF458 - (__x - 1) * 0x200)
1126#define STV090x_P1_KREFTMG STV090x_Px_KREFTMG(1)
1127#define STV090x_P2_KREFTMG STV090x_Px_KREFTMG(2)
1128#define STV090x_OFFST_Px_KREF_TMG_FIELD 0
1129#define STV090x_WIDTH_Px_KREF_TMG_FIELD 8
1130
1131#define STV090x_Px_SFRSTEP(__x) (0xF459 - (__x - 1) * 0x200)
1132#define STV090x_P1_SFRSTEP STV090x_Px_SFRSTEP(1)
1133#define STV090x_P2_SFRSTEP STV090x_Px_SFRSTEP(2)
1134#define STV090x_OFFST_Px_SFR_SCANSTEP_FIELD 4
1135#define STV090x_WIDTH_Px_SFR_SCANSTEP_FIELD 4
1136#define STV090x_OFFST_Px_SFR_CENTERSTEP_FIELD 0
1137#define STV090x_WIDTH_Px_SFR_CENTERSTEP_FIELD 4
1138
1139#define STV090x_Px_TMGCFG2(__x) (0xF45A - (__x - 1) * 0x200)
1140#define STV090x_P1_TMGCFG2 STV090x_Px_TMGCFG2(1)
1141#define STV090x_P2_TMGCFG2 STV090x_Px_TMGCFG2(2)
1142#define STV090x_OFFST_Px_SFRRATIO_FINE_FIELD 0
1143#define STV090x_WIDTH_Px_SFRRATIO_FINE_FIELD 1
1144
1145#define STV090x_Px_SFRINIT1(__x) (0xF45E - (__x - 1) * 0x200)
1146#define STV090x_P1_SFRINIT1 STV090x_Px_SFRINIT1(1)
1147#define STV090x_P2_SFRINIT1 STV090x_Px_SFRINIT1(2)
1148#define STV090x_OFFST_Px_SFR_INIT_FIELD 0
1149#define STV090x_WIDTH_Px_SFR_INIT_FIELD 8
1150
1151#define STV090x_Px_SFRINIT0(__x) (0xF45F - (__x - 1) * 0x200)
1152#define STV090x_P1_SFRINIT0 STV090x_Px_SFRINIT0(1)
1153#define STV090x_P2_SFRINIT0 STV090x_Px_SFRINIT0(2)
1154#define STV090x_OFFST_Px_SFR_INIT_FIELD 0
1155#define STV090x_WIDTH_Px_SFR_INIT_FIELD 8
1156
1157#define STV090x_Px_SFRUP1(__x) (0xF460 - (__x - 1) * 0x200)
1158#define STV090x_P1_SFRUP1 STV090x_Px_SFRUP1(1)
1159#define STV090x_P2_SFRUP1 STV090x_Px_SFRUP1(2)
1160#define STV090x_OFFST_Px_SYMB_FREQ_UP1_FIELD 0
1161#define STV090x_WIDTH_Px_SYMB_FREQ_UP1_FIELD 7
1162
1163#define STV090x_Px_SFRUP0(__x) (0xF461 - (__x - 1) * 0x200)
1164#define STV090x_P1_SFRUP0 STV090x_Px_SFRUP0(1)
1165#define STV090x_P2_SFRUP0 STV090x_Px_SFRUP0(2)
1166#define STV090x_OFFST_Px_SYMB_FREQ_UP0_FIELD 0
1167#define STV090x_WIDTH_Px_SYMB_FREQ_UP0_FIELD 8
1168
1169#define STV090x_Px_SFRLOW1(__x) (0xF462 - (__x - 1) * 0x200)
1170#define STV090x_P1_SFRLOW1 STV090x_Px_SFRLOW1(1)
1171#define STV090x_P2_SFRLOW1 STV090x_Px_SFRLOW1(2)
1172#define STV090x_OFFST_Px_SYMB_FREQ_LOW1_FIELD 0
1173#define STV090x_WIDTH_Px_SYMB_FREQ_LOW1_FIELD 7
1174
1175#define STV090x_Px_SFRLOW0(__x) (0xF463 - (__x - 1) * 0x200)
1176#define STV090x_P1_SFRLOW0 STV090x_Px_SFRLOW0(1)
1177#define STV090x_P2_SFRLOW0 STV090x_Px_SFRLOW0(2)
1178#define STV090x_OFFST_Px_SYMB_FREQ_LOW0_FIELD 0
1179#define STV090x_WIDTH_Px_SYMB_FREQ_LOW0_FIELD 8
1180
1181#define STV090x_Px_SFRy(__x, __y) (0xF464 - (__x-1) * 0x200 + (3 - __y))
1182#define STV090x_P1_SFR0 STV090x_Px_SFRy(1, 0)
1183#define STV090x_P1_SFR1 STV090x_Px_SFRy(1, 1)
1184#define STV090x_P1_SFR2 STV090x_Px_SFRy(1, 2)
1185#define STV090x_P1_SFR3 STV090x_Px_SFRy(1, 3)
1186#define STV090x_P2_SFR0 STV090x_Px_SFRy(2, 0)
1187#define STV090x_P2_SFR1 STV090x_Px_SFRy(2, 1)
1188#define STV090x_P2_SFR2 STV090x_Px_SFRy(2, 2)
1189#define STV090x_P2_SFR3 STV090x_Px_SFRy(2, 3)
1190#define STV090x_OFFST_Px_SYMB_FREQ_FIELD 0
1191#define STV090x_WIDTH_Px_SYMB_FREQ_FIELD 32
1192
1193#define STV090x_Px_TMGREG2(__x) (0xF468 - (__x - 1) * 0x200)
1194#define STV090x_P1_TMGREG2 STV090x_Px_TMGREG2(1)
1195#define STV090x_P2_TMGREG2 STV090x_Px_TMGREG2(2)
1196#define STV090x_OFFST_Px_TMGREG_FIELD 0
1197#define STV090x_WIDTH_Px_TMGREG_FIELD 8
1198
1199#define STV090x_Px_TMGREG1(__x) (0xF469 - (__x - 1) * 0x200)
1200#define STV090x_P1_TMGREG1 STV090x_Px_TMGREG1(1)
1201#define STV090x_P2_TMGREG1 STV090x_Px_TMGREG1(2)
1202#define STV090x_OFFST_Px_TMGREG_FIELD 0
1203#define STV090x_WIDTH_Px_TMGREG_FIELD 8
1204
1205#define STV090x_Px_TMGREG0(__x) (0xF46A - (__x - 1) * 0x200)
1206#define STV090x_P1_TMGREG0 STV090x_Px_TMGREG0(1)
1207#define STV090x_P2_TMGREG0 STV090x_Px_TMGREG0(2)
1208#define STV090x_OFFST_Px_TMGREG_FIELD 0
1209#define STV090x_WIDTH_Px_TMGREG_FIELD 8
1210
1211#define STV090x_Px_TMGLOCKy(__x, __y) (0xF46C - (__x - 1) * 0x200 - __y * 0x1)
1212#define STV090x_P1_TMGLOCK0 STV090x_Px_TMGLOCKy(1, 0)
1213#define STV090x_P1_TMGLOCK1 STV090x_Px_TMGLOCKy(1, 1)
1214#define STV090x_P2_TMGLOCK0 STV090x_Px_TMGLOCKy(2, 0)
1215#define STV090x_P2_TMGLOCK1 STV090x_Px_TMGLOCKy(2, 1)
1216#define STV090x_OFFST_Px_TMGLOCK_LEVEL_FIELD 0
1217#define STV090x_WIDTH_Px_TMGLOCK_LEVEL_FIELD 8
1218
1219#define STV090x_Px_TMGOBS(__x) (0xF46D - (__x - 1) * 0x200)
1220#define STV090x_P1_TMGOBS STV090x_Px_TMGOBS(1)
1221#define STV090x_P2_TMGOBS STV090x_Px_TMGOBS(2)
1222#define STV090x_OFFST_Px_ROLLOFF_STATUS_FIELD 6
1223#define STV090x_WIDTH_Px_ROLLOFF_STATUS_FIELD 2
1224
1225#define STV090x_Px_EQUALCFG(__x) (0xF46F - (__x - 1) * 0x200)
1226#define STV090x_P1_EQUALCFG STV090x_Px_EQUALCFG(1)
1227#define STV090x_P2_EQUALCFG STV090x_Px_EQUALCFG(2)
1228#define STV090x_OFFST_Px_EQUAL_ON_FIELD 6
1229#define STV090x_WIDTH_Px_EQUAL_ON_FIELD 1
1230#define STV090x_OFFST_Px_MU_EQUALDFE_FIELD 0
1231#define STV090x_WIDTH_Px_MU_EQUALDFE_FIELD 3
1232
1233#define STV090x_Px_EQUAIy(__x, __y) (0xf470 - (__x - 1) * 0x200 + (__y - 1))
1234#define STV090x_P1_EQUAI1 STV090x_Px_EQUAIy(1, 1)
1235#define STV090x_P1_EQUAI2 STV090x_Px_EQUAIy(1, 2)
1236#define STV090x_P1_EQUAI3 STV090x_Px_EQUAIy(1, 3)
1237#define STV090x_P1_EQUAI4 STV090x_Px_EQUAIy(1, 4)
1238#define STV090x_P1_EQUAI5 STV090x_Px_EQUAIy(1, 5)
1239#define STV090x_P1_EQUAI6 STV090x_Px_EQUAIy(1, 6)
1240#define STV090x_P1_EQUAI7 STV090x_Px_EQUAIy(1, 7)
1241#define STV090x_P1_EQUAI8 STV090x_Px_EQUAIy(1, 8)
1242
1243#define STV090x_P2_EQUAI1 STV090x_Px_EQUAIy(2, 1)
1244#define STV090x_P2_EQUAI2 STV090x_Px_EQUAIy(2, 2)
1245#define STV090x_P2_EQUAI3 STV090x_Px_EQUAIy(2, 3)
1246#define STV090x_P2_EQUAI4 STV090x_Px_EQUAIy(2, 4)
1247#define STV090x_P2_EQUAI5 STV090x_Px_EQUAIy(2, 5)
1248#define STV090x_P2_EQUAI6 STV090x_Px_EQUAIy(2, 6)
1249#define STV090x_P2_EQUAI7 STV090x_Px_EQUAIy(2, 7)
1250#define STV090x_P2_EQUAI8 STV090x_Px_EQUAIy(2, 8)
1251#define STV090x_OFFST_Px_EQUA_ACCIy_FIELD 0
1252#define STV090x_WIDTH_Px_EQUA_ACCIy_FIELD 8
1253
1254#define STV090x_Px_EQUAQy(__x, __y) (0xf471 - (__x - 1) * 0x200 + (__y - 1))
1255#define STV090x_P1_EQUAQ1 STV090x_Px_EQUAQy(1, 1)
1256#define STV090x_P1_EQUAQ2 STV090x_Px_EQUAQy(1, 2)
1257#define STV090x_P1_EQUAQ3 STV090x_Px_EQUAQy(1, 3)
1258#define STV090x_P1_EQUAQ4 STV090x_Px_EQUAQy(1, 4)
1259#define STV090x_P1_EQUAQ5 STV090x_Px_EQUAQy(1, 5)
1260#define STV090x_P1_EQUAQ6 STV090x_Px_EQUAQy(1, 6)
1261#define STV090x_P1_EQUAQ7 STV090x_Px_EQUAQy(1, 7)
1262#define STV090x_P1_EQUAQ8 STV090x_Px_EQUAQy(1, 8)
1263
1264#define STV090x_P2_EQUAQ1 STV090x_Px_EQUAQy(2, 1)
1265#define STV090x_P2_EQUAQ2 STV090x_Px_EQUAQy(2, 2)
1266#define STV090x_P2_EQUAQ3 STV090x_Px_EQUAQy(2, 3)
1267#define STV090x_P2_EQUAQ4 STV090x_Px_EQUAQy(2, 4)
1268#define STV090x_P2_EQUAQ5 STV090x_Px_EQUAQy(2, 5)
1269#define STV090x_P2_EQUAQ6 STV090x_Px_EQUAQy(2, 6)
1270#define STV090x_P2_EQUAQ7 STV090x_Px_EQUAQy(2, 7)
1271#define STV090x_P2_EQUAQ8 STV090x_Px_EQUAQy(2, 8)
1272#define STV090x_OFFST_Px_EQUA_ACCQy_FIELD 0
1273#define STV090x_WIDTH_Px_EQUA_ACCQy_FIELD 8
1274
1275#define STV090x_Px_NNOSDATATy(__x, __y) (0xf481 - (__x - 1) * 0x200 - __y * 0x1)
1276#define STV090x_P1_NNOSDATAT0 STV090x_Px_NNOSDATATy(1, 0)
1277#define STV090x_P1_NNOSDATAT1 STV090x_Px_NNOSDATATy(1, 1)
1278#define STV090x_P2_NNOSDATAT0 STV090x_Px_NNOSDATATy(2, 0)
1279#define STV090x_P2_NNOSDATAT1 STV090x_Px_NNOSDATATy(2, 1)
1280#define STV090x_OFFST_Px_NOSDATAT_NORMED_FIELD 0
1281#define STV090x_WIDTH_Px_NOSDATAT_NORMED_FIELD 8
1282
1283#define STV090x_Px_NNOSDATAy(__x, __y) (0xf483 - (__x - 1) * 0x200 - __y * 0x1)
1284#define STV090x_P1_NNOSDATA0 STV090x_Px_NNOSDATAy(1, 0)
1285#define STV090x_P1_NNOSDATA1 STV090x_Px_NNOSDATAy(1, 1)
1286#define STV090x_P2_NNOSDATA0 STV090x_Px_NNOSDATAy(2, 0)
1287#define STV090x_P2_NNOSDATA1 STV090x_Px_NNOSDATAy(2, 1)
1288#define STV090x_OFFST_Px_NOSDATA_NORMED_FIELD 0
1289#define STV090x_WIDTH_Px_NOSDATA_NORMED_FIELD 8
1290
1291#define STV090x_Px_NNOSPLHTy(__x, __y) (0xf485 - (__x - 1) * 0x200 - __y * 0x1)
1292#define STV090x_P1_NNOSPLHT0 STV090x_Px_NNOSPLHTy(1, 0)
1293#define STV090x_P1_NNOSPLHT1 STV090x_Px_NNOSPLHTy(1, 1)
1294#define STV090x_P2_NNOSPLHT0 STV090x_Px_NNOSPLHTy(2, 0)
1295#define STV090x_P2_NNOSPLHT1 STV090x_Px_NNOSPLHTy(2, 1)
1296#define STV090x_OFFST_Px_NOSPLHT_NORMED_FIELD 0
1297#define STV090x_WIDTH_Px_NOSPLHT_NORMED_FIELD 8
1298
1299#define STV090x_Px_NNOSPLHy(__x, __y) (0xf487 - (__x - 1) * 0x200 - __y * 0x1)
1300#define STV090x_P1_NNOSPLH0 STV090x_Px_NNOSPLHy(1, 0)
1301#define STV090x_P1_NNOSPLH1 STV090x_Px_NNOSPLHy(1, 1)
1302#define STV090x_P2_NNOSPLH0 STV090x_Px_NNOSPLHy(2, 0)
1303#define STV090x_P2_NNOSPLH1 STV090x_Px_NNOSPLHy(2, 1)
1304#define STV090x_OFFST_Px_NOSPLH_NORMED_FIELD 0
1305#define STV090x_WIDTH_Px_NOSPLH_NORMED_FIELD 8
1306
1307#define STV090x_Px_NOSDATATy(__x, __y) (0xf489 - (__x - 1) * 0x200 - __y * 0x1)
1308#define STV090x_P1_NOSDATAT0 STV090x_Px_NOSDATATy(1, 0)
1309#define STV090x_P1_NOSDATAT1 STV090x_Px_NOSDATATy(1, 1)
1310#define STV090x_P2_NOSDATAT0 STV090x_Px_NOSDATATy(2, 0)
1311#define STV090x_P2_NOSDATAT1 STV090x_Px_NOSDATATy(2, 1)
1312#define STV090x_OFFST_Px_NOSDATAT_UNNORMED_FIELD 0
1313#define STV090x_WIDTH_Px_NOSDATAT_UNNORMED_FIELD 8
1314
1315#define STV090x_Px_NOSDATAy(__x, __y) (0xf48b - (__x - 1) * 0x200 - __y * 0x1)
1316#define STV090x_P1_NOSDATA0 STV090x_Px_NOSDATAy(1, 0)
1317#define STV090x_P1_NOSDATA1 STV090x_Px_NOSDATAy(1, 1)
1318#define STV090x_P2_NOSDATA0 STV090x_Px_NOSDATAy(2, 0)
1319#define STV090x_P2_NOSDATA1 STV090x_Px_NOSDATAy(2, 1)
1320#define STV090x_OFFST_Px_NOSDATA_UNNORMED_FIELD 0
1321#define STV090x_WIDTH_Px_NOSDATA_UNNORMED_FIELD 8
1322
1323#define STV090x_Px_NOSPLHTy(__x, __y) (0xf48d - (__x - 1) * 0x200 - __y * 0x1)
1324#define STV090x_P1_NOSPLHT0 STV090x_Px_NOSPLHTy(1, 0)
1325#define STV090x_P1_NOSPLHT1 STV090x_Px_NOSPLHTy(1, 1)
1326#define STV090x_P2_NOSPLHT0 STV090x_Px_NOSPLHTy(2, 0)
1327#define STV090x_P2_NOSPLHT1 STV090x_Px_NOSPLHTy(2, 1)
1328#define STV090x_OFFST_Px_NOSPLHT_UNNORMED_FIELD 0
1329#define STV090x_WIDTH_Px_NOSPLHT_UNNORMED_FIELD 8
1330
/* Unnormalized PLH noise estimate, low byte(s): y = 0 is LSB at 0xf48f. */
#define STV090x_Px_NOSPLHy(__x, __y)		(0xf48f - (__x - 1) * 0x200 - __y * 0x1)
/*
 * NOTE: the "STv090x_" spellings below are a historical typo (lowercase
 * 'v'); they are kept so existing callers continue to compile.  New code
 * should use the correctly spelled STV090x_ aliases that follow.
 */
#define STv090x_P1_NOSPLH0			STV090x_Px_NOSPLHy(1, 0)
#define STv090x_P1_NOSPLH1			STV090x_Px_NOSPLHy(1, 1)
#define STv090x_P2_NOSPLH0			STV090x_Px_NOSPLHy(2, 0)
#define STv090x_P2_NOSPLH1			STV090x_Px_NOSPLHy(2, 1)
/* Correctly spelled aliases. */
#define STV090x_P1_NOSPLH0			STV090x_Px_NOSPLHy(1, 0)
#define STV090x_P1_NOSPLH1			STV090x_Px_NOSPLHy(1, 1)
#define STV090x_P2_NOSPLH0			STV090x_Px_NOSPLHy(2, 0)
#define STV090x_P2_NOSPLH1			STV090x_Px_NOSPLHy(2, 1)
#define STV090x_OFFST_Px_NOSPLH_UNNORMED_FIELD	0
#define STV090x_WIDTH_Px_NOSPLH_UNNORMED_FIELD	8
1338
1339#define STV090x_Px_CAR2CFG(__x) (0xf490 - (__x - 1) * 0x200)
1340#define STV090x_P1_CAR2CFG STV090x_Px_CAR2CFG(1)
1341#define STV090x_P2_CAR2CFG STV090x_Px_CAR2CFG(2)
1342#define STV090x_OFFST_Px_PN4_SELECT_FIELD 6
1343#define STV090x_WIDTH_Px_PN4_SELECT_FIELD 1
1344#define STV090x_OFFST_Px_CFR2_STOPDVBS1_FIELD 5
1345#define STV090x_WIDTH_Px_CFR2_STOPDVBS1_FIELD 1
1346#define STV090x_OFFST_Px_ROTA2ON_FIELD 2
1347#define STV090x_WIDTH_Px_ROTA2ON_FIELD 1
1348#define STV090x_OFFST_Px_PH_DET_ALGO2_FIELD 0
1349#define STV090x_WIDTH_Px_PH_DET_ALGO2_FIELD 2
1350
1351#define STV090x_Px_ACLC2(__x) (0xf491 - (__x - 1) * 0x200)
1352#define STV090x_P1_ACLC2 STV090x_Px_ACLC2(1)
1353#define STV090x_P2_ACLC2 STV090x_Px_ACLC2(2)
1354#define STV090x_OFFST_Px_CAR2_ALPHA_MANT_FIELD 4
1355#define STV090x_WIDTH_Px_CAR2_ALPHA_MANT_FIELD 2
1356#define STV090x_OFFST_Px_CAR2_ALPHA_EXP_FIELD 0
1357#define STV090x_WIDTH_Px_CAR2_ALPHA_EXP_FIELD 4
1358
1359#define STV090x_Px_BCLC2(__x) (0xf492 - (__x - 1) * 0x200)
1360#define STV090x_P1_BCLC2 STV090x_Px_BCLC2(1)
1361#define STV090x_P2_BCLC2 STV090x_Px_BCLC2(2)
1362#define STV090x_OFFST_Px_CAR2_BETA_MANT_FIELD 4
1363#define STV090x_WIDTH_Px_CAR2_BETA_MANT_FIELD 2
1364#define STV090x_OFFST_Px_CAR2_BETA_EXP_FIELD 0
1365#define STV090x_WIDTH_Px_CAR2_BETA_EXP_FIELD 4
1366
1367#define STV090x_Px_ACLC2S2Q(__x) (0xf497 - (__x - 1) * 0x200)
1368#define STV090x_P1_ACLC2S2Q STV090x_Px_ACLC2S2Q(1)
1369#define STV090x_P2_ACLC2S2Q STV090x_Px_ACLC2S2Q(2)
1370#define STV090x_OFFST_Px_ENAB_SPSKSYMB_FIELD 7
1371#define STV090x_WIDTH_Px_ENAB_SPSKSYMB_FIELD 1
1372#define STV090x_OFFST_Px_CAR2S2_Q_ALPH_M_FIELD 4
1373#define STV090x_WIDTH_Px_CAR2S2_Q_ALPH_M_FIELD 2
1374#define STV090x_OFFST_Px_CAR2S2_Q_ALPH_E_FIELD 0
1375#define STV090x_WIDTH_Px_CAR2S2_Q_ALPH_E_FIELD 4
1376
1377#define STV090x_Px_ACLC2S28(__x) (0xf498 - (__x - 1) * 0x200)
1378#define STV090x_P1_ACLC2S28 STV090x_Px_ACLC2S28(1)
1379#define STV090x_P2_ACLC2S28 STV090x_Px_ACLC2S28(2)
1380#define STV090x_OFFST_Px_CAR2S2_8_ALPH_M_FIELD 4
1381#define STV090x_WIDTH_Px_CAR2S2_8_ALPH_M_FIELD 2
1382#define STV090x_OFFST_Px_CAR2S2_8_ALPH_E_FIELD 0
1383#define STV090x_WIDTH_Px_CAR2S2_8_ALPH_E_FIELD 4
1384
1385#define STV090x_Px_ACLC2S216A(__x) (0xf499 - (__x - 1) * 0x200)
1386#define STV090x_P1_ACLC2S216A STV090x_Px_ACLC2S216A(1)
1387#define STV090x_P2_ACLC2S216A STV090x_Px_ACLC2S216A(2)
1388#define STV090x_OFFST_Px_CAR2S2_16A_ALPH_M_FIELD 4
1389#define STV090x_WIDTH_Px_CAR2S2_16A_ALPH_M_FIELD 2
1390#define STV090x_OFFST_Px_CAR2S2_16A_ALPH_E_FIELD 0
1391#define STV090x_WIDTH_Px_CAR2S2_16A_ALPH_E_FIELD 4
1392
/*
 * Carrier loop 2 alpha for DVB-S2 32APSK.  The previous address 0xf499
 * collided with ACLC2S216A; per the STV0900 register map the 32APSK
 * alpha register lives at 0xf49a.
 */
#define STV090x_Px_ACLC2S232A(__x)		(0xf49a - (__x - 1) * 0x200)
#define STV090x_P1_ACLC2S232A			STV090x_Px_ACLC2S232A(1)
#define STV090x_P2_ACLC2S232A			STV090x_Px_ACLC2S232A(2)
#define STV090x_OFFST_Px_CAR2S2_32A_ALPH_M_FIELD	4
#define STV090x_WIDTH_Px_CAR2S2_32A_ALPH_M_FIELD	2
#define STV090x_OFFST_Px_CAR2S2_32A_ALPH_E_FIELD	0
#define STV090x_WIDTH_Px_CAR2S2_32A_ALPH_E_FIELD	4
1400
1401#define STV090x_Px_BCLC2S2Q(__x) (0xf49c - (__x - 1) * 0x200)
1402#define STV090x_P1_BCLC2S2Q STV090x_Px_BCLC2S2Q(1)
1403#define STV090x_P2_BCLC2S2Q STV090x_Px_BCLC2S2Q(2)
1404#define STV090x_OFFST_Px_CAR2S2_Q_BETA_M_FIELD 4
1405#define STV090x_WIDTH_Px_CAR2S2_Q_BETA_M_FIELD 2
1406#define STV090x_OFFST_Px_CAR2S2_Q_BETA_E_FIELD 0
1407#define STV090x_WIDTH_Px_CAR2S2_Q_BETA_E_FIELD 4
1408
/* Carrier loop 2 beta for DVB-S2 8PSK. */
#define STV090x_Px_BCLC2S28(__x)		(0xf49d - (__x - 1) * 0x200)
#define STV090x_P1_BCLC2S28			STV090x_Px_BCLC2S28(1)
/* Was STV090x_Px_BCLC2S28(1): copy-paste bug that aliased the path-1 register. */
#define STV090x_P2_BCLC2S28			STV090x_Px_BCLC2S28(2)
#define STV090x_OFFST_Px_CAR2S2_8_BETA_M_FIELD	4
#define STV090x_WIDTH_Px_CAR2S2_8_BETA_M_FIELD	2
#define STV090x_OFFST_Px_CAR2S2_8_BETA_E_FIELD	0
#define STV090x_WIDTH_Px_CAR2S2_8_BETA_E_FIELD	4
1416
/*
 * Carrier loop 2 beta for DVB-S2 16APSK.  The previous address 0xf49d
 * collided with BCLC2S28; per the STV0900 register map the 16APSK beta
 * register lives at 0xf49e.  Also fixes P2 expanding with (1).
 */
#define STV090x_Px_BCLC2S216A(__x)		(0xf49e - (__x - 1) * 0x200)
#define STV090x_P1_BCLC2S216A			STV090x_Px_BCLC2S216A(1)
#define STV090x_P2_BCLC2S216A			STV090x_Px_BCLC2S216A(2)
#define STV090x_OFFST_Px_CAR2S2_16A_BETA_M_FIELD	4
#define STV090x_WIDTH_Px_CAR2S2_16A_BETA_M_FIELD	2
#define STV090x_OFFST_Px_CAR2S2_16A_BETA_E_FIELD	0
#define STV090x_WIDTH_Px_CAR2S2_16A_BETA_E_FIELD	4
1424
/*
 * Carrier loop 2 beta for DVB-S2 32APSK.  The previous address 0xf49d
 * collided with BCLC2S28; per the STV0900 register map the 32APSK beta
 * register lives at 0xf49f.  Also fixes P2 expanding with (1).
 */
#define STV090x_Px_BCLC2S232A(__x)		(0xf49f - (__x - 1) * 0x200)
#define STV090x_P1_BCLC2S232A			STV090x_Px_BCLC2S232A(1)
#define STV090x_P2_BCLC2S232A			STV090x_Px_BCLC2S232A(2)
#define STV090x_OFFST_Px_CAR2S2_32A_BETA_M_FIELD	4
#define STV090x_WIDTH_Px_CAR2S2_32A_BETA_M_FIELD	2
#define STV090x_OFFST_Px_CAR2S2_32A_BETA_E_FIELD	0
#define STV090x_WIDTH_Px_CAR2S2_32A_BETA_E_FIELD	4
1432
1433#define STV090x_Px_PLROOT2(__x) (0xf4ac - (__x - 1) * 0x200)
1434#define STV090x_P1_PLROOT2 STV090x_Px_PLROOT2(1)
1435#define STV090x_P2_PLROOT2 STV090x_Px_PLROOT2(2)
1436#define STV090x_OFFST_Px_PLSCRAMB_MODE_FIELD 2
1437#define STV090x_WIDTH_Px_PLSCRAMB_MODE_FIELD 2
1438#define STV090x_OFFST_Px_PLSCRAMB_ROOT_FIELD 0
1439#define STV090x_WIDTH_Px_PLSCRAMB_ROOT_FIELD 2
1440
1441#define STV090x_Px_PLROOT1(__x) (0xf4ad - (__x - 1) * 0x200)
1442#define STV090x_P1_PLROOT1 STV090x_Px_PLROOT1(1)
1443#define STV090x_P2_PLROOT1 STV090x_Px_PLROOT1(2)
1444#define STV090x_OFFST_Px_PLSCRAMB_ROOT1_FIELD 0
1445#define STV090x_WIDTH_Px_PLSCRAMB_ROOT1_FIELD 8
1446
1447#define STV090x_Px_PLROOT0(__x) (0xf4ae - (__x - 1) * 0x200)
1448#define STV090x_P1_PLROOT0 STV090x_Px_PLROOT0(1)
1449#define STV090x_P2_PLROOT0 STV090x_Px_PLROOT0(2)
1450#define STV090x_OFFST_Px_PLSCRAMB_ROOT0_FIELD 0
1451#define STV090x_WIDTH_Px_PLSCRAMB_ROOT0_FIELD 8
1452
1453#define STV090x_Px_MODCODLST0(__x) (0xf4b0 - (__x - 1) * 0x200) /* check */
1454#define STV090x_P1_MODCODLST0 STV090x_Px_MODCODLST0(1)
1455#define STV090x_P2_MODCODLST0 STV090x_Px_MODCODLST0(2)
1456
#define STV090x_Px_MODCODLST1(__x)		(0xf4b1 - (__x - 1) * 0x200)
#define STV090x_P1_MODCODLST1			STV090x_Px_MODCODLST1(1)
#define STV090x_P2_MODCODLST1			STV090x_Px_MODCODLST1(2)
#define STV090x_OFFST_Px_DIS_MODCOD29_FIELD	4
/*
 * "MODCOD29T" is a typo (stray T) kept so existing callers compile; it
 * broke the usual OFFST/WIDTH naming pair.  Prefer the alias below.
 */
#define STV090x_WIDTH_Px_DIS_MODCOD29T_FIELD	4
#define STV090x_WIDTH_Px_DIS_MODCOD29_FIELD	4
#define STV090x_OFFST_Px_DIS_32PSK_9_10_FIELD	0
#define STV090x_WIDTH_Px_DIS_32PSK_9_10_FIELD	4
1464
1465#define STV090x_Px_MODCODLST2(__x) (0xf4b2 - (__x - 1) * 0x200)
1466#define STV090x_P1_MODCODLST2 STV090x_Px_MODCODLST2(1)
1467#define STV090x_P2_MODCODLST2 STV090x_Px_MODCODLST2(2)
1468#define STV090x_OFFST_Px_DIS_32PSK_8_9_FIELD 4
1469#define STV090x_WIDTH_Px_DIS_32PSK_8_9_FIELD 4
1470#define STV090x_OFFST_Px_DIS_32PSK_5_6_FIELD 0
1471#define STV090x_WIDTH_Px_DIS_32PSK_5_6_FIELD 4
1472
1473#define STV090x_Px_MODCODLST3(__x) (0xf4b3 - (__x - 1) * 0x200)
1474#define STV090x_P1_MODCODLST3 STV090x_Px_MODCODLST3(1)
1475#define STV090x_P2_MODCODLST3 STV090x_Px_MODCODLST3(2)
1476#define STV090x_OFFST_Px_DIS_32PSK_4_5_FIELD 4
1477#define STV090x_WIDTH_Px_DIS_32PSK_4_5_FIELD 4
1478#define STV090x_OFFST_Px_DIS_32PSK_3_4_FIELD 0
1479#define STV090x_WIDTH_Px_DIS_32PSK_3_4_FIELD 4
1480
1481#define STV090x_Px_MODCODLST4(__x) (0xf4b4 - (__x - 1) * 0x200)
1482#define STV090x_P1_MODCODLST4 STV090x_Px_MODCODLST4(1)
1483#define STV090x_P2_MODCODLST4 STV090x_Px_MODCODLST4(2)
1484#define STV090x_OFFST_Px_DIS_16PSK_9_10_FIELD 4
1485#define STV090x_WIDTH_Px_DIS_16PSK_9_10_FIELD 4
1486#define STV090x_OFFST_Px_DIS_16PSK_8_9_FIELD 0
1487#define STV090x_WIDTH_Px_DIS_16PSK_8_9_FIELD 4
1488
1489#define STV090x_Px_MODCODLST5(__x) (0xf4b5 - (__x - 1) * 0x200)
1490#define STV090x_P1_MODCODLST5 STV090x_Px_MODCODLST5(1)
1491#define STV090x_P2_MODCODLST5 STV090x_Px_MODCODLST5(2)
1492#define STV090x_OFFST_Px_DIS_16PSK_5_6_FIELD 4
1493#define STV090x_WIDTH_Px_DIS_16PSK_5_6_FIELD 4
1494#define STV090x_OFFST_Px_DIS_16PSK_4_5_FIELD 0
1495#define STV090x_WIDTH_Px_DIS_16PSK_4_5_FIELD 4
1496
1497#define STV090x_Px_MODCODLST6(__x) (0xf4b6 - (__x - 1) * 0x200)
1498#define STV090x_P1_MODCODLST6 STV090x_Px_MODCODLST6(1)
1499#define STV090x_P2_MODCODLST6 STV090x_Px_MODCODLST6(2)
1500#define STV090x_OFFST_Px_DIS_16PSK_3_4_FIELD 4
1501#define STV090x_WIDTH_Px_DIS_16PSK_3_4_FIELD 4
1502#define STV090x_OFFST_Px_DIS_16PSK_2_3_FIELD 0
1503#define STV090x_WIDTH_Px_DIS_16PSK_2_3_FIELD 4
1504
1505#define STV090x_Px_MODCODLST7(__x) (0xf4b7 - (__x - 1) * 0x200)
1506#define STV090x_P1_MODCODLST7 STV090x_Px_MODCODLST7(1)
1507#define STV090x_P2_MODCODLST7 STV090x_Px_MODCODLST7(2)
1508#define STV090x_OFFST_Px_DIS_8P_9_10_FIELD 4
1509#define STV090x_WIDTH_Px_DIS_8P_9_10_FIELD 4
1510#define STV090x_OFFST_Px_DIS_8P_8_9_FIELD 0
1511#define STV090x_WIDTH_Px_DIS_8P_8_9_FIELD 4
1512
1513#define STV090x_Px_MODCODLST8(__x) (0xf4b8 - (__x - 1) * 0x200)
1514#define STV090x_P1_MODCODLST8 STV090x_Px_MODCODLST8(1)
1515#define STV090x_P2_MODCODLST8 STV090x_Px_MODCODLST8(2)
1516#define STV090x_OFFST_Px_DIS_8P_5_6_FIELD 4
1517#define STV090x_WIDTH_Px_DIS_8P_5_6_FIELD 4
1518#define STV090x_OFFST_Px_DIS_8P_3_4_FIELD 0
1519#define STV090x_WIDTH_Px_DIS_8P_3_4_FIELD 4
1520
1521#define STV090x_Px_MODCODLST9(__x) (0xf4b9 - (__x - 1) * 0x200)
1522#define STV090x_P1_MODCODLST9 STV090x_Px_MODCODLST9(1)
1523#define STV090x_P2_MODCODLST9 STV090x_Px_MODCODLST9(2)
1524#define STV090x_OFFST_Px_DIS_8P_2_3_FIELD 4
1525#define STV090x_WIDTH_Px_DIS_8P_2_3_FIELD 4
1526#define STV090x_OFFST_Px_DIS_8P_3_5_FIELD 0
1527#define STV090x_WIDTH_Px_DIS_8P_3_5_FIELD 4
1528
1529#define STV090x_Px_MODCODLSTA(__x) (0xf4ba - (__x - 1) * 0x200)
1530#define STV090x_P1_MODCODLSTA STV090x_Px_MODCODLSTA(1)
1531#define STV090x_P2_MODCODLSTA STV090x_Px_MODCODLSTA(2)
1532#define STV090x_OFFST_Px_DIS_QP_9_10_FIELD 4
1533#define STV090x_WIDTH_Px_DIS_QP_9_10_FIELD 4
1534#define STV090x_OFFST_Px_DIS_QP_8_9_FIELD 0
1535#define STV090x_WIDTH_Px_DIS_QP_8_9_FIELD 4
1536
1537#define STV090x_Px_MODCODLSTB(__x) (0xf4bb - (__x - 1) * 0x200)
1538#define STV090x_P1_MODCODLSTB STV090x_Px_MODCODLSTB(1)
1539#define STV090x_P2_MODCODLSTB STV090x_Px_MODCODLSTB(2)
1540#define STV090x_OFFST_Px_DIS_QP_5_6_FIELD 4
1541#define STV090x_WIDTH_Px_DIS_QP_5_6_FIELD 4
1542#define STV090x_OFFST_Px_DIS_QP_4_5_FIELD 0
1543#define STV090x_WIDTH_Px_DIS_QP_4_5_FIELD 4
1544
1545#define STV090x_Px_MODCODLSTC(__x) (0xf4bc - (__x - 1) * 0x200)
1546#define STV090x_P1_MODCODLSTC STV090x_Px_MODCODLSTC(1)
1547#define STV090x_P2_MODCODLSTC STV090x_Px_MODCODLSTC(2)
1548#define STV090x_OFFST_Px_DIS_QP_3_4_FIELD 4
1549#define STV090x_WIDTH_Px_DIS_QP_3_4_FIELD 4
1550#define STV090x_OFFST_Px_DIS_QP_2_3_FIELD 0
1551#define STV090x_WIDTH_Px_DIS_QP_2_3_FIELD 4
1552
1553#define STV090x_Px_MODCODLSTD(__x) (0xf4bd - (__x - 1) * 0x200)
1554#define STV090x_P1_MODCODLSTD STV090x_Px_MODCODLSTD(1)
1555#define STV090x_P2_MODCODLSTD STV090x_Px_MODCODLSTD(2)
1556#define STV090x_OFFST_Px_DIS_QP_3_5_FIELD 4
1557#define STV090x_WIDTH_Px_DIS_QP_3_5_FIELD 4
1558#define STV090x_OFFST_Px_DIS_QP_1_2_FIELD 0
1559#define STV090x_WIDTH_Px_DIS_QP_1_2_FIELD 4
1560
1561#define STV090x_Px_MODCODLSTE(__x) (0xf4be - (__x - 1) * 0x200)
1562#define STV090x_P1_MODCODLSTE STV090x_Px_MODCODLSTE(1)
1563#define STV090x_P2_MODCODLSTE STV090x_Px_MODCODLSTE(2)
1564#define STV090x_OFFST_Px_DIS_QP_2_5_FIELD 4
1565#define STV090x_WIDTH_Px_DIS_QP_2_5_FIELD 4
1566#define STV090x_OFFST_Px_DIS_QP_1_3_FIELD 0
1567#define STV090x_WIDTH_Px_DIS_QP_1_3_FIELD 4
1568
1569#define STV090x_Px_MODCODLSTF(__x) (0xf4bf - (__x - 1) * 0x200)
1570#define STV090x_P1_MODCODLSTF STV090x_Px_MODCODLSTF(1)
1571#define STV090x_P2_MODCODLSTF STV090x_Px_MODCODLSTF(2)
1572#define STV090x_OFFST_Px_DIS_QP_1_4_FIELD 4
1573#define STV090x_WIDTH_Px_DIS_QP_1_4_FIELD 4
1574
1575#define STV090x_Px_GAUSSR0(__x) (0xf4c0 - (__x - 1) * 0x200)
1576#define STV090x_P1_GAUSSR0 STV090x_Px_GAUSSR0(1)
1577#define STV090x_P2_GAUSSR0 STV090x_Px_GAUSSR0(2)
1578#define STV090x_OFFST_Px_EN_CCIMODE_FIELD 7
1579#define STV090x_WIDTH_Px_EN_CCIMODE_FIELD 1
1580#define STV090x_OFFST_Px_R0_GAUSSIEN_FIELD 0
1581#define STV090x_WIDTH_Px_R0_GAUSSIEN_FIELD 7
1582
1583#define STV090x_Px_CCIR0(__x) (0xf4c1 - (__x - 1) * 0x200)
1584#define STV090x_P1_CCIR0 STV090x_Px_CCIR0(1)
1585#define STV090x_P2_CCIR0 STV090x_Px_CCIR0(2)
1586#define STV090x_OFFST_Px_CCIDETECT_PLH_FIELD 7
1587#define STV090x_WIDTH_Px_CCIDETECT_PLH_FIELD 1
1588#define STV090x_OFFST_Px_R0_CCI_FIELD 0
1589#define STV090x_WIDTH_Px_R0_CCI_FIELD 7
1590
1591#define STV090x_Px_CCIQUANT(__x) (0xf4c2 - (__x - 1) * 0x200)
1592#define STV090x_P1_CCIQUANT STV090x_Px_CCIQUANT(1)
1593#define STV090x_P2_CCIQUANT STV090x_Px_CCIQUANT(2)
1594#define STV090x_OFFST_Px_CCI_BETA_FIELD 5
1595#define STV090x_WIDTH_Px_CCI_BETA_FIELD 3
1596#define STV090x_OFFST_Px_CCI_QUANT_FIELD 0
1597#define STV090x_WIDTH_Px_CCI_QUANT_FIELD 5
1598
1599#define STV090x_Px_CCITHRESH(__x) (0xf4c3 - (__x - 1) * 0x200)
1600#define STV090x_P1_CCITHRESH STV090x_Px_CCITHRESH(1)
1601#define STV090x_P2_CCITHRESH STV090x_Px_CCITHRESH(2)
1602#define STV090x_OFFST_Px_CCI_THRESHOLD_FIELD 0
1603#define STV090x_WIDTH_Px_CCI_THRESHOLD_FIELD 8
1604
/* Co-channel interference accumulator. */
#define STV090x_Px_CCIACC(__x)			(0xf4c4 - (__x - 1) * 0x200)
#define STV090x_P1_CCIACC			STV090x_Px_CCIACC(1)
/* Was STV090x_Px_CCIACC(1): copy-paste bug that aliased the path-1 register. */
#define STV090x_P2_CCIACC			STV090x_Px_CCIACC(2)
#define STV090x_OFFST_Px_CCI_VALUE_FIELD	0
#define STV090x_WIDTH_Px_CCI_VALUE_FIELD	8
1610
1611#define STV090x_Px_DMDRESCFG(__x) (0xF4C6 - (__x - 1) * 0x200)
1612#define STV090x_P1_DMDRESCFG STV090x_Px_DMDRESCFG(1)
1613#define STV090x_P2_DMDRESCFG STV090x_Px_DMDRESCFG(2)
1614#define STV090x_OFFST_Px_DMDRES_RESET_FIELD 7
1615#define STV090x_WIDTH_Px_DMDRES_RESET_FIELD 1
1616
1617#define STV090x_Px_DMDRESADR(__x) (0xF4C7 - (__x - 1) * 0x200)
1618#define STV090x_P1_DMDRESADR STV090x_Px_DMDRESADR(1)
1619#define STV090x_P2_DMDRESADR STV090x_Px_DMDRESADR(2)
1620#define STV090x_OFFST_Px_DMDRES_RESNBR_FIELD 0
1621#define STV090x_WIDTH_Px_DMDRES_RESNBR_FIELD 4
1622
1623#define STV090x_Px_DMDRESDATAy(__x, __y) (0xF4C8 - (__x - 1) * 0x200 + (7 - __y))
1624#define STV090x_P1_DMDRESDATA0 STV090x_Px_DMDRESDATAy(1, 0)
1625#define STV090x_P1_DMDRESDATA1 STV090x_Px_DMDRESDATAy(1, 1)
1626#define STV090x_P1_DMDRESDATA2 STV090x_Px_DMDRESDATAy(1, 2)
1627#define STV090x_P1_DMDRESDATA3 STV090x_Px_DMDRESDATAy(1, 3)
1628#define STV090x_P1_DMDRESDATA4 STV090x_Px_DMDRESDATAy(1, 4)
1629#define STV090x_P1_DMDRESDATA5 STV090x_Px_DMDRESDATAy(1, 5)
1630#define STV090x_P1_DMDRESDATA6 STV090x_Px_DMDRESDATAy(1, 6)
1631#define STV090x_P1_DMDRESDATA7 STV090x_Px_DMDRESDATAy(1, 7)
1632#define STV090x_P2_DMDRESDATA0 STV090x_Px_DMDRESDATAy(2, 0)
1633#define STV090x_P2_DMDRESDATA1 STV090x_Px_DMDRESDATAy(2, 1)
1634#define STV090x_P2_DMDRESDATA2 STV090x_Px_DMDRESDATAy(2, 2)
1635#define STV090x_P2_DMDRESDATA3 STV090x_Px_DMDRESDATAy(2, 3)
1636#define STV090x_P2_DMDRESDATA4 STV090x_Px_DMDRESDATAy(2, 4)
1637#define STV090x_P2_DMDRESDATA5 STV090x_Px_DMDRESDATAy(2, 5)
1638#define STV090x_P2_DMDRESDATA6 STV090x_Px_DMDRESDATAy(2, 6)
1639#define STV090x_P2_DMDRESDATA7 STV090x_Px_DMDRESDATAy(2, 7)
1640#define STV090x_OFFST_Px_DMDRES_DATA_FIELD 0
1641#define STV090x_WIDTH_Px_DMDRES_DATA_FIELD 8
1642
1643#define STV090x_Px_FFEIy(__x, __y) (0xf4d0 - (__x - 1) * 0x200 + 0x2 * (__y - 1))
1644#define STV090x_P1_FFEI1 STV090x_Px_FFEIy(1, 1)
1645#define STV090x_P1_FFEI2 STV090x_Px_FFEIy(1, 2)
1646#define STV090x_P1_FFEI3 STV090x_Px_FFEIy(1, 3)
1647#define STV090x_P1_FFEI4 STV090x_Px_FFEIy(1, 4)
1648#define STV090x_P2_FFEI1 STV090x_Px_FFEIy(2, 1)
1649#define STV090x_P2_FFEI2 STV090x_Px_FFEIy(2, 2)
1650#define STV090x_P2_FFEI3 STV090x_Px_FFEIy(2, 3)
1651#define STV090x_P2_FFEI4 STV090x_Px_FFEIy(2, 4)
1652#define STV090x_OFFST_Px_FFE_ACCIy_FIELD 0
1653#define STV090x_WIDTH_Px_FFE_ACCIy_FIELD 8
1654
1655#define STV090x_Px_FFEQy(__x, __y) (0xf4d1 - (__x - 1) * 0x200 + 0x2 * (__y - 1))
1656#define STV090x_P1_FFEQ1 STV090x_Px_FFEQy(1, 1)
1657#define STV090x_P1_FFEQ2 STV090x_Px_FFEQy(1, 2)
1658#define STV090x_P1_FFEQ3 STV090x_Px_FFEQy(1, 3)
1659#define STV090x_P1_FFEQ4 STV090x_Px_FFEQy(1, 4)
1660#define STV090x_P2_FFEQ1 STV090x_Px_FFEQy(2, 1)
1661#define STV090x_P2_FFEQ2 STV090x_Px_FFEQy(2, 2)
1662#define STV090x_P2_FFEQ3 STV090x_Px_FFEQy(2, 3)
1663#define STV090x_P2_FFEQ4 STV090x_Px_FFEQy(2, 4)
1664#define STV090x_OFFST_Px_FFE_ACCQy_FIELD 0
1665#define STV090x_WIDTH_Px_FFE_ACCQy_FIELD 8
1666
1667#define STV090x_Px_FFECFG(__x) (0xf4d8 - (__x - 1) * 0x200)
1668#define STV090x_P1_FFECFG STV090x_Px_FFECFG(1)
1669#define STV090x_P2_FFECFG STV090x_Px_FFECFG(2)
1670#define STV090x_OFFST_Px_EQUALFFE_ON_FIELD 6
1671#define STV090x_WIDTH_Px_EQUALFFE_ON_FIELD 1
1672
1673#define STV090x_Px_SMAPCOEF7(__x) (0xf500 - (__x - 1) * 0x200)
1674#define STV090x_P1_SMAPCOEF7 STV090x_Px_SMAPCOEF7(1)
1675#define STV090x_P2_SMAPCOEF7 STV090x_Px_SMAPCOEF7(2)
1676#define STV090x_OFFST_Px_DIS_QSCALE_FIELD 7
1677#define STV090x_WIDTH_Px_DIS_QSCALE_FIELD 1
1678#define STV090x_OFFST_Px_SMAPCOEF_Q_LLR12_FIELD 0
1679#define STV090x_WIDTH_Px_SMAPCOEF_Q_LLR12_FIELD 7
1680
1681#define STV090x_Px_SMAPCOEF6(__x) (0xf501 - (__x - 1) * 0x200)
1682#define STV090x_P1_SMAPCOEF6 STV090x_Px_SMAPCOEF6(1)
1683#define STV090x_P2_SMAPCOEF6 STV090x_Px_SMAPCOEF6(2)
1684#define STV090x_OFFST_Px_ADJ_8PSKLLR1_FIELD 2
1685#define STV090x_WIDTH_Px_ADJ_8PSKLLR1_FIELD 1
1686#define STV090x_OFFST_Px_OLD_8PSKLLR1_FIELD 1
1687#define STV090x_WIDTH_Px_OLD_8PSKLLR1_FIELD 1
1688#define STV090x_OFFST_Px_DIS_AB8PSK_FIELD 0
1689#define STV090x_WIDTH_Px_DIS_AB8PSK_FIELD 1
1690
1691#define STV090x_Px_SMAPCOEF5(__x) (0xf502 - (__x - 1) * 0x200)
1692#define STV090x_P1_SMAPCOEF5 STV090x_Px_SMAPCOEF5(1)
1693#define STV090x_P2_SMAPCOEF5 STV090x_Px_SMAPCOEF5(2)
1694#define STV090x_OFFST_Px_DIS_8SCALE_FIELD 7
1695#define STV090x_WIDTH_Px_DIS_8SCALE_FIELD 1
1696#define STV090x_OFFST_Px_SMAPCOEF_8P_LLR23_FIELD 0
1697#define STV090x_WIDTH_Px_SMAPCOEF_8P_LLR23_FIELD 7
1698
1699#define STV090x_Px_DMDPLHSTAT(__x) (0xF520 - (__x - 1) * 0x200)
1700#define STV090x_P1_DMDPLHSTAT STV090x_Px_DMDPLHSTAT(1)
1701#define STV090x_P2_DMDPLHSTAT STV090x_Px_DMDPLHSTAT(2)
1702#define STV090x_OFFST_Px_PLH_STATISTIC_FIELD 0
1703#define STV090x_WIDTH_Px_PLH_STATISTIC_FIELD 8
1704
1705#define STV090x_Px_LOCKTIMEy(__x, __y) (0xF525 - (__x - 1) * 0x200 - __y * 0x1)
1706#define STV090x_P1_LOCKTIME0 STV090x_Px_LOCKTIMEy(1, 0)
1707#define STV090x_P1_LOCKTIME1 STV090x_Px_LOCKTIMEy(1, 1)
1708#define STV090x_P1_LOCKTIME2 STV090x_Px_LOCKTIMEy(1, 2)
1709#define STV090x_P1_LOCKTIME3 STV090x_Px_LOCKTIMEy(1, 3)
1710#define STV090x_P2_LOCKTIME0 STV090x_Px_LOCKTIMEy(2, 0)
1711#define STV090x_P2_LOCKTIME1 STV090x_Px_LOCKTIMEy(2, 1)
1712#define STV090x_P2_LOCKTIME2 STV090x_Px_LOCKTIMEy(2, 2)
1713#define STV090x_P2_LOCKTIME3 STV090x_Px_LOCKTIMEy(2, 3)
1714#define STV090x_OFFST_Px_DEMOD_LOCKTIME_FIELD 0
1715#define STV090x_WIDTH_Px_DEMOD_LOCKTIME_FIELD 8
1716
1717#define STV090x_Px_TNRCFG(__x) (0xf4e0 - (__x - 1) * 0x200) /* check */
1718#define STV090x_P1_TNRCFG STV090x_Px_TNRCFG(1)
1719#define STV090x_P2_TNRCFG STV090x_Px_TNRCFG(2)
1720
1721#define STV090x_Px_TNRCFG2(__x) (0xf4e1 - (__x - 1) * 0x200)
1722#define STV090x_P1_TNRCFG2 STV090x_Px_TNRCFG2(1)
1723#define STV090x_P2_TNRCFG2 STV090x_Px_TNRCFG2(2)
1724#define STV090x_OFFST_Px_TUN_IQSWAP_FIELD 7
1725#define STV090x_WIDTH_Px_TUN_IQSWAP_FIELD 1
1726
1727#define STV090x_Px_VITSCALE(__x) (0xf532 - (__x - 1) * 0x200)
1728#define STV090x_P1_VITSCALE STV090x_Px_VITSCALE(1)
1729#define STV090x_P2_VITSCALE STV090x_Px_VITSCALE(2)
1730#define STV090x_OFFST_Px_NVTH_NOSRANGE_FIELD 7
1731#define STV090x_WIDTH_Px_NVTH_NOSRANGE_FIELD 1
1732#define STV090x_OFFST_Px_VERROR_MAXMODE_FIELD 6
1733#define STV090x_WIDTH_Px_VERROR_MAXMODE_FIELD 1
1734#define STV090x_OFFST_Px_NSLOWSN_LOCKED_FIELD 3
1735#define STV090x_WIDTH_Px_NSLOWSN_LOCKED_FIELD 1
1736#define STV090x_OFFST_Px_DIS_RSFLOCK_FIELD 1
1737#define STV090x_WIDTH_Px_DIS_RSFLOCK_FIELD 1
1738
1739#define STV090x_Px_FECM(__x) (0xf533 - (__x - 1) * 0x200)
1740#define STV090x_P1_FECM STV090x_Px_FECM(1)
1741#define STV090x_P2_FECM STV090x_Px_FECM(2)
1742#define STV090x_OFFST_Px_DSS_DVB_FIELD 7
1743#define STV090x_WIDTH_Px_DSS_DVB_FIELD 1
1744#define STV090x_OFFST_Px_DSS_SRCH_FIELD 4
1745#define STV090x_WIDTH_Px_DSS_SRCH_FIELD 1
1746#define STV090x_OFFST_Px_SYNCVIT_FIELD 1
1747#define STV090x_WIDTH_Px_SYNCVIT_FIELD 1
1748#define STV090x_OFFST_Px_IQINV_FIELD 0
1749#define STV090x_WIDTH_Px_IQINV_FIELD 1
1750
1751#define STV090x_Px_VTH12(__x) (0xf534 - (__x - 1) * 0x200)
1752#define STV090x_P1_VTH12 STV090x_Px_VTH12(1)
1753#define STV090x_P2_VTH12 STV090x_Px_VTH12(2)
1754#define STV090x_OFFST_Px_VTH12_FIELD 0
1755#define STV090x_WIDTH_Px_VTH12_FIELD 8
1756
1757#define STV090x_Px_VTH23(__x) (0xf535 - (__x - 1) * 0x200)
1758#define STV090x_P1_VTH23 STV090x_Px_VTH23(1)
1759#define STV090x_P2_VTH23 STV090x_Px_VTH23(2)
1760#define STV090x_OFFST_Px_VTH23_FIELD 0
1761#define STV090x_WIDTH_Px_VTH23_FIELD 8
1762
1763#define STV090x_Px_VTH34(__x) (0xf536 - (__x - 1) * 0x200)
1764#define STV090x_P1_VTH34 STV090x_Px_VTH34(1)
1765#define STV090x_P2_VTH34 STV090x_Px_VTH34(2)
1766#define STV090x_OFFST_Px_VTH34_FIELD 0
1767#define STV090x_WIDTH_Px_VTH34_FIELD 8
1768
1769#define STV090x_Px_VTH56(__x) (0xf537 - (__x - 1) * 0x200)
1770#define STV090x_P1_VTH56 STV090x_Px_VTH56(1)
1771#define STV090x_P2_VTH56 STV090x_Px_VTH56(2)
1772#define STV090x_OFFST_Px_VTH56_FIELD 0
1773#define STV090x_WIDTH_Px_VTH56_FIELD 8
1774
1775#define STV090x_Px_VTH67(__x) (0xf538 - (__x - 1) * 0x200)
1776#define STV090x_P1_VTH67 STV090x_Px_VTH67(1)
1777#define STV090x_P2_VTH67 STV090x_Px_VTH67(2)
1778#define STV090x_OFFST_Px_VTH67_FIELD 0
1779#define STV090x_WIDTH_Px_VTH67_FIELD 8
1780
1781#define STV090x_Px_VTH78(__x) (0xf539 - (__x - 1) * 0x200)
1782#define STV090x_P1_VTH78 STV090x_Px_VTH78(1)
1783#define STV090x_P2_VTH78 STV090x_Px_VTH78(2)
1784#define STV090x_OFFST_Px_VTH78_FIELD 0
1785#define STV090x_WIDTH_Px_VTH78_FIELD 8
1786
1787#define STV090x_Px_VITCURPUN(__x) (0xf53a - (__x - 1) * 0x200)
1788#define STV090x_P1_VITCURPUN STV090x_Px_VITCURPUN(1)
1789#define STV090x_P2_VITCURPUN STV090x_Px_VITCURPUN(2)
1790#define STV090x_OFFST_Px_VIT_CURPUN_FIELD 0
1791#define STV090x_WIDTH_Px_VIT_CURPUN_FIELD 5
1792
1793#define STV090x_Px_VERROR(__x) (0xf53b - (__x - 1) * 0x200)
1794#define STV090x_P1_VERROR STV090x_Px_VERROR(1)
1795#define STV090x_P2_VERROR STV090x_Px_VERROR(2)
1796#define STV090x_OFFST_Px_REGERR_VIT_FIELD 0
1797#define STV090x_WIDTH_Px_REGERR_VIT_FIELD 8
1798
1799#define STV090x_Px_PRVIT(__x) (0xf53c - (__x - 1) * 0x200)
1800#define STV090x_P1_PRVIT STV090x_Px_PRVIT(1)
1801#define STV090x_P2_PRVIT STV090x_Px_PRVIT(2)
1802#define STV090x_OFFST_Px_DIS_VTHLOCK_FIELD 6
1803#define STV090x_WIDTH_Px_DIS_VTHLOCK_FIELD 1
1804#define STV090x_OFFST_Px_E7_8VIT_FIELD 5
1805#define STV090x_WIDTH_Px_E7_8VIT_FIELD 1
1806#define STV090x_OFFST_Px_E6_7VIT_FIELD 4
1807#define STV090x_WIDTH_Px_E6_7VIT_FIELD 1
1808#define STV090x_OFFST_Px_E5_6VIT_FIELD 3
1809#define STV090x_WIDTH_Px_E5_6VIT_FIELD 1
1810#define STV090x_OFFST_Px_E3_4VIT_FIELD 2
1811#define STV090x_WIDTH_Px_E3_4VIT_FIELD 1
1812#define STV090x_OFFST_Px_E2_3VIT_FIELD 1
1813#define STV090x_WIDTH_Px_E2_3VIT_FIELD 1
1814#define STV090x_OFFST_Px_E1_2VIT_FIELD 0
1815#define STV090x_WIDTH_Px_E1_2VIT_FIELD 1
1816
1817#define STV090x_Px_VAVSRVIT(__x) (0xf53d - (__x - 1) * 0x200)
1818#define STV090x_P1_VAVSRVIT STV090x_Px_VAVSRVIT(1)
1819#define STV090x_P2_VAVSRVIT STV090x_Px_VAVSRVIT(2)
1820#define STV090x_OFFST_Px_SNVIT_FIELD 4
1821#define STV090x_WIDTH_Px_SNVIT_FIELD 2
1822#define STV090x_OFFST_Px_TOVVIT_FIELD 2
1823#define STV090x_WIDTH_Px_TOVVIT_FIELD 2
1824#define STV090x_OFFST_Px_HYPVIT_FIELD 0
1825#define STV090x_WIDTH_Px_HYPVIT_FIELD 2
1826
1827#define STV090x_Px_VSTATUSVIT(__x) (0xf53e - (__x - 1) * 0x200)
1828#define STV090x_P1_VSTATUSVIT STV090x_Px_VSTATUSVIT(1)
1829#define STV090x_P2_VSTATUSVIT STV090x_Px_VSTATUSVIT(2)
1830#define STV090x_OFFST_Px_PRFVIT_FIELD 4
1831#define STV090x_WIDTH_Px_PRFVIT_FIELD 1
1832#define STV090x_OFFST_Px_LOCKEDVIT_FIELD 3
1833#define STV090x_WIDTH_Px_LOCKEDVIT_FIELD 1
1834
1835#define STV090x_Px_VTHINUSE(__x) (0xf53f - (__x - 1) * 0x200)
1836#define STV090x_P1_VTHINUSE STV090x_Px_VTHINUSE(1)
1837#define STV090x_P2_VTHINUSE STV090x_Px_VTHINUSE(2)
1838#define STV090x_OFFST_Px_VIT_INUSE_FIELD 0
1839#define STV090x_WIDTH_Px_VIT_INUSE_FIELD 8
1840
1841#define STV090x_Px_KDIV12(__x) (0xf540 - (__x - 1) * 0x200)
1842#define STV090x_P1_KDIV12 STV090x_Px_KDIV12(1)
1843#define STV090x_P2_KDIV12 STV090x_Px_KDIV12(2)
1844#define STV090x_OFFST_Px_K_DIVIDER_12_FIELD 0
1845#define STV090x_WIDTH_Px_K_DIVIDER_12_FIELD 7
1846
1847#define STV090x_Px_KDIV23(__x) (0xf541 - (__x - 1) * 0x200)
1848#define STV090x_P1_KDIV23 STV090x_Px_KDIV23(1)
1849#define STV090x_P2_KDIV23 STV090x_Px_KDIV23(2)
1850#define STV090x_OFFST_Px_K_DIVIDER_23_FIELD 0
1851#define STV090x_WIDTH_Px_K_DIVIDER_23_FIELD 7
1852
1853#define STV090x_Px_KDIV34(__x) (0xf542 - (__x - 1) * 0x200)
1854#define STV090x_P1_KDIV34 STV090x_Px_KDIV34(1)
1855#define STV090x_P2_KDIV34 STV090x_Px_KDIV34(2)
1856#define STV090x_OFFST_Px_K_DIVIDER_34_FIELD 0
1857#define STV090x_WIDTH_Px_K_DIVIDER_34_FIELD 7
1858
1859#define STV090x_Px_KDIV56(__x) (0xf543 - (__x - 1) * 0x200)
1860#define STV090x_P1_KDIV56 STV090x_Px_KDIV56(1)
1861#define STV090x_P2_KDIV56 STV090x_Px_KDIV56(2)
1862#define STV090x_OFFST_Px_K_DIVIDER_56_FIELD 0
1863#define STV090x_WIDTH_Px_K_DIVIDER_56_FIELD 7
1864
1865#define STV090x_Px_KDIV67(__x) (0xf544 - (__x - 1) * 0x200)
1866#define STV090x_P1_KDIV67 STV090x_Px_KDIV67(1)
1867#define STV090x_P2_KDIV67 STV090x_Px_KDIV67(2)
1868#define STV090x_OFFST_Px_K_DIVIDER_67_FIELD 0
1869#define STV090x_WIDTH_Px_K_DIVIDER_67_FIELD 7
1870
1871#define STV090x_Px_KDIV78(__x) (0xf545 - (__x - 1) * 0x200)
1872#define STV090x_P1_KDIV78 STV090x_Px_KDIV78(1)
1873#define STV090x_P2_KDIV78 STV090x_Px_KDIV78(2)
1874#define STV090x_OFFST_Px_K_DIVIDER_78_FIELD 0
1875#define STV090x_WIDTH_Px_K_DIVIDER_78_FIELD 7
1876
1877#define STV090x_Px_PDELCTRL1(__x) (0xf550 - (__x - 1) * 0x200)
1878#define STV090x_P1_PDELCTRL1 STV090x_Px_PDELCTRL1(1)
1879#define STV090x_P2_PDELCTRL1 STV090x_Px_PDELCTRL1(2)
1880#define STV090x_OFFST_Px_INV_MISMASK_FIELD 7
1881#define STV090x_WIDTH_Px_INV_MISMASK_FIELD 1
1882#define STV090x_OFFST_Px_FILTER_EN_FIELD 5
1883#define STV090x_WIDTH_Px_FILTER_EN_FIELD 1
1884#define STV090x_OFFST_Px_EN_MIS00_FIELD 1
1885#define STV090x_WIDTH_Px_EN_MIS00_FIELD 1
1886#define STV090x_OFFST_Px_ALGOSWRST_FIELD 0
1887#define STV090x_WIDTH_Px_ALGOSWRST_FIELD 1
1888
1889#define STV090x_Px_PDELCTRL2(__x) (0xf551 - (__x - 1) * 0x200)
1890#define STV090x_P1_PDELCTRL2 STV090x_Px_PDELCTRL2(1)
1891#define STV090x_P2_PDELCTRL2 STV090x_Px_PDELCTRL2(2)
1892#define STV090x_OFFST_Px_FORCE_CONTINUOUS 7
1893#define STV090x_WIDTH_Px_FORCE_CONTINUOUS 1
1894#define STV090x_OFFST_Px_RESET_UPKO_COUNT 6
1895#define STV090x_WIDTH_Px_RESET_UPKO_COUNT 1
1896#define STV090x_OFFST_Px_USER_PKTDELIN_NB 5
1897#define STV090x_WIDTH_Px_USER_PKTDELIN_NB 1
1898#define STV090x_OFFST_Px_FORCE_LOCKED 4
1899#define STV090x_WIDTH_Px_FORCE_LOCKED 1
1900#define STV090x_OFFST_Px_DATA_UNBBSCRAM 3
1901#define STV090x_WIDTH_Px_DATA_UNBBSCRAM 1
1902#define STV090x_OFFST_Px_FORCE_LONGPACKET 2
1903#define STV090x_WIDTH_Px_FORCE_LONGPACKET 1
1904#define STV090x_OFFST_Px_FRAME_MODE_FIELD 1
1905#define STV090x_WIDTH_Px_FRAME_MODE_FIELD 1
1906
1907#define STV090x_Px_HYSTTHRESH(__x) (0xf554 - (__x - 1) * 0x200)
1908#define STV090x_P1_HYSTTHRESH STV090x_Px_HYSTTHRESH(1)
1909#define STV090x_P2_HYSTTHRESH STV090x_Px_HYSTTHRESH(2)
1910#define STV090x_OFFST_Px_UNLCK_THRESH_FIELD 4
1911#define STV090x_WIDTH_Px_UNLCK_THRESH_FIELD 4
1912#define STV090x_OFFST_Px_DELIN_LCK_THRESH_FIELD 0
1913#define STV090x_WIDTH_Px_DELIN_LCK_THRESH_FIELD 4
1914
1915#define STV090x_Px_ISIENTRY(__x) (0xf55e - (__x - 1) * 0x200)
1916#define STV090x_P1_ISIENTRY STV090x_Px_ISIENTRY(1)
1917#define STV090x_P2_ISIENTRY STV090x_Px_ISIENTRY(2)
1918#define STV090x_OFFST_Px_ISI_ENTRY_FIELD 0
1919#define STV090x_WIDTH_Px_ISI_ENTRY_FIELD 8
1920
1921#define STV090x_Px_ISIBITENA(__x) (0xf55f - (__x - 1) * 0x200)
1922#define STV090x_P1_ISIBITENA STV090x_Px_ISIBITENA(1)
1923#define STV090x_P2_ISIBITENA STV090x_Px_ISIBITENA(2)
1924#define STV090x_OFFST_Px_ISI_BIT_EN_FIELD 0
1925#define STV090x_WIDTH_Px_ISI_BIT_EN_FIELD 8
1926
1927#define STV090x_Px_MATSTRy(__x, __y) (0xf561 - (__x - 1) * 0x200 - __y * 0x1)
1928#define STV090x_P1_MATSTR0 STV090x_Px_MATSTRy(1, 0)
1929#define STV090x_P1_MATSTR1 STV090x_Px_MATSTRy(1, 1)
1930#define STV090x_P2_MATSTR0 STV090x_Px_MATSTRy(2, 0)
1931#define STV090x_P2_MATSTR1 STV090x_Px_MATSTRy(2, 1)
1932#define STV090x_OFFST_Px_MATYPE_CURRENT_FIELD 0
1933#define STV090x_WIDTH_Px_MATYPE_CURRENT_FIELD 8
1934
1935#define STV090x_Px_UPLSTRy(__x, __y) (0xf563 - (__x - 1) * 0x200 - __y * 0x1)
1936#define STV090x_P1_UPLSTR0 STV090x_Px_UPLSTRy(1, 0)
1937#define STV090x_P1_UPLSTR1 STV090x_Px_UPLSTRy(1, 1)
1938#define STV090x_P2_UPLSTR0 STV090x_Px_UPLSTRy(2, 0)
1939#define STV090x_P2_UPLSTR1 STV090x_Px_UPLSTRy(2, 1)
1940#define STV090x_OFFST_Px_UPL_CURRENT_FIELD 0
1941#define STV090x_WIDTH_Px_UPL_CURRENT_FIELD 8
1942
1943#define STV090x_Px_DFLSTRy(__x, __y) (0xf565 - (__x - 1) * 0x200 - __y * 0x1)
1944#define STV090x_P1_DFLSTR0 STV090x_Px_DFLSTRy(1, 0)
1945#define STV090x_P1_DFLSTR1 STV090x_Px_DFLSTRy(1, 1)
1946#define STV090x_P2_DFLSTR0 STV090x_Px_DFLSTRy(2, 0)
1947#define STV090x_P2_DFLSTR1 STV090x_Px_DFLSTRy(2, 1)
1948#define STV090x_OFFST_Px_DFL_CURRENT_FIELD 0
1949#define STV090x_WIDTH_Px_DFL_CURRENT_FIELD 8
1950
1951#define STV090x_Px_SYNCSTR(__x) (0xf566 - (__x - 1) * 0x200)
1952#define STV090x_P1_SYNCSTR STV090x_Px_SYNCSTR(1)
1953#define STV090x_P2_SYNCSTR STV090x_Px_SYNCSTR(2)
1954#define STV090x_OFFST_Px_SYNC_CURRENT_FIELD 0
1955#define STV090x_WIDTH_Px_SYNC_CURRENT_FIELD 8
1956
1957#define STV090x_Px_SYNCDSTRy(__x, __y) (0xf568 - (__x - 1) * 0x200 - __y * 0x1)
1958#define STV090x_P1_SYNCDSTR0 STV090x_Px_SYNCDSTRy(1, 0)
1959#define STV090x_P1_SYNCDSTR1 STV090x_Px_SYNCDSTRy(1, 1)
1960#define STV090x_P2_SYNCDSTR0 STV090x_Px_SYNCDSTRy(2, 0)
1961#define STV090x_P2_SYNCDSTR1 STV090x_Px_SYNCDSTRy(2, 1)
1962#define STV090x_OFFST_Px_SYNCD_CURRENT_FIELD 0
1963#define STV090x_WIDTH_Px_SYNCD_CURRENT_FIELD 8
1964
1965#define STV090x_Px_PDELSTATUS1(__x) (0xf569 - (__x - 1) * 0x200)
1966#define STV090x_P1_PDELSTATUS1 STV090x_Px_PDELSTATUS1(1)
1967#define STV090x_P2_PDELSTATUS1 STV090x_Px_PDELSTATUS1(2)
1968#define STV090x_OFFST_Px_PKTDELIN_LOCK_FIELD 1
1969#define STV090x_WIDTH_Px_PKTDELIN_LOCK_FIELD 1
1970#define STV090x_OFFST_Px_FIRST_LOCK_FIELD 0
1971#define STV090x_WIDTH_Px_FIRST_LOCK_FIELD 1
1972
1973#define STV090x_Px_PDELSTATUS2(__x) (0xf56a - (__x - 1) * 0x200)
1974#define STV090x_P1_PDELSTATUS2 STV090x_Px_PDELSTATUS2(1)
1975#define STV090x_P2_PDELSTATUS2 STV090x_Px_PDELSTATUS2(2)
1976#define STV090x_OFFST_Px_FRAME_MODCOD_FIELD 2
1977#define STV090x_WIDTH_Px_FRAME_MODCOD_FIELD 5
1978#define STV090x_OFFST_Px_FRAME_TYPE_FIELD 0
1979#define STV090x_WIDTH_Px_FRAME_TYPE_FIELD 2
1980
1981#define STV090x_Px_BBFCRCKO1(__x) (0xf56b - (__x - 1) * 0x200)
1982#define STV090x_P1_BBFCRCKO1 STV090x_Px_BBFCRCKO1(1)
1983#define STV090x_P2_BBFCRCKO1 STV090x_Px_BBFCRCKO1(2)
1984#define STV090x_OFFST_Px_BBHCRC_KOCNT_FIELD 0
1985#define STV090x_WIDTH_Px_BBHCRC_KOCNT_FIELD 8
1986
1987#define STV090x_Px_BBFCRCKO0(__x) (0xf56c - (__x - 1) * 0x200)
1988#define STV090x_P1_BBFCRCKO0 STV090x_Px_BBFCRCKO0(1)
1989#define STV090x_P2_BBFCRCKO0 STV090x_Px_BBFCRCKO0(2)
1990#define STV090x_OFFST_Px_BBHCRC_KOCNT_FIELD 0
1991#define STV090x_WIDTH_Px_BBHCRC_KOCNT_FIELD 8
1992
1993#define STV090x_Px_UPCRCKO1(__x) (0xf56d - (__x - 1) * 0x200)
1994#define STV090x_P1_UPCRCKO1 STV090x_Px_UPCRCKO1(1)
1995#define STV090x_P2_UPCRCKO1 STV090x_Px_UPCRCKO1(2)
1996#define STV090x_OFFST_Px_PKTCRC_KOCNT_FIELD 0
1997#define STV090x_WIDTH_Px_PKTCRC_KOCNT_FIELD 8
1998
1999#define STV090x_Px_UPCRCKO0(__x) (0xf56e - (__x - 1) * 0x200)
2000#define STV090x_P1_UPCRCKO0 STV090x_Px_UPCRCKO0(1)
2001#define STV090x_P2_UPCRCKO0 STV090x_Px_UPCRCKO0(2)
2002#define STV090x_OFFST_Px_PKTCRC_KOCNT_FIELD 0
2003#define STV090x_WIDTH_Px_PKTCRC_KOCNT_FIELD 8
2004
2005#define STV090x_NBITER_NFx(__x) (0xFA03 + (__x - 4) * 0x1)
2006#define STV090x_NBITER_NF4 STV090x_NBITER_NFx(4)
2007#define STV090x_NBITER_NF5 STV090x_NBITER_NFx(5)
2008#define STV090x_NBITER_NF6 STV090x_NBITER_NFx(6)
2009#define STV090x_NBITER_NF7 STV090x_NBITER_NFx(7)
2010#define STV090x_NBITER_NF8 STV090x_NBITER_NFx(8)
2011#define STV090x_NBITER_NF9 STV090x_NBITER_NFx(9)
2012#define STV090x_NBITER_NF10 STV090x_NBITER_NFx(10)
2013#define STV090x_NBITER_NF11 STV090x_NBITER_NFx(11)
2014#define STV090x_NBITER_NF12 STV090x_NBITER_NFx(12)
2015#define STV090x_NBITER_NF13 STV090x_NBITER_NFx(13)
2016#define STV090x_NBITER_NF14 STV090x_NBITER_NFx(14)
2017#define STV090x_NBITER_NF15 STV090x_NBITER_NFx(15)
2018#define STV090x_NBITER_NF16 STV090x_NBITER_NFx(16)
2019#define STV090x_NBITER_NF17 STV090x_NBITER_NFx(17)
2020
2021#define STV090x_NBITERNOERR 0xFA3F
2022#define STV090x_OFFST_NBITER_STOP_CRIT_FIELD 0
2023#define STV090x_WIDTH_NBITER_STOP_CRIT_FIELD 4
2024
2025#define STV090x_GAINLLR_NFx(__x) (0xFA43 + (__x - 4) * 0x1)
2026#define STV090x_GAINLLR_NF4 STV090x_GAINLLR_NFx(4)
2027#define STV090x_OFFST_GAINLLR_NF_QP_1_2_FIELD 0
2028#define STV090x_WIDTH_GAINLLR_NF_QP_1_2_FIELD 7
2029
2030#define STV090x_GAINLLR_NF5 STV090x_GAINLLR_NFx(5)
2031#define STV090x_OFFST_GAINLLR_NF_QP_3_5_FIELD 0
2032#define STV090x_WIDTH_GAINLLR_NF_QP_3_5_FIELD 7
2033
2034#define STV090x_GAINLLR_NF6 STV090x_GAINLLR_NFx(6)
2035#define STV090x_OFFST_GAINLLR_NF_QP_2_3_FIELD 0
2036#define STV090x_WIDTH_GAINLLR_NF_QP_2_3_FIELD 7
2037
2038#define STV090x_GAINLLR_NF7 STV090x_GAINLLR_NFx(7)
2039#define STV090x_OFFST_GAINLLR_NF_QP_3_4_FIELD 0
2040#define STV090x_WIDTH_GAINLLR_NF_QP_3_4_FIELD 7
2041
2042#define STV090x_GAINLLR_NF8 STV090x_GAINLLR_NFx(8)
2043#define STV090x_OFFST_GAINLLR_NF_QP_4_5_FIELD 0
2044#define STV090x_WIDTH_GAINLLR_NF_QP_4_5_FIELD 7
2045
2046#define STV090x_GAINLLR_NF9 STV090x_GAINLLR_NFx(9)
2047#define STV090x_OFFST_GAINLLR_NF_QP_5_6_FIELD 0
2048#define STV090x_WIDTH_GAINLLR_NF_QP_5_6_FIELD 7
2049
2050#define STV090x_GAINLLR_NF10 STV090x_GAINLLR_NFx(10)
2051#define STV090x_OFFST_GAINLLR_NF_QP_8_9_FIELD 0
2052#define STV090x_WIDTH_GAINLLR_NF_QP_8_9_FIELD 7
2053
2054#define STV090x_GAINLLR_NF11 STV090x_GAINLLR_NFx(11)
2055#define STV090x_OFFST_GAINLLR_NF_QP_9_10_FIELD 0
2056#define STV090x_WIDTH_GAINLLR_NF_QP_9_10_FIELD 7
2057
2058#define STV090x_GAINLLR_NF12 STV090x_GAINLLR_NFx(12)
2059#define STV090x_OFFST_GAINLLR_NF_8P_3_5_FIELD 0
2060#define STV090x_WIDTH_GAINLLR_NF_8P_3_5_FIELD 7
2061
2062#define STV090x_GAINLLR_NF13 STV090x_GAINLLR_NFx(13)
2063#define STV090x_OFFST_GAINLLR_NF_8P_2_3_FIELD 0
2064#define STV090x_WIDTH_GAINLLR_NF_8P_2_3_FIELD 7
2065
2066#define STV090x_GAINLLR_NF14 STV090x_GAINLLR_NFx(14)
2067#define STV090x_OFFST_GAINLLR_NF_8P_3_4_FIELD 0
2068#define STV090x_WIDTH_GAINLLR_NF_8P_3_4_FIELD 7
2069
2070#define STV090x_GAINLLR_NF15 STV090x_GAINLLR_NFx(15)
2071#define STV090x_OFFST_GAINLLR_NF_8P_5_6_FIELD 0
2072#define STV090x_WIDTH_GAINLLR_NF_8P_5_6_FIELD 7
2073
2074#define STV090x_GAINLLR_NF16 STV090x_GAINLLR_NFx(16)
2075#define STV090x_OFFST_GAINLLR_NF_8P_8_9_FIELD 0
2076#define STV090x_WIDTH_GAINLLR_NF_8P_8_9_FIELD 7
2077
2078#define STV090x_GAINLLR_NF17 STV090x_GAINLLR_NFx(17)
2079#define STV090x_OFFST_GAINLLR_NF_8P_9_10_FIELD 0
2080#define STV090x_WIDTH_GAINLLR_NF_8P_9_10_FIELD 7
2081
2082#define STV090x_GENCFG 0xFA86
2083#define STV090x_OFFST_BROADCAST_FIELD 4
2084#define STV090x_WIDTH_BROADCAST_FIELD 1
2085#define STV090x_OFFST_PRIORITY_FIELD 1
2086#define STV090x_WIDTH_PRIORITY_FIELD 1
2087#define STV090x_OFFST_DDEMOD_FIELD 0
2088#define STV090x_WIDTH_DDEMOD_FIELD 1
2089
2090#define STV090x_LDPCERRx(__x) (0xFA97 - (__x * 0x1))
2091#define STV090x_LDPCERR0 STV090x_LDPCERRx(0)
2092#define STV090x_LDPCERR1 STV090x_LDPCERRx(1)
2093#define STV090x_OFFST_Px_LDPC_ERRORS_COUNTER_FIELD 0
2094#define STV090x_WIDTH_Px_LDPC_ERRORS_COUNTER_FIELD 8
2095
2096#define STV090x_BCHERR 0xFA98
2097#define STV090x_OFFST_Px_ERRORFLAG_FIELD 4
2098#define STV090x_WIDTH_Px_ERRORFLAG_FIELD 1
2099#define STV090x_OFFST_Px_BCH_ERRORS_COUNTER_FIELD 0
2100#define STV090x_WIDTH_Px_BCH_ERRORS_COUNTER_FIELD 4
2101
2102#define STV090x_Px_TSSTATEM(__x) (0xF570 - (__x - 1) * 0x200)
2103#define STV090x_P1_TSSTATEM STV090x_Px_TSSTATEM(1)
2104#define STV090x_P2_TSSTATEM STV090x_Px_TSSTATEM(2)
2105#define STV090x_OFFST_Px_TSDIL_ON_FIELD 7
2106#define STV090x_WIDTH_Px_TSDIL_ON_FIELD 1
2107#define STV090x_OFFST_Px_TSRS_ON_FIELD 5
2108#define STV090x_WIDTH_Px_TSRS_ON_FIELD 1
2109
2110#define STV090x_Px_TSCFGH(__x) (0xF572 - (__x - 1) * 0x200)
2111#define STV090x_P1_TSCFGH STV090x_Px_TSCFGH(1)
2112#define STV090x_P2_TSCFGH STV090x_Px_TSCFGH(2)
2113#define STV090x_OFFST_Px_TSFIFO_DVBCI_FIELD 7
2114#define STV090x_WIDTH_Px_TSFIFO_DVBCI_FIELD 1
2115#define STV090x_OFFST_Px_TSFIFO_SERIAL_FIELD 6
2116#define STV090x_WIDTH_Px_TSFIFO_SERIAL_FIELD 1
2117#define STV090x_OFFST_Px_TSFIFO_TEIUPDATE_FIELD 5
2118#define STV090x_WIDTH_Px_TSFIFO_TEIUPDATE_FIELD 1
2119#define STV090x_OFFST_Px_TSFIFO_DUTY50_FIELD 4
2120#define STV090x_WIDTH_Px_TSFIFO_DUTY50_FIELD 1
2121#define STV090x_OFFST_Px_TSFIFO_HSGNLOUT_FIELD 3
2122#define STV090x_WIDTH_Px_TSFIFO_HSGNLOUT_FIELD 1
2123#define STV090x_OFFST_Px_TSFIFO_ERRORMODE_FIELD 1
2124#define STV090x_WIDTH_Px_TSFIFO_ERRORMODE_FIELD 2
2125#define STV090x_OFFST_Px_RST_HWARE_FIELD 0
2126#define STV090x_WIDTH_Px_RST_HWARE_FIELD 1
2127
2128#define STV090x_Px_TSCFGM(__x) (0xF573 - (__x - 1) * 0x200)
2129#define STV090x_P1_TSCFGM STV090x_Px_TSCFGM(1)
2130#define STV090x_P2_TSCFGM STV090x_Px_TSCFGM(2)
2131#define STV090x_OFFST_Px_TSFIFO_MANSPEED_FIELD 6
2132#define STV090x_WIDTH_Px_TSFIFO_MANSPEED_FIELD 2
2133#define STV090x_OFFST_Px_TSFIFO_PERMDATA_FIELD 5
2134#define STV090x_WIDTH_Px_TSFIFO_PERMDATA_FIELD 1
2135#define STV090x_OFFST_Px_TSFIFO_INVDATA_FIELD 0
2136#define STV090x_WIDTH_Px_TSFIFO_INVDATA_FIELD 1
2137
2138#define STV090x_Px_TSCFGL(__x) (0xF574 - (__x - 1) * 0x200)
2139#define STV090x_P1_TSCFGL STV090x_Px_TSCFGL(1)
2140#define STV090x_P2_TSCFGL STV090x_Px_TSCFGL(2)
2141#define STV090x_OFFST_Px_TSFIFO_BCLKDEL1CK_FIELD 6
2142#define STV090x_WIDTH_Px_TSFIFO_BCLKDEL1CK_FIELD 2
2143#define STV090x_OFFST_Px_BCHERROR_MODE_FIELD 4
2144#define STV090x_WIDTH_Px_BCHERROR_MODE_FIELD 2
2145#define STV090x_OFFST_Px_TSFIFO_NSGNL2DATA_FIELD 3
2146#define STV090x_WIDTH_Px_TSFIFO_NSGNL2DATA_FIELD 1
2147#define STV090x_OFFST_Px_TSFIFO_EMBINDVB_FIELD 2
2148#define STV090x_WIDTH_Px_TSFIFO_EMBINDVB_FIELD 1
2149#define STV090x_OFFST_Px_TSFIFO_DPUNACT_FIELD 1
2150#define STV090x_WIDTH_Px_TSFIFO_DPUNACT_FIELD 1
2151
2152#define STV090x_Px_TSINSDELH(__x) (0xF576 - (__x - 1) * 0x200)
2153#define STV090x_P1_TSINSDELH STV090x_Px_TSINSDELH(1)
2154#define STV090x_P2_TSINSDELH STV090x_Px_TSINSDELH(2)
2155#define STV090x_OFFST_Px_TSDEL_SYNCBYTE_FIELD 7
2156#define STV090x_WIDTH_Px_TSDEL_SYNCBYTE_FIELD 1
2157#define STV090x_OFFST_Px_TSDEL_XXHEADER_FIELD 6
2158#define STV090x_WIDTH_Px_TSDEL_XXHEADER_FIELD 1
2159
2160#define STV090x_Px_TSSPEED(__x) (0xF580 - (__x - 1) * 0x200)
2161#define STV090x_P1_TSSPEED STV090x_Px_TSSPEED(1)
2162#define STV090x_P2_TSSPEED STV090x_Px_TSSPEED(2)
2163#define STV090x_OFFST_Px_TSFIFO_OUTSPEED_FIELD 0
2164#define STV090x_WIDTH_Px_TSFIFO_OUTSPEED_FIELD 8
2165
2166#define STV090x_Px_TSSTATUS(__x) (0xF581 - (__x - 1) * 0x200)
2167#define STV090x_P1_TSSTATUS STV090x_Px_TSSTATUS(1)
2168#define STV090x_P2_TSSTATUS STV090x_Px_TSSTATUS(2)
2169#define STV090x_OFFST_Px_TSFIFO_LINEOK_FIELD 7
2170#define STV090x_WIDTH_Px_TSFIFO_LINEOK_FIELD 1
2171#define STV090x_OFFST_Px_TSFIFO_ERROR_FIELD 6
2172#define STV090x_WIDTH_Px_TSFIFO_ERROR_FIELD 1
2173
2174#define STV090x_Px_TSSTATUS2(__x) (0xF582 - (__x - 1) * 0x200)
2175#define STV090x_P1_TSSTATUS2 STV090x_Px_TSSTATUS2(1)
2176#define STV090x_P2_TSSTATUS2 STV090x_Px_TSSTATUS2(2)
2177#define STV090x_OFFST_Px_TSFIFO_DEMODSEL_FIELD 7
2178#define STV090x_WIDTH_Px_TSFIFO_DEMODSEL_FIELD 1
2179#define STV090x_OFFST_Px_TSFIFOSPEED_STORE_FIELD 6
2180#define STV090x_WIDTH_Px_TSFIFOSPEED_STORE_FIELD 1
2181#define STV090x_OFFST_Px_DILXX_RESET_FIELD 5
2182#define STV090x_WIDTH_Px_DILXX_RESET_FIELD 1
2183#define STV090x_OFFST_Px_TSSERIAL_IMPOS_FIELD 5
2184#define STV090x_WIDTH_Px_TSSERIAL_IMPOS_FIELD 1
2185#define STV090x_OFFST_Px_SCRAMBDETECT_FIELD 1
2186#define STV090x_WIDTH_Px_SCRAMBDETECT_FIELD 1
2187
/* TS bitrate readout, two bytes per path (__y = 0..1). */
2188#define STV090x_Px_TSBITRATEy(__x, __y) (0xF584 - (__x - 1) * 0x200 - __y * 0x1)
2189#define STV090x_P1_TSBITRATE0 STV090x_Px_TSBITRATEy(1, 0)
2190#define STV090x_P1_TSBITRATE1 STV090x_Px_TSBITRATEy(1, 1)
2191#define STV090x_P2_TSBITRATE0 STV090x_Px_TSBITRATEy(2, 0)
2192#define STV090x_P2_TSBITRATE1 STV090x_Px_TSBITRATEy(2, 1)
/* NOTE(review): offset 7 with width 8 spans past bit 7 of an 8-bit register;
 * every other 8-bit-wide field in this file uses offset 0 — verify these two
 * values against the ST STV090x datasheet before relying on them. */
2193#define STV090x_OFFST_Px_TSFIFO_BITRATE_FIELD 7
2194#define STV090x_WIDTH_Px_TSFIFO_BITRATE_FIELD 8
2195
2196#define STV090x_Px_ERRCTRL1(__x) (0xF598 - (__x - 1) * 0x200)
2197#define STV090x_P1_ERRCTRL1 STV090x_Px_ERRCTRL1(1)
2198#define STV090x_P2_ERRCTRL1 STV090x_Px_ERRCTRL1(2)
2199#define STV090x_OFFST_Px_ERR_SOURCE_FIELD 4
2200#define STV090x_WIDTH_Px_ERR_SOURCE_FIELD 4
2201#define STV090x_OFFST_Px_NUM_EVENT_FIELD 0
2202#define STV090x_WIDTH_Px_NUM_EVENT_FIELD 3
2203
2204#define STV090x_Px_ERRCNT12(__x) (0xF599 - (__x - 1) * 0x200)
2205#define STV090x_P1_ERRCNT12 STV090x_Px_ERRCNT12(1)
2206#define STV090x_P2_ERRCNT12 STV090x_Px_ERRCNT12(2)
2207#define STV090x_OFFST_Px_ERRCNT1_OLDVALUE_FIELD 7
2208#define STV090x_WIDTH_Px_ERRCNT1_OLDVALUE_FIELD 1
2209#define STV090x_OFFST_Px_ERR_CNT12_FIELD 0
2210#define STV090x_WIDTH_Px_ERR_CNT12_FIELD 7
2211
2212#define STV090x_Px_ERRCNT11(__x) (0xF59A - (__x - 1) * 0x200)
2213#define STV090x_P1_ERRCNT11 STV090x_Px_ERRCNT11(1)
2214#define STV090x_P2_ERRCNT11 STV090x_Px_ERRCNT11(2)
2215#define STV090x_OFFST_Px_ERR_CNT11_FIELD 0
2216#define STV090x_WIDTH_Px_ERR_CNT11_FIELD 8
2217
2218#define STV090x_Px_ERRCNT10(__x) (0xF59B - (__x - 1) * 0x200)
2219#define STV090x_P1_ERRCNT10 STV090x_Px_ERRCNT10(1)
2220#define STV090x_P2_ERRCNT10 STV090x_Px_ERRCNT10(2)
2221#define STV090x_OFFST_Px_ERR_CNT10_FIELD 0
2222#define STV090x_WIDTH_Px_ERR_CNT10_FIELD 8
2223
2224#define STV090x_Px_ERRCTRL2(__x) (0xF59C - (__x - 1) * 0x200)
2225#define STV090x_P1_ERRCTRL2 STV090x_Px_ERRCTRL2(1)
2226#define STV090x_P2_ERRCTRL2 STV090x_Px_ERRCTRL2(2)
2227#define STV090x_OFFST_Px_ERR_SOURCE2_FIELD 4
2228#define STV090x_WIDTH_Px_ERR_SOURCE2_FIELD 4
2229#define STV090x_OFFST_Px_NUM_EVENT2_FIELD 0
2230#define STV090x_WIDTH_Px_NUM_EVENT2_FIELD 3
2231
2232#define STV090x_Px_ERRCNT22(__x) (0xF59D - (__x - 1) * 0x200)
2233#define STV090x_P1_ERRCNT22 STV090x_Px_ERRCNT22(1)
2234#define STV090x_P2_ERRCNT22 STV090x_Px_ERRCNT22(2)
2235#define STV090x_OFFST_Px_ERRCNT2_OLDVALUE_FIELD 7
2236#define STV090x_WIDTH_Px_ERRCNT2_OLDVALUE_FIELD 1
2237#define STV090x_OFFST_Px_ERR_CNT2_FIELD 0
2238#define STV090x_WIDTH_Px_ERR_CNT2_FIELD 7
2239
2240#define STV090x_Px_ERRCNT21(__x) (0xF59E - (__x - 1) * 0x200)
2241#define STV090x_P1_ERRCNT21 STV090x_Px_ERRCNT21(1)
2242#define STV090x_P2_ERRCNT21 STV090x_Px_ERRCNT21(2)
2243#define STV090x_OFFST_Px_ERR_CNT21_FIELD 0
2244#define STV090x_WIDTH_Px_ERR_CNT21_FIELD 8
2245
2246#define STV090x_Px_ERRCNT20(__x) (0xF59F - (__x - 1) * 0x200)
2247#define STV090x_P1_ERRCNT20 STV090x_Px_ERRCNT20(1)
2248#define STV090x_P2_ERRCNT20 STV090x_Px_ERRCNT20(2)
2249#define STV090x_OFFST_Px_ERR_CNT20_FIELD 0
2250#define STV090x_WIDTH_Px_ERR_CNT20_FIELD 8
2251
2252#define STV090x_Px_FECSPY(__x) (0xF5A0 - (__x - 1) * 0x200)
2253#define STV090x_P1_FECSPY STV090x_Px_FECSPY(1)
2254#define STV090x_P2_FECSPY STV090x_Px_FECSPY(2)
2255#define STV090x_OFFST_Px_SPY_ENABLE_FIELD 7
2256#define STV090x_WIDTH_Px_SPY_ENABLE_FIELD 1
2257#define STV090x_OFFST_Px_BERMETER_DATAMAODE_FIELD 2
2258#define STV090x_WIDTH_Px_BERMETER_DATAMAODE_FIELD 2
2259
2260#define STV090x_Px_FSPYCFG(__x) (0xF5A1 - (__x - 1) * 0x200)
2261#define STV090x_P1_FSPYCFG STV090x_Px_FSPYCFG(1)
2262#define STV090x_P2_FSPYCFG STV090x_Px_FSPYCFG(2)
2263#define STV090x_OFFST_Px_RST_ON_ERROR_FIELD 5
2264#define STV090x_WIDTH_Px_RST_ON_ERROR_FIELD 1
2265#define STV090x_OFFST_Px_ONE_SHOT_FIELD 4
2266#define STV090x_WIDTH_Px_ONE_SHOT_FIELD 1
2267#define STV090x_OFFST_Px_I2C_MODE_FIELD 2
2268#define STV090x_WIDTH_Px_I2C_MODE_FIELD 2
2269
2270#define STV090x_Px_FSPYDATA(__x) (0xF5A2 - (__x - 1) * 0x200)
2271#define STV090x_P1_FSPYDATA STV090x_Px_FSPYDATA(1)
2272#define STV090x_P2_FSPYDATA STV090x_Px_FSPYDATA(2)
2273#define STV090x_OFFST_Px_SPY_STUFFING_FIELD 7
2274#define STV090x_WIDTH_Px_SPY_STUFFING_FIELD 1
2275#define STV090x_OFFST_Px_SPY_CNULLPKT_FIELD 5
2276#define STV090x_WIDTH_Px_SPY_CNULLPKT_FIELD 1
2277#define STV090x_OFFST_Px_SPY_OUTDATA_MODE_FIELD 0
2278#define STV090x_WIDTH_Px_SPY_OUTDATA_MODE_FIELD 5
2279
2280#define STV090x_Px_FSPYOUT(__x) (0xF5A3 - (__x - 1) * 0x200)
2281#define STV090x_P1_FSPYOUT STV090x_Px_FSPYOUT(1)
2282#define STV090x_P2_FSPYOUT STV090x_Px_FSPYOUT(2)
2283#define STV090x_OFFST_Px_FSPY_DIRECT_FIELD 7
2284#define STV090x_WIDTH_Px_FSPY_DIRECT_FIELD 1
2285#define STV090x_OFFST_Px_STUFF_MODE_FIELD 0
2286#define STV090x_WIDTH_Px_STUFF_MODE_FIELD 3
2287
2288#define STV090x_Px_FSTATUS(__x) (0xF5A4 - (__x - 1) * 0x200)
2289#define STV090x_P1_FSTATUS STV090x_Px_FSTATUS(1)
2290#define STV090x_P2_FSTATUS STV090x_Px_FSTATUS(2)
2291#define STV090x_OFFST_Px_SPY_ENDSIM_FIELD 7
2292#define STV090x_WIDTH_Px_SPY_ENDSIM_FIELD 1
2293#define STV090x_OFFST_Px_VALID_SIM_FIELD 6
2294#define STV090x_WIDTH_Px_VALID_SIM_FIELD 1
2295#define STV090x_OFFST_Px_FOUND_SIGNAL_FIELD 5
2296#define STV090x_WIDTH_Px_FOUND_SIGNAL_FIELD 1
2297#define STV090x_OFFST_Px_DSS_SYNCBYTE_FIELD 4
2298#define STV090x_WIDTH_Px_DSS_SYNCBYTE_FIELD 1
2299#define STV090x_OFFST_Px_RESULT_STATE_FIELD 0
2300#define STV090x_WIDTH_Px_RESULT_STATE_FIELD 4
2301
2302#define STV090x_Px_FBERCPT4(__x) (0xF5A8 - (__x - 1) * 0x200)
2303#define STV090x_P1_FBERCPT4 STV090x_Px_FBERCPT4(1)
2304#define STV090x_P2_FBERCPT4 STV090x_Px_FBERCPT4(2)
2305#define STV090x_OFFST_Px_FBERMETER_CPT_FIELD 0
2306#define STV090x_WIDTH_Px_FBERMETER_CPT_FIELD 8
2307
2308#define STV090x_Px_FBERCPT3(__x) (0xF5A9 - (__x - 1) * 0x200)
2309#define STV090x_P1_FBERCPT3 STV090x_Px_FBERCPT3(1)
2310#define STV090x_P2_FBERCPT3 STV090x_Px_FBERCPT3(2)
2311#define STV090x_OFFST_Px_FBERMETER_CPT_FIELD 0
2312#define STV090x_WIDTH_Px_FBERMETER_CPT_FIELD 8
2313
2314#define STV090x_Px_FBERCPT2(__x) (0xF5AA - (__x - 1) * 0x200)
2315#define STV090x_P1_FBERCPT2 STV090x_Px_FBERCPT2(1)
2316#define STV090x_P2_FBERCPT2 STV090x_Px_FBERCPT2(2)
2317#define STV090x_OFFST_Px_FBERMETER_CPT_FIELD 0
2318#define STV090x_WIDTH_Px_FBERMETER_CPT_FIELD 8
2319
2320#define STV090x_Px_FBERCPT1(__x) (0xF5AB - (__x - 1) * 0x200)
2321#define STV090x_P1_FBERCPT1 STV090x_Px_FBERCPT1(1)
2322#define STV090x_P2_FBERCPT1 STV090x_Px_FBERCPT1(2)
2323#define STV090x_OFFST_Px_FBERMETER_CPT_FIELD 0
2324#define STV090x_WIDTH_Px_FBERMETER_CPT_FIELD 8
2325
2326#define STV090x_Px_FBERCPT0(__x) (0xF5AC - (__x - 1) * 0x200)
2327#define STV090x_P1_FBERCPT0 STV090x_Px_FBERCPT0(1)
2328#define STV090x_P2_FBERCPT0 STV090x_Px_FBERCPT0(2)
2329#define STV090x_OFFST_Px_FBERMETER_CPT_FIELD 0
2330#define STV090x_WIDTH_Px_FBERMETER_CPT_FIELD 8
2331
2332#define STV090x_Px_FBERERRy(__x, __y) (0xF5AF - (__x - 1) * 0x200 - __y * 0x1)
2333#define STV090x_P1_FBERERR0 STV090x_Px_FBERERRy(1, 0)
2334#define STV090x_P1_FBERERR1 STV090x_Px_FBERERRy(1, 1)
2335#define STV090x_P1_FBERERR2 STV090x_Px_FBERERRy(1, 2)
2336#define STV090x_P2_FBERERR0 STV090x_Px_FBERERRy(2, 0)
2337#define STV090x_P2_FBERERR1 STV090x_Px_FBERERRy(2, 1)
2338#define STV090x_P2_FBERERR2 STV090x_Px_FBERERRy(2, 2)
2339#define STV090x_OFFST_Px_FBERMETER_CPT_ERR_FIELD 0
2340#define STV090x_WIDTH_Px_FBERMETER_CPT_ERR_FIELD 8
2341
2342#define STV090x_Px_FSPYBER(__x) (0xF5B2 - (__x - 1) * 0x200)
2343#define STV090x_P1_FSPYBER STV090x_Px_FSPYBER(1)
2344#define STV090x_P2_FSPYBER STV090x_Px_FSPYBER(2)
2345#define STV090x_OFFST_Px_FSPYBER_SYNCBYTE_FIELD 4
2346#define STV090x_WIDTH_Px_FSPYBER_SYNCBYTE_FIELD 1
2347#define STV090x_OFFST_Px_FSPYBER_UNSYNC_FIELD 3
2348#define STV090x_WIDTH_Px_FSPYBER_UNSYNC_FIELD 1
2349#define STV090x_OFFST_Px_FSPYBER_CTIME_FIELD 0
2350#define STV090x_WIDTH_Px_FSPYBER_CTIME_FIELD 3
2351
2352#define STV090x_RCCFGH 0xf600
2353
2354#define STV090x_TSGENERAL 0xF630
2355#define STV090x_OFFST_Px_MUXSTREAM_OUT_FIELD 3
2356#define STV090x_WIDTH_Px_MUXSTREAM_OUT_FIELD 1
2357#define STV090x_OFFST_Px_TSFIFO_PERMPARAL_FIELD 1
2358#define STV090x_WIDTH_Px_TSFIFO_PERMPARAL_FIELD 2
2359
2360#define STV090x_TSGENERAL1X 0xf670
2361#define STV090x_CFGEXT 0xfa80
2362
2363#define STV090x_TSTRES0 0xFF11
2364#define STV090x_OFFST_FRESFEC_FIELD 7
2365#define STV090x_WIDTH_FRESFEC_FIELD 1
2366
2367#define STV090x_Px_TSTDISRX(__x) (0xFF67 - (__x - 1) * 0x2)
2368#define STV090x_P1_TSTDISRX STV090x_Px_TSTDISRX(1)
2369#define STV090x_P2_TSTDISRX STV090x_Px_TSTDISRX(2)
2370#define STV090x_OFFST_Px_TSTDISRX_SELECT_FIELD 3
2371#define STV090x_WIDTH_Px_TSTDISRX_SELECT_FIELD 1
2372
2373#endif /* __STV090x_REG_H */
diff --git a/drivers/media/dvb/frontends/stv6110x.c b/drivers/media/dvb/frontends/stv6110x.c
new file mode 100644
index 000000000000..3d8a2e01c9c4
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv6110x.c
@@ -0,0 +1,373 @@
1/*
2 STV6110(A) Silicon tuner driver
3
4 Copyright (C) Manu Abraham <abraham.manu@gmail.com>
5
6 Copyright (C) ST Microelectronics
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21*/
22
23#include <linux/init.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/string.h>
27
28#include "dvb_frontend.h"
29
30#include "stv6110x_reg.h"
31#include "stv6110x.h"
32#include "stv6110x_priv.h"
33
34static unsigned int verbose;
35module_param(verbose, int, 0644);
36MODULE_PARM_DESC(verbose, "Set Verbosity level");
37
38static u8 stv6110x_regs[] = {0x07, 0x11, 0xdc, 0x85, 0x17, 0x01, 0xe6, 0x1e};
39
40static int stv6110x_read_reg(struct stv6110x_state *stv6110x, u8 reg, u8 *data)
41{
42 int ret;
43 const struct stv6110x_config *config = stv6110x->config;
44 u8 b0[] = { reg };
45 u8 b1[] = { 0 };
46 struct i2c_msg msg[] = {
47 { .addr = config->addr, .flags = 0, .buf = b0, .len = 1 },
48 { .addr = config->addr, .flags = I2C_M_RD, .buf = b1, .len = 1 }
49 };
50
51 ret = i2c_transfer(stv6110x->i2c, msg, 2);
52 if (ret != 2) {
53 dprintk(FE_ERROR, 1, "I/O Error");
54 return -EREMOTEIO;
55 }
56 *data = b1[0];
57
58 return 0;
59}
60
61static int stv6110x_write_reg(struct stv6110x_state *stv6110x, u8 reg, u8 data)
62{
63 int ret;
64 const struct stv6110x_config *config = stv6110x->config;
65 u8 buf[] = { reg, data };
66 struct i2c_msg msg = { .addr = config->addr, .flags = 0, . buf = buf, .len = 2 };
67
68 ret = i2c_transfer(stv6110x->i2c, &msg, 1);
69 if (ret != 1) {
70 dprintk(FE_ERROR, 1, "I/O Error");
71 return -EREMOTEIO;
72 }
73
74 return 0;
75}
76
77static int stv6110x_init(struct dvb_frontend *fe)
78{
79 struct stv6110x_state *stv6110x = fe->tuner_priv;
80 int ret;
81 u8 i;
82
83 for (i = 0; i < ARRAY_SIZE(stv6110x_regs); i++) {
84 ret = stv6110x_write_reg(stv6110x, i, stv6110x_regs[i]);
85 if (ret < 0) {
86 dprintk(FE_ERROR, 1, "Initialization failed");
87 return -1;
88 }
89 }
90
91 return 0;
92}
93
94static int stv6110x_set_frequency(struct dvb_frontend *fe, u32 frequency)
95{
96 struct stv6110x_state *stv6110x = fe->tuner_priv;
97 u32 rDiv, divider;
98 s32 pVal, pCalc, rDivOpt = 0;
99 u8 i;
100
101 STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_K, (REFCLOCK_MHz - 16));
102
103 if (frequency <= 1023000) {
104 STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_DIV4SEL, 1);
105 STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_PRESC32_ON, 0);
106 pVal = 40;
107 } else if (frequency <= 1300000) {
108 STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_DIV4SEL, 1);
109 STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_PRESC32_ON, 1);
110 pVal = 40;
111 } else if (frequency <= 2046000) {
112 STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_DIV4SEL, 0);
113 STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_PRESC32_ON, 0);
114 pVal = 20;
115 } else {
116 STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_DIV4SEL, 0);
117 STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_PRESC32_ON, 1);
118 pVal = 20;
119 }
120
121 for (rDiv = 0; rDiv <= 3; rDiv++) {
122 pCalc = (REFCLOCK_kHz / 100) / R_DIV(rDiv);
123
124 if ((abs((s32)(pCalc - pVal))) < (abs((s32)(1000 - pVal))))
125 rDivOpt = rDiv;
126 }
127
128 divider = (frequency * R_DIV(rDivOpt) * pVal) / REFCLOCK_kHz;
129 divider = (divider + 5) / 10;
130
131 STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_R_DIV, rDivOpt);
132 STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG1], TNG1_N_DIV_11_8, MSB(divider));
133 STV6110x_SETFIELD(stv6110x_regs[STV6110x_TNG0], TNG0_N_DIV_7_0, LSB(divider));
134
135 /* VCO Auto calibration */
136 STV6110x_SETFIELD(stv6110x_regs[STV6110x_STAT1], STAT1_CALVCO_STRT, 1);
137
138 stv6110x_write_reg(stv6110x, STV6110x_CTRL1, stv6110x_regs[STV6110x_CTRL1]);
139 stv6110x_write_reg(stv6110x, STV6110x_TNG1, stv6110x_regs[STV6110x_TNG1]);
140 stv6110x_write_reg(stv6110x, STV6110x_TNG0, stv6110x_regs[STV6110x_TNG0]);
141 stv6110x_write_reg(stv6110x, STV6110x_STAT1, stv6110x_regs[STV6110x_STAT1]);
142
143 for (i = 0; i < TRIALS; i++) {
144 stv6110x_read_reg(stv6110x, STV6110x_STAT1, &stv6110x_regs[STV6110x_STAT1]);
145 if (!STV6110x_GETFIELD(STAT1_CALVCO_STRT, stv6110x_regs[STV6110x_STAT1]))
146 break;
147 msleep(1);
148 }
149
150 return 0;
151}
152
/*
 * Read back the synthesizer divider fields and reconstruct the tuned
 * frequency (same kHz unit that set_frequency takes):
 * f = N * fXTAL(kHz) / (R * 4), with N split across TNG1/TNG0.
 */
static int stv6110x_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
	struct stv6110x_state *stv6110x = fe->tuner_priv;

	stv6110x_read_reg(stv6110x, STV6110x_TNG1, &stv6110x_regs[STV6110x_TNG1]);
	stv6110x_read_reg(stv6110x, STV6110x_TNG0, &stv6110x_regs[STV6110x_TNG0]);

	/* 12-bit N divider: high nibble in TNG1[3:0], low byte in TNG0 */
	*frequency = (MAKEWORD16(STV6110x_GETFIELD(TNG1_N_DIV_11_8, stv6110x_regs[STV6110x_TNG1]),
				 STV6110x_GETFIELD(TNG0_N_DIV_7_0, stv6110x_regs[STV6110x_TNG0]))) * REFCLOCK_kHz;

	/* undo the reference divider R and the optional divide-by-4 prescale */
	*frequency /= (1 << (STV6110x_GETFIELD(TNG1_R_DIV, stv6110x_regs[STV6110x_TNG1]) +
			     STV6110x_GETFIELD(TNG1_DIV4SEL, stv6110x_regs[STV6110x_TNG1])));

	*frequency >>= 2;

	return 0;
}
170
/*
 * Configure the low-pass filter for the given bandwidth (Hz) and run
 * the chip's RC auto-calibration.  CTRL3.CF encodes the cut-off as
 * (half-bandwidth in MHz - 5), clamped to the 0..31 register range.
 */
static int stv6110x_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth)
{
	struct stv6110x_state *stv6110x = fe->tuner_priv;
	u32 halfbw;
	u8 i;

	halfbw = bandwidth >> 1;

	if (halfbw > 36000000)
		STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL3], CTRL3_CF, 31); /* LPF */
	else if (halfbw < 5000000)
		STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL3], CTRL3_CF, 0); /* LPF */
	else
		STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL3], CTRL3_CF, ((halfbw / 1000000) - 5)); /* LPF */


	STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL3], CTRL3_RCCLK_OFF, 0x0); /* cal. clk activated */
	STV6110x_SETFIELD(stv6110x_regs[STV6110x_STAT1], STAT1_CALRC_STRT, 0x1); /* LPF auto cal */

	stv6110x_write_reg(stv6110x, STV6110x_CTRL3, stv6110x_regs[STV6110x_CTRL3]);
	stv6110x_write_reg(stv6110x, STV6110x_STAT1, stv6110x_regs[STV6110x_STAT1]);

	/* poll until the chip clears CALRC_STRT (calibration finished) */
	for (i = 0; i < TRIALS; i++) {
		stv6110x_read_reg(stv6110x, STV6110x_STAT1, &stv6110x_regs[STV6110x_STAT1]);
		if (!STV6110x_GETFIELD(STAT1_CALRC_STRT, stv6110x_regs[STV6110x_STAT1]))
			break;
		msleep(1);
	}
	STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL3], CTRL3_RCCLK_OFF, 0x1); /* cal. done */
	stv6110x_write_reg(stv6110x, STV6110x_CTRL3, stv6110x_regs[STV6110x_CTRL3]);

	return 0;
}
204
205static int stv6110x_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
206{
207 struct stv6110x_state *stv6110x = fe->tuner_priv;
208
209 stv6110x_read_reg(stv6110x, STV6110x_CTRL3, &stv6110x_regs[STV6110x_CTRL3]);
210 *bandwidth = (STV6110x_GETFIELD(CTRL3_CF, stv6110x_regs[STV6110x_CTRL3]) + 5) * 2000000;
211
212 return 0;
213}
214
215static int stv6110x_set_refclock(struct dvb_frontend *fe, u32 refclock)
216{
217 struct stv6110x_state *stv6110x = fe->tuner_priv;
218
219 /* setup divider */
220 switch (refclock) {
221 default:
222 case 1:
223 STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL2], CTRL2_CO_DIV, 0);
224 break;
225 case 2:
226 STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL2], CTRL2_CO_DIV, 1);
227 break;
228 case 4:
229 STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL2], CTRL2_CO_DIV, 2);
230 break;
231 case 8:
232 case 0:
233 STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL2], CTRL2_CO_DIV, 3);
234 break;
235 }
236 stv6110x_write_reg(stv6110x, STV6110x_CTRL2, stv6110x_regs[STV6110x_CTRL2]);
237
238 return 0;
239}
240
241static int stv6110x_get_bbgain(struct dvb_frontend *fe, u32 *gain)
242{
243 struct stv6110x_state *stv6110x = fe->tuner_priv;
244
245 stv6110x_read_reg(stv6110x, STV6110x_CTRL2, &stv6110x_regs[STV6110x_CTRL2]);
246 *gain = 2 * STV6110x_GETFIELD(CTRL2_BBGAIN, stv6110x_regs[STV6110x_CTRL2]);
247
248 return 0;
249}
250
251static int stv6110x_set_bbgain(struct dvb_frontend *fe, u32 gain)
252{
253 struct stv6110x_state *stv6110x = fe->tuner_priv;
254
255 STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL2], CTRL2_BBGAIN, gain / 2);
256 stv6110x_write_reg(stv6110x, STV6110x_CTRL2, stv6110x_regs[STV6110x_CTRL2]);
257
258 return 0;
259}
260
261static int stv6110x_set_mode(struct dvb_frontend *fe, enum tuner_mode mode)
262{
263 struct stv6110x_state *stv6110x = fe->tuner_priv;
264 int ret;
265
266 switch (mode) {
267 case TUNER_SLEEP:
268 STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_SYN, 0);
269 STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_RX, 0);
270 STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_LPT, 0);
271 break;
272
273 case TUNER_WAKE:
274 STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_SYN, 1);
275 STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_RX, 1);
276 STV6110x_SETFIELD(stv6110x_regs[STV6110x_CTRL1], CTRL1_LPT, 1);
277 break;
278 }
279
280 ret = stv6110x_write_reg(stv6110x, STV6110x_CTRL1, stv6110x_regs[STV6110x_CTRL1]);
281 if (ret < 0) {
282 dprintk(FE_ERROR, 1, "I/O Error");
283 return -EIO;
284 }
285
286 return 0;
287}
288
/* dvb_tuner_ops .sleep hook: power the tuner down via set_mode(). */
static int stv6110x_sleep(struct dvb_frontend *fe)
{
	return stv6110x_set_mode(fe, TUNER_SLEEP);
}
293
294static int stv6110x_get_status(struct dvb_frontend *fe, u32 *status)
295{
296 struct stv6110x_state *stv6110x = fe->tuner_priv;
297
298 stv6110x_read_reg(stv6110x, STV6110x_STAT1, &stv6110x_regs[STV6110x_STAT1]);
299
300 if (STV6110x_GETFIELD(STAT1_LOCK, stv6110x_regs[STV6110x_STAT1]))
301 *status = TUNER_PHASELOCKED;
302 else
303 *status = 0;
304
305 return 0;
306}
307

/* dvb_tuner_ops .release hook: detach from the frontend and free state. */
static int stv6110x_release(struct dvb_frontend *fe)
{
	struct stv6110x_state *stv6110x = fe->tuner_priv;

	/* clear the back-pointer first so no stale reference survives */
	fe->tuner_priv = NULL;
	kfree(stv6110x);

	return 0;
}
318
/*
 * Standard dvb_tuner_ops installed into fe->ops.tuner_ops at attach
 * time; only init/sleep/release go through this table — everything
 * else is driven via the stv6110x_devctl interface below.
 */
static struct dvb_tuner_ops stv6110x_ops = {
	.info = {
		.name		= "STV6110(A) Silicon Tuner",
		.frequency_min	= 950000,	/* kHz */
		.frequency_max	= 2150000,	/* kHz */
		.frequency_step	= 0,
	},

	.init		= stv6110x_init,
	.sleep		= stv6110x_sleep,
	.release	= stv6110x_release
};
331
/*
 * Direct-control vtable returned by stv6110x_attach(); lets the demod
 * driver call tuner operations without going through dvb_tuner_ops.
 */
static struct stv6110x_devctl stv6110x_ctl = {
	.tuner_init		= stv6110x_init,
	.tuner_set_mode		= stv6110x_set_mode,
	.tuner_set_frequency	= stv6110x_set_frequency,
	.tuner_get_frequency	= stv6110x_get_frequency,
	.tuner_set_bandwidth	= stv6110x_set_bandwidth,
	.tuner_get_bandwidth	= stv6110x_get_bandwidth,
	.tuner_set_bbgain	= stv6110x_set_bbgain,
	.tuner_get_bbgain	= stv6110x_get_bbgain,
	.tuner_set_refclk	= stv6110x_set_refclock,
	.tuner_get_status	= stv6110x_get_status,
};
344
345struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
346 const struct stv6110x_config *config,
347 struct i2c_adapter *i2c)
348{
349 struct stv6110x_state *stv6110x;
350
351 stv6110x = kzalloc(sizeof (struct stv6110x_state), GFP_KERNEL);
352 if (stv6110x == NULL)
353 goto error;
354
355 stv6110x->i2c = i2c;
356 stv6110x->config = config;
357 stv6110x->devctl = &stv6110x_ctl;
358
359 fe->tuner_priv = stv6110x;
360 fe->ops.tuner_ops = stv6110x_ops;
361
362 printk("%s: Attaching STV6110x \n", __func__);
363 return stv6110x->devctl;
364
365error:
366 kfree(stv6110x);
367 return NULL;
368}
369EXPORT_SYMBOL(stv6110x_attach);
370
371MODULE_AUTHOR("Manu Abraham");
372MODULE_DESCRIPTION("STV6110x Silicon tuner");
373MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/stv6110x.h b/drivers/media/dvb/frontends/stv6110x.h
new file mode 100644
index 000000000000..a38257080e01
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv6110x.h
@@ -0,0 +1,71 @@
1/*
2 STV6110(A) Silicon tuner driver
3
4 Copyright (C) Manu Abraham <abraham.manu@gmail.com>
5
6 Copyright (C) ST Microelectronics
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21*/
22
23#ifndef __STV6110x_H
24#define __STV6110x_H
25
/* Attach-time configuration supplied by the board/bridge driver. */
struct stv6110x_config {
	u8	addr;	/* I2C slave address of the tuner */
	u32	refclk;	/* reference clock in Hz, used for all divider math */
};

/* Power states accepted by tuner_set_mode(). */
enum tuner_mode {
	TUNER_SLEEP = 1,
	TUNER_WAKE,
};

/* Bit reported through tuner_get_status() when the PLL is locked. */
enum tuner_status {
	TUNER_PHASELOCKED = 1,
};

/*
 * Direct-control interface returned by stv6110x_attach(); gives the
 * demodulator driver access to the tuner without routing everything
 * through dvb_tuner_ops.  Frequencies are in kHz, bandwidths in Hz,
 * gain in dB.
 */
struct stv6110x_devctl {
	int (*tuner_init) (struct dvb_frontend *fe);
	int (*tuner_set_mode) (struct dvb_frontend *fe, enum tuner_mode mode);
	int (*tuner_set_frequency) (struct dvb_frontend *fe, u32 frequency);
	int (*tuner_get_frequency) (struct dvb_frontend *fe, u32 *frequency);
	int (*tuner_set_bandwidth) (struct dvb_frontend *fe, u32 bandwidth);
	int (*tuner_get_bandwidth) (struct dvb_frontend *fe, u32 *bandwidth);
	int (*tuner_set_bbgain) (struct dvb_frontend *fe, u32 gain);
	int (*tuner_get_bbgain) (struct dvb_frontend *fe, u32 *gain);
	int (*tuner_set_refclk)  (struct dvb_frontend *fe, u32 refclk);
	int (*tuner_get_status) (struct dvb_frontend *fe, u32 *status);
};
52
53
#if defined(CONFIG_DVB_STV6110x) || (defined(CONFIG_DVB_STV6110x_MODULE) && defined(MODULE))

/*
 * Attach an STV6110x tuner to the given frontend.  Returns the devctl
 * vtable on success, NULL on failure.
 */
extern struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
					       const struct stv6110x_config *config,
					       struct i2c_adapter *i2c);

#else
/* Stub used when the driver is disabled in Kconfig: warn and fail. */
static inline struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
						      const struct stv6110x_config *config,
						      struct i2c_adapter *i2c)
{
	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
	return NULL;
}

#endif /* CONFIG_DVB_STV6110x */
diff --git a/drivers/media/dvb/frontends/stv6110x_priv.h b/drivers/media/dvb/frontends/stv6110x_priv.h
new file mode 100644
index 000000000000..7260da633d49
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv6110x_priv.h
@@ -0,0 +1,75 @@
1/*
2 STV6110(A) Silicon tuner driver
3
4 Copyright (C) Manu Abraham <abraham.manu@gmail.com>
5
6 Copyright (C) ST Microelectronics
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21*/
22
23#ifndef __STV6110x_PRIV_H
24#define __STV6110x_PRIV_H
25
/* Verbosity levels for the "verbose" module parameter / dprintk(). */
#define FE_ERROR				0
#define FE_NOTICE				1
#define FE_INFO					2
#define FE_DEBUG				3
#define FE_DEBUGREG				4

/*
 * Leveled debug print: __y is the message level, __z selects whether a
 * kernel log level prefix and "func:" header are added (1) or the raw
 * format is printed (0).  Output only appears when the module's
 * "verbose" parameter exceeds the message level.
 */
#define dprintk(__y, __z, format, arg...) do {						\
	if (__z) {									\
		if ((verbose > FE_ERROR) && (verbose > __y))				\
			printk(KERN_ERR "%s: " format "\n", __func__ , ##arg);		\
		else if ((verbose > FE_NOTICE) && (verbose > __y))			\
			printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg);	\
		else if ((verbose > FE_INFO) && (verbose > __y))			\
			printk(KERN_INFO "%s: " format "\n", __func__ , ##arg);		\
		else if ((verbose > FE_DEBUG) && (verbose > __y))			\
			printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg);	\
	} else {									\
		if (verbose > __y)							\
			printk(format, ##arg);						\
	}										\
} while (0)


/* Replace the STV6110x_{OFFST,WIDTH}_<bitf> field inside cached register "mask". */
#define STV6110x_SETFIELD(mask, bitf, val)					\
	(mask = (mask & (~(((1 << STV6110x_WIDTH_##bitf) - 1) <<		\
						 STV6110x_OFFST_##bitf))) |	\
			(val << STV6110x_OFFST_##bitf))

/* Extract the STV6110x_{OFFST,WIDTH}_<bitf> field from register value "val". */
#define STV6110x_GETFIELD(bitf, val)						\
	((val >> STV6110x_OFFST_##bitf) &					\
	((1 << STV6110x_WIDTH_##bitf) - 1))

/* Combine two bytes into a 16-bit word: a = high byte, b = low byte. */
#define MAKEWORD16(a, b)			(((a) << 8) | (b))

#define LSB(x)					((x & 0xff))
#define MSB(y)					((y >> 8) & 0xff)

#define TRIALS					10	/* calibration poll limit (1 ms each) */
#define R_DIV(__div)				(1 << (__div + 1))	/* R divider ratio from 2-bit field */
#define REFCLOCK_kHz				(stv6110x->config->refclk /	   1000)
#define REFCLOCK_MHz				(stv6110x->config->refclk / 1000000)
67
/* Per-tuner driver state, allocated in stv6110x_attach(). */
struct stv6110x_state {
	struct i2c_adapter		*i2c;		/* bus the tuner sits on */
	const struct stv6110x_config	*config;	/* attach-time configuration */

	struct stv6110x_devctl		*devctl;	/* vtable handed to the demod driver */
};
74
75#endif /* __STV6110x_PRIV_H */
diff --git a/drivers/media/dvb/frontends/stv6110x_reg.h b/drivers/media/dvb/frontends/stv6110x_reg.h
new file mode 100644
index 000000000000..93e5c70e5fd8
--- /dev/null
+++ b/drivers/media/dvb/frontends/stv6110x_reg.h
@@ -0,0 +1,82 @@
1/*
2 STV6110(A) Silicon tuner driver
3
4 Copyright (C) Manu Abraham <abraham.manu@gmail.com>
5
6 Copyright (C) ST Microelectronics
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21*/
22
23#ifndef __STV6110x_REG_H
24#define __STV6110x_REG_H
25
/* CTRL1: master power bits and charge-pump current K */
#define STV6110x_CTRL1				0x00
#define STV6110x_OFFST_CTRL1_K			3
#define STV6110x_WIDTH_CTRL1_K			5
#define STV6110x_OFFST_CTRL1_LPT		2
#define STV6110x_WIDTH_CTRL1_LPT		1
#define STV6110x_OFFST_CTRL1_RX			1
#define STV6110x_WIDTH_CTRL1_RX			1
#define STV6110x_OFFST_CTRL1_SYN		0
#define STV6110x_WIDTH_CTRL1_SYN		1

/* CTRL2: reference clock output divider and baseband gain */
#define STV6110x_CTRL2				0x01
#define STV6110x_OFFST_CTRL2_CO_DIV		6
#define STV6110x_WIDTH_CTRL2_CO_DIV		2
#define STV6110x_OFFST_CTRL2_RSVD		5
#define STV6110x_WIDTH_CTRL2_RSVD		1
#define STV6110x_OFFST_CTRL2_REFOUT_SEL		4
#define STV6110x_WIDTH_CTRL2_REFOUT_SEL		1
#define STV6110x_OFFST_CTRL2_BBGAIN		0
#define STV6110x_WIDTH_CTRL2_BBGAIN		4

/* TNG0: low byte of the 12-bit N divider */
#define STV6110x_TNG0				0x02
#define STV6110x_OFFST_TNG0_N_DIV_7_0		0
#define STV6110x_WIDTH_TNG0_N_DIV_7_0		8

/* TNG1: R divider, prescaler controls and N divider high nibble */
#define STV6110x_TNG1				0x03
#define STV6110x_OFFST_TNG1_R_DIV		6
#define STV6110x_WIDTH_TNG1_R_DIV		2
#define STV6110x_OFFST_TNG1_PRESC32_ON		5
#define STV6110x_WIDTH_TNG1_PRESC32_ON		1
#define STV6110x_OFFST_TNG1_DIV4SEL		4
#define STV6110x_WIDTH_TNG1_DIV4SEL		1
#define STV6110x_OFFST_TNG1_N_DIV_11_8		0
#define STV6110x_WIDTH_TNG1_N_DIV_11_8		4


/* CTRL3: loop controls, calibration clock and LPF cut-off (CF) */
#define STV6110x_CTRL3				0x04
#define STV6110x_OFFST_CTRL3_DCLOOP_OFF		7
#define STV6110x_WIDTH_CTRL3_DCLOOP_OFF		1
#define STV6110x_OFFST_CTRL3_RCCLK_OFF		6
#define STV6110x_WIDTH_CTRL3_RCCLK_OFF		1
#define STV6110x_OFFST_CTRL3_ICP		5
#define STV6110x_WIDTH_CTRL3_ICP		1
#define STV6110x_OFFST_CTRL3_CF			0
#define STV6110x_WIDTH_CTRL3_CF			5

/* STAT1: calibration start/busy bits and PLL lock flag */
#define STV6110x_STAT1				0x05
#define STV6110x_OFFST_STAT1_CALVCO_STRT	2
#define STV6110x_WIDTH_STAT1_CALVCO_STRT	1
#define STV6110x_OFFST_STAT1_CALRC_STRT		1
#define STV6110x_WIDTH_STAT1_CALRC_STRT		1
#define STV6110x_OFFST_STAT1_LOCK		0
#define STV6110x_WIDTH_STAT1_LOCK		1

#define STV6110x_STAT2				0x06
#define STV6110x_STAT3				0x07
diff --git a/drivers/media/dvb/frontends/tda10048.c b/drivers/media/dvb/frontends/tda10048.c
index 2a8bbcd44cd0..4302c563a6b8 100644
--- a/drivers/media/dvb/frontends/tda10048.c
+++ b/drivers/media/dvb/frontends/tda10048.c
@@ -1,7 +1,7 @@
1/* 1/*
2 NXP TDA10048HN DVB OFDM demodulator driver 2 NXP TDA10048HN DVB OFDM demodulator driver
3 3
4 Copyright (C) 2008 Steven Toth <stoth@linuxtv.org> 4 Copyright (C) 2009 Steven Toth <stoth@kernellabs.com>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
@@ -25,6 +25,7 @@
25#include <linux/string.h> 25#include <linux/string.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <asm/div64.h>
28#include "dvb_frontend.h" 29#include "dvb_frontend.h"
29#include "dvb_math.h" 30#include "dvb_math.h"
30#include "tda10048.h" 31#include "tda10048.h"
@@ -138,11 +139,20 @@ struct tda10048_state {
138 139
139 struct i2c_adapter *i2c; 140 struct i2c_adapter *i2c;
140 141
141 /* configuration settings */ 142 /* We'll cache and update the attach config settings */
142 const struct tda10048_config *config; 143 struct tda10048_config config;
143 struct dvb_frontend frontend; 144 struct dvb_frontend frontend;
144 145
145 int fwloaded; 146 int fwloaded;
147
148 u32 freq_if_hz;
149 u32 xtal_hz;
150 u32 pll_mfactor;
151 u32 pll_nfactor;
152 u32 pll_pfactor;
153 u32 sample_freq;
154
155 enum fe_bandwidth bandwidth;
146}; 156};
147 157
148static struct init_tab { 158static struct init_tab {
@@ -192,12 +202,26 @@ static struct init_tab {
192 { TDA10048_CONF_C4_2, 0x04 }, 202 { TDA10048_CONF_C4_2, 0x04 },
193}; 203};
194 204
/*
 * PLL factor lookup: for a given crystal clock and IF frequency, the
 * m/n/p factors used by tda10048_set_if() to derive the sample clock.
 */
static struct pll_tab {
	u32 clk_freq_khz;	/* crystal/clock input, kHz */
	u32 if_freq_khz;	/* tuner IF, kHz */
	u8 m, n, p;		/* PLL multiplier / dividers */
} pll_tab[] = {
	{ TDA10048_CLK_4000,  TDA10048_IF_36130, 10, 0, 0 },
	{ TDA10048_CLK_16000, TDA10048_IF_3300,  10, 3, 0 },
	{ TDA10048_CLK_16000, TDA10048_IF_3500,  10, 3, 0 },
	{ TDA10048_CLK_16000, TDA10048_IF_4000,  10, 3, 0 },
	{ TDA10048_CLK_16000, TDA10048_IF_4300,  10, 3, 0 },
	{ TDA10048_CLK_16000, TDA10048_IF_36130, 10, 3, 0 },
};
217
195static int tda10048_writereg(struct tda10048_state *state, u8 reg, u8 data) 218static int tda10048_writereg(struct tda10048_state *state, u8 reg, u8 data)
196{ 219{
220 struct tda10048_config *config = &state->config;
197 int ret; 221 int ret;
198 u8 buf[] = { reg, data }; 222 u8 buf[] = { reg, data };
199 struct i2c_msg msg = { 223 struct i2c_msg msg = {
200 .addr = state->config->demod_address, 224 .addr = config->demod_address,
201 .flags = 0, .buf = buf, .len = 2 }; 225 .flags = 0, .buf = buf, .len = 2 };
202 226
203 dprintk(2, "%s(reg = 0x%02x, data = 0x%02x)\n", __func__, reg, data); 227 dprintk(2, "%s(reg = 0x%02x, data = 0x%02x)\n", __func__, reg, data);
@@ -212,13 +236,14 @@ static int tda10048_writereg(struct tda10048_state *state, u8 reg, u8 data)
212 236
213static u8 tda10048_readreg(struct tda10048_state *state, u8 reg) 237static u8 tda10048_readreg(struct tda10048_state *state, u8 reg)
214{ 238{
239 struct tda10048_config *config = &state->config;
215 int ret; 240 int ret;
216 u8 b0[] = { reg }; 241 u8 b0[] = { reg };
217 u8 b1[] = { 0 }; 242 u8 b1[] = { 0 };
218 struct i2c_msg msg[] = { 243 struct i2c_msg msg[] = {
219 { .addr = state->config->demod_address, 244 { .addr = config->demod_address,
220 .flags = 0, .buf = b0, .len = 1 }, 245 .flags = 0, .buf = b0, .len = 1 },
221 { .addr = state->config->demod_address, 246 { .addr = config->demod_address,
222 .flags = I2C_M_RD, .buf = b1, .len = 1 } }; 247 .flags = I2C_M_RD, .buf = b1, .len = 1 } };
223 248
224 dprintk(2, "%s(reg = 0x%02x)\n", __func__, reg); 249 dprintk(2, "%s(reg = 0x%02x)\n", __func__, reg);
@@ -235,6 +260,7 @@ static u8 tda10048_readreg(struct tda10048_state *state, u8 reg)
235static int tda10048_writeregbulk(struct tda10048_state *state, u8 reg, 260static int tda10048_writeregbulk(struct tda10048_state *state, u8 reg,
236 const u8 *data, u16 len) 261 const u8 *data, u16 len)
237{ 262{
263 struct tda10048_config *config = &state->config;
238 int ret = -EREMOTEIO; 264 int ret = -EREMOTEIO;
239 struct i2c_msg msg; 265 struct i2c_msg msg;
240 u8 *buf; 266 u8 *buf;
@@ -250,7 +276,7 @@ static int tda10048_writeregbulk(struct tda10048_state *state, u8 reg,
250 *buf = reg; 276 *buf = reg;
251 memcpy(buf + 1, data, len); 277 memcpy(buf + 1, data, len);
252 278
253 msg.addr = state->config->demod_address; 279 msg.addr = config->demod_address;
254 msg.flags = 0; 280 msg.flags = 0;
255 msg.buf = buf; 281 msg.buf = buf;
256 msg.len = len + 1; 282 msg.len = len + 1;
@@ -271,14 +297,206 @@ error:
271 return ret; 297 return ret;
272} 298}
273 299
/*
 * Program the PHY2 derotator value from the IF and the sample rate:
 * PHY2 = (IF / fs) * 2^15, computed in 64-bit fixed point with
 * round-to-nearest (scale by 10, add 5, divide by 10).  When the IF
 * is at or above fs/2 the ratio is taken from (fs - IF) and negated
 * (two's complement) instead.
 */
static int tda10048_set_phy2(struct dvb_frontend *fe, u32 sample_freq_hz,
	u32 if_hz)
{
	struct tda10048_state *state = fe->demodulator_priv;
	u64 t;

	dprintk(1, "%s()\n", __func__);

	if (sample_freq_hz == 0)
		return -EINVAL;

	if (if_hz < (sample_freq_hz / 2)) {
		/* PHY2 = (if2/fs) * 2^15 */
		t = if_hz;
		t *= 10;
		t *= 32768;
		do_div(t, sample_freq_hz);
		t += 5;
		do_div(t, 10);
	} else {
		/* PHY2 = ((IF1-fs)/fs) * 2^15 */
		t = sample_freq_hz - if_hz;
		t *= 10;
		t *= 32768;
		do_div(t, sample_freq_hz);
		t += 5;
		do_div(t, 10);
		t = ~t + 1;
	}

	/* 16-bit result split across the LSB/MSB register pair */
	tda10048_writereg(state, TDA10048_FREQ_PHY2_LSB, (u8)t);
	tda10048_writereg(state, TDA10048_FREQ_PHY2_MSB, (u8)(t >> 8));

	return 0;
}
335
/*
 * Program the WREF timing constant for the given channel bandwidth:
 * WREF = (B / (7 * fs)) * 2^31, in 64-bit fixed point with
 * round-to-nearest.  B defaults to 8 MHz unless a 6 or 7 MHz
 * bandwidth is requested.
 */
static int tda10048_set_wref(struct dvb_frontend *fe, u32 sample_freq_hz,
	u32 bw)
{
	struct tda10048_state *state = fe->demodulator_priv;
	u64 t, z;
	u32 b = 8000000;

	dprintk(1, "%s()\n", __func__);

	if (sample_freq_hz == 0)
		return -EINVAL;

	if (bw == BANDWIDTH_6_MHZ)
		b = 6000000;
	else
	if (bw == BANDWIDTH_7_MHZ)
		b = 7000000;

	/* WREF = (B / (7 * fs)) * 2^31 */
	t = b * 10;
	/* avoid warning: this decimal constant is unsigned only in ISO C90 */
	/* t *= 2147483648 on 32bit platforms */
	t *= (2048 * 1024);
	t *= 1024;
	z = 7 * sample_freq_hz;
	do_div(t, z);
	t += 5;
	do_div(t, 10);

	/* 32-bit result written byte-wise, LSB first */
	tda10048_writereg(state, TDA10048_TIME_WREF_LSB, (u8)t);
	tda10048_writereg(state, TDA10048_TIME_WREF_MID1, (u8)(t >> 8));
	tda10048_writereg(state, TDA10048_TIME_WREF_MID2, (u8)(t >> 16));
	tda10048_writereg(state, TDA10048_TIME_WREF_MSB, (u8)(t >> 24));

	return 0;
}
372
/*
 * Program the inverse timing constant, the reciprocal of WREF:
 * INVWREF = ((7 * fs) / B) * 2^5, same rounding scheme and bandwidth
 * defaults as tda10048_set_wref().
 */
static int tda10048_set_invwref(struct dvb_frontend *fe, u32 sample_freq_hz,
	u32 bw)
{
	struct tda10048_state *state = fe->demodulator_priv;
	u64 t;
	u32 b = 8000000;

	dprintk(1, "%s()\n", __func__);

	if (sample_freq_hz == 0)
		return -EINVAL;

	if (bw == BANDWIDTH_6_MHZ)
		b = 6000000;
	else
	if (bw == BANDWIDTH_7_MHZ)
		b = 7000000;

	/* INVWREF = ((7 * fs) / B) * 2^5 */
	t = sample_freq_hz;
	t *= 7;
	t *= 32;
	t *= 10;
	do_div(t, b);
	t += 5;
	do_div(t, 10);

	tda10048_writereg(state, TDA10048_TIME_INVWREF_LSB, (u8)t);
	tda10048_writereg(state, TDA10048_TIME_INVWREF_MSB, (u8)(t >> 8));

	return 0;
}
405
/*
 * Apply a new channel bandwidth: recompute WREF/INVWREF from the
 * cached sample frequency and remember the setting in state so
 * set_frontend can skip redundant reprogramming.  Only 6/7/8 MHz
 * are valid; anything else is rejected with -EINVAL.
 */
static int tda10048_set_bandwidth(struct dvb_frontend *fe,
	enum fe_bandwidth bw)
{
	struct tda10048_state *state = fe->demodulator_priv;
	dprintk(1, "%s(bw=%d)\n", __func__, bw);

	/* Bandwidth setting may need to be adjusted */
	switch (bw) {
	case BANDWIDTH_6_MHZ:
	case BANDWIDTH_7_MHZ:
	case BANDWIDTH_8_MHZ:
		tda10048_set_wref(fe, state->sample_freq, bw);
		tda10048_set_invwref(fe, state->sample_freq, bw);
		break;
	default:
		printk(KERN_ERR "%s() invalid bandwidth\n", __func__);
		return -EINVAL;
	}

	state->bandwidth = bw;

	return 0;
}
429
/*
 * Select the IF for the given bandwidth from the attach config, look
 * up the matching PLL factors in pll_tab for this clock/IF pair, cache
 * them in state, derive the sample frequency
 *   fs = xtal * (m + 45) / ((n + 1) * (p + 4))
 * and reprogram the PHY2 derotator.  Returns -EINVAL when the
 * clock/IF combination is not in the table (bad attach settings).
 */
static int tda10048_set_if(struct dvb_frontend *fe, enum fe_bandwidth bw)
{
	struct tda10048_state *state = fe->demodulator_priv;
	struct tda10048_config *config = &state->config;
	int i;
	u32 if_freq_khz;

	dprintk(1, "%s(bw = %d)\n", __func__, bw);

	/* based on target bandwidth and clk we calculate pll factors */
	switch (bw) {
	case BANDWIDTH_6_MHZ:
		if_freq_khz = config->dtv6_if_freq_khz;
		break;
	case BANDWIDTH_7_MHZ:
		if_freq_khz = config->dtv7_if_freq_khz;
		break;
	case BANDWIDTH_8_MHZ:
		if_freq_khz = config->dtv8_if_freq_khz;
		break;
	default:
		printk(KERN_ERR "%s() no default\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(pll_tab); i++) {
		if ((pll_tab[i].clk_freq_khz == config->clk_freq_khz) &&
			(pll_tab[i].if_freq_khz == if_freq_khz)) {

			state->freq_if_hz = pll_tab[i].if_freq_khz * 1000;
			state->xtal_hz = pll_tab[i].clk_freq_khz * 1000;
			state->pll_mfactor = pll_tab[i].m;
			state->pll_nfactor = pll_tab[i].n;
			state->pll_pfactor = pll_tab[i].p;
			break;
		}
	}
	if (i == ARRAY_SIZE(pll_tab)) {
		printk(KERN_ERR "%s() Incorrect attach settings\n",
			__func__);
		return -EINVAL;
	}

	dprintk(1, "- freq_if_hz = %d\n", state->freq_if_hz);
	dprintk(1, "- xtal_hz = %d\n", state->xtal_hz);
	dprintk(1, "- pll_mfactor = %d\n", state->pll_mfactor);
	dprintk(1, "- pll_nfactor = %d\n", state->pll_nfactor);
	dprintk(1, "- pll_pfactor = %d\n", state->pll_pfactor);

	/* Calculate the sample frequency */
	state->sample_freq = state->xtal_hz * (state->pll_mfactor + 45);
	state->sample_freq /= (state->pll_nfactor + 1);
	state->sample_freq /= (state->pll_pfactor + 4);
	dprintk(1, "- sample_freq = %d\n", state->sample_freq);

	/* Update the I/F */
	tda10048_set_phy2(fe, state->sample_freq, state->freq_if_hz);

	return 0;
}
490
274static int tda10048_firmware_upload(struct dvb_frontend *fe) 491static int tda10048_firmware_upload(struct dvb_frontend *fe)
275{ 492{
276 struct tda10048_state *state = fe->demodulator_priv; 493 struct tda10048_state *state = fe->demodulator_priv;
494 struct tda10048_config *config = &state->config;
277 const struct firmware *fw; 495 const struct firmware *fw;
278 int ret; 496 int ret;
279 int pos = 0; 497 int pos = 0;
280 int cnt; 498 int cnt;
281 u8 wlen = state->config->fwbulkwritelen; 499 u8 wlen = config->fwbulkwritelen;
282 500
283 if ((wlen != TDA10048_BULKWRITE_200) && (wlen != TDA10048_BULKWRITE_50)) 501 if ((wlen != TDA10048_BULKWRITE_200) && (wlen != TDA10048_BULKWRITE_50))
284 wlen = TDA10048_BULKWRITE_200; 502 wlen = TDA10048_BULKWRITE_200;
@@ -289,7 +507,7 @@ static int tda10048_firmware_upload(struct dvb_frontend *fe)
289 TDA10048_DEFAULT_FIRMWARE); 507 TDA10048_DEFAULT_FIRMWARE);
290 508
291 ret = request_firmware(&fw, TDA10048_DEFAULT_FIRMWARE, 509 ret = request_firmware(&fw, TDA10048_DEFAULT_FIRMWARE,
292 &state->i2c->dev); 510 state->i2c->dev.parent);
293 if (ret) { 511 if (ret) {
294 printk(KERN_ERR "%s: Upload failed. (file not found?)\n", 512 printk(KERN_ERR "%s: Upload failed. (file not found?)\n",
295 __func__); 513 __func__);
@@ -484,8 +702,12 @@ static int tda10048_get_tps(struct tda10048_state *state,
484static int tda10048_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) 702static int tda10048_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
485{ 703{
486 struct tda10048_state *state = fe->demodulator_priv; 704 struct tda10048_state *state = fe->demodulator_priv;
705 struct tda10048_config *config = &state->config;
487 dprintk(1, "%s(%d)\n", __func__, enable); 706 dprintk(1, "%s(%d)\n", __func__, enable);
488 707
708 if (config->disable_gate_access)
709 return 0;
710
489 if (enable) 711 if (enable)
490 return tda10048_writereg(state, TDA10048_CONF_C4_1, 712 return tda10048_writereg(state, TDA10048_CONF_C4_1,
491 tda10048_readreg(state, TDA10048_CONF_C4_1) | 0x02); 713 tda10048_readreg(state, TDA10048_CONF_C4_1) | 0x02);
@@ -523,6 +745,12 @@ static int tda10048_set_frontend(struct dvb_frontend *fe,
523 745
524 dprintk(1, "%s(frequency=%d)\n", __func__, p->frequency); 746 dprintk(1, "%s(frequency=%d)\n", __func__, p->frequency);
525 747
748 /* Update the I/F pll's if the bandwidth changes */
749 if (p->u.ofdm.bandwidth != state->bandwidth) {
750 tda10048_set_if(fe, p->u.ofdm.bandwidth);
751 tda10048_set_bandwidth(fe, p->u.ofdm.bandwidth);
752 }
753
526 if (fe->ops.tuner_ops.set_params) { 754 if (fe->ops.tuner_ops.set_params) {
527 755
528 if (fe->ops.i2c_gate_ctrl) 756 if (fe->ops.i2c_gate_ctrl)
@@ -544,6 +772,7 @@ static int tda10048_set_frontend(struct dvb_frontend *fe,
544static int tda10048_init(struct dvb_frontend *fe) 772static int tda10048_init(struct dvb_frontend *fe)
545{ 773{
546 struct tda10048_state *state = fe->demodulator_priv; 774 struct tda10048_state *state = fe->demodulator_priv;
775 struct tda10048_config *config = &state->config;
547 int ret = 0, i; 776 int ret = 0, i;
548 777
549 dprintk(1, "%s()\n", __func__); 778 dprintk(1, "%s()\n", __func__);
@@ -556,10 +785,14 @@ static int tda10048_init(struct dvb_frontend *fe)
556 ret = tda10048_firmware_upload(fe); 785 ret = tda10048_firmware_upload(fe);
557 786
558 /* Set either serial or parallel */ 787 /* Set either serial or parallel */
559 tda10048_output_mode(fe, state->config->output_mode); 788 tda10048_output_mode(fe, config->output_mode);
789
790 /* Set inversion */
791 tda10048_set_inversion(fe, config->inversion);
560 792
561 /* set inversion */ 793 /* Establish default RF values */
562 tda10048_set_inversion(fe, state->config->inversion); 794 tda10048_set_if(fe, BANDWIDTH_8_MHZ);
795 tda10048_set_bandwidth(fe, BANDWIDTH_8_MHZ);
563 796
564 /* Ensure we leave the gate closed */ 797 /* Ensure we leave the gate closed */
565 tda10048_i2c_gate_ctrl(fe, 0); 798 tda10048_i2c_gate_ctrl(fe, 0);
@@ -812,6 +1045,45 @@ static void tda10048_release(struct dvb_frontend *fe)
812 kfree(state); 1045 kfree(state);
813} 1046}
814 1047
1048static void tda10048_establish_defaults(struct dvb_frontend *fe)
1049{
1050 struct tda10048_state *state = fe->demodulator_priv;
1051 struct tda10048_config *config = &state->config;
1052
1053 /* Validate/default the config */
1054 if (config->dtv6_if_freq_khz == 0) {
1055 config->dtv6_if_freq_khz = TDA10048_IF_4300;
1056 printk(KERN_WARNING "%s() tda10048_config.dtv6_if_freq_khz "
1057 "is not set (defaulting to %d)\n",
1058 __func__,
1059 config->dtv6_if_freq_khz);
1060 }
1061
1062 if (config->dtv7_if_freq_khz == 0) {
1063 config->dtv7_if_freq_khz = TDA10048_IF_4300;
1064 printk(KERN_WARNING "%s() tda10048_config.dtv7_if_freq_khz "
1065 "is not set (defaulting to %d)\n",
1066 __func__,
1067 config->dtv7_if_freq_khz);
1068 }
1069
1070 if (config->dtv8_if_freq_khz == 0) {
1071 config->dtv8_if_freq_khz = TDA10048_IF_4300;
1072 printk(KERN_WARNING "%s() tda10048_config.dtv8_if_freq_khz "
1073 "is not set (defaulting to %d)\n",
1074 __func__,
1075 config->dtv8_if_freq_khz);
1076 }
1077
1078 if (config->clk_freq_khz == 0) {
1079 config->clk_freq_khz = TDA10048_CLK_16000;
1080 printk(KERN_WARNING "%s() tda10048_config.clk_freq_khz "
1081 "is not set (defaulting to %d)\n",
1082 __func__,
1083 config->clk_freq_khz);
1084 }
1085}
1086
815static struct dvb_frontend_ops tda10048_ops; 1087static struct dvb_frontend_ops tda10048_ops;
816 1088
817struct dvb_frontend *tda10048_attach(const struct tda10048_config *config, 1089struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
@@ -826,10 +1098,11 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
826 if (state == NULL) 1098 if (state == NULL)
827 goto error; 1099 goto error;
828 1100
829 /* setup the state */ 1101 /* setup the state and clone the config */
830 state->config = config; 1102 memcpy(&state->config, config, sizeof(*config));
831 state->i2c = i2c; 1103 state->i2c = i2c;
832 state->fwloaded = 0; 1104 state->fwloaded = 0;
1105 state->bandwidth = BANDWIDTH_8_MHZ;
833 1106
834 /* check if the demod is present */ 1107 /* check if the demod is present */
835 if (tda10048_readreg(state, TDA10048_IDENTITY) != 0x048) 1108 if (tda10048_readreg(state, TDA10048_IDENTITY) != 0x048)
@@ -840,6 +1113,17 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
840 sizeof(struct dvb_frontend_ops)); 1113 sizeof(struct dvb_frontend_ops));
841 state->frontend.demodulator_priv = state; 1114 state->frontend.demodulator_priv = state;
842 1115
1116 /* Establish any defaults the the user didn't pass */
1117 tda10048_establish_defaults(&state->frontend);
1118
1119 /* Set the xtal and freq defaults */
1120 if (tda10048_set_if(&state->frontend, BANDWIDTH_8_MHZ) != 0)
1121 goto error;
1122
1123 /* Default bandwidth */
1124 if (tda10048_set_bandwidth(&state->frontend, BANDWIDTH_8_MHZ) != 0)
1125 goto error;
1126
843 /* Leave the gate closed */ 1127 /* Leave the gate closed */
844 tda10048_i2c_gate_ctrl(&state->frontend, 0); 1128 tda10048_i2c_gate_ctrl(&state->frontend, 0);
845 1129
diff --git a/drivers/media/dvb/frontends/tda10048.h b/drivers/media/dvb/frontends/tda10048.h
index 0457b24601fa..8828ceaf74bb 100644
--- a/drivers/media/dvb/frontends/tda10048.h
+++ b/drivers/media/dvb/frontends/tda10048.h
@@ -1,7 +1,7 @@
1/* 1/*
2 NXP TDA10048HN DVB OFDM demodulator driver 2 NXP TDA10048HN DVB OFDM demodulator driver
3 3
4 Copyright (C) 2008 Steven Toth <stoth@linuxtv.org> 4 Copyright (C) 2009 Steven Toth <stoth@kernellabs.com>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by 7 it under the terms of the GNU General Public License as published by
@@ -43,6 +43,25 @@ struct tda10048_config {
43#define TDA10048_INVERSION_OFF 0 43#define TDA10048_INVERSION_OFF 0
44#define TDA10048_INVERSION_ON 1 44#define TDA10048_INVERSION_ON 1
45 u8 inversion; 45 u8 inversion;
46
47#define TDA10048_IF_3300 3300
48#define TDA10048_IF_3500 3500
49#define TDA10048_IF_3800 3800
50#define TDA10048_IF_4000 4000
51#define TDA10048_IF_4300 4300
52#define TDA10048_IF_4500 4500
53#define TDA10048_IF_4750 4750
54#define TDA10048_IF_36130 36130
55 u16 dtv6_if_freq_khz;
56 u16 dtv7_if_freq_khz;
57 u16 dtv8_if_freq_khz;
58
59#define TDA10048_CLK_4000 4000
60#define TDA10048_CLK_16000 16000
61 u16 clk_freq_khz;
62
63 /* Disable I2C gate access */
64 u8 disable_gate_access;
46}; 65};
47 66
48#if defined(CONFIG_DVB_TDA10048) || \ 67#if defined(CONFIG_DVB_TDA10048) || \
diff --git a/drivers/media/dvb/siano/Makefile b/drivers/media/dvb/siano/Makefile
index bcf93f4828b2..c6644d909433 100644
--- a/drivers/media/dvb/siano/Makefile
+++ b/drivers/media/dvb/siano/Makefile
@@ -1,4 +1,4 @@
1sms1xxx-objs := smscoreapi.o sms-cards.o 1sms1xxx-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o
2 2
3obj-$(CONFIG_DVB_SIANO_SMS1XXX) += sms1xxx.o 3obj-$(CONFIG_DVB_SIANO_SMS1XXX) += sms1xxx.o
4obj-$(CONFIG_DVB_SIANO_SMS1XXX) += smsusb.o 4obj-$(CONFIG_DVB_SIANO_SMS1XXX) += smsusb.o
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c
index 63e4d0ec6583..d8b15d583bde 100644
--- a/drivers/media/dvb/siano/sms-cards.c
+++ b/drivers/media/dvb/siano/sms-cards.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include "sms-cards.h" 20#include "sms-cards.h"
21#include "smsir.h"
21 22
22static int sms_dbg; 23static int sms_dbg;
23module_param_named(cards_dbg, sms_dbg, int, 0644); 24module_param_named(cards_dbg, sms_dbg, int, 0644);
@@ -30,17 +31,14 @@ static struct sms_board sms_boards[] = {
30 [SMS1XXX_BOARD_SIANO_STELLAR] = { 31 [SMS1XXX_BOARD_SIANO_STELLAR] = {
31 .name = "Siano Stellar Digital Receiver", 32 .name = "Siano Stellar Digital Receiver",
32 .type = SMS_STELLAR, 33 .type = SMS_STELLAR,
33 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-stellar-dvbt-01.fw",
34 }, 34 },
35 [SMS1XXX_BOARD_SIANO_NOVA_A] = { 35 [SMS1XXX_BOARD_SIANO_NOVA_A] = {
36 .name = "Siano Nova A Digital Receiver", 36 .name = "Siano Nova A Digital Receiver",
37 .type = SMS_NOVA_A0, 37 .type = SMS_NOVA_A0,
38 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-nova-a-dvbt-01.fw",
39 }, 38 },
40 [SMS1XXX_BOARD_SIANO_NOVA_B] = { 39 [SMS1XXX_BOARD_SIANO_NOVA_B] = {
41 .name = "Siano Nova B Digital Receiver", 40 .name = "Siano Nova B Digital Receiver",
42 .type = SMS_NOVA_B0, 41 .type = SMS_NOVA_B0,
43 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-nova-b-dvbt-01.fw",
44 }, 42 },
45 [SMS1XXX_BOARD_SIANO_VEGA] = { 43 [SMS1XXX_BOARD_SIANO_VEGA] = {
46 .name = "Siano Vega Digital Receiver", 44 .name = "Siano Vega Digital Receiver",
@@ -65,6 +63,9 @@ static struct sms_board sms_boards[] = {
65 .name = "Hauppauge WinTV MiniStick", 63 .name = "Hauppauge WinTV MiniStick",
66 .type = SMS_NOVA_B0, 64 .type = SMS_NOVA_B0,
67 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw", 65 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw",
66 .board_cfg.leds_power = 26,
67 .board_cfg.led0 = 27,
68 .board_cfg.led1 = 28,
68 .led_power = 26, 69 .led_power = 26,
69 .led_lo = 27, 70 .led_lo = 27,
70 .led_hi = 28, 71 .led_hi = 28,
@@ -74,7 +75,9 @@ static struct sms_board sms_boards[] = {
74 .type = SMS_NOVA_B0, 75 .type = SMS_NOVA_B0,
75 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw", 76 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw",
76 .lna_ctrl = 29, 77 .lna_ctrl = 29,
78 .board_cfg.foreign_lna0_ctrl = 29,
77 .rf_switch = 17, 79 .rf_switch = 17,
80 .board_cfg.rf_switch_uhf = 17,
78 }, 81 },
79 [SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2] = { 82 [SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2] = {
80 .name = "Hauppauge WinTV MiniCard", 83 .name = "Hauppauge WinTV MiniCard",
@@ -82,6 +85,16 @@ static struct sms_board sms_boards[] = {
82 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw", 85 .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw",
83 .lna_ctrl = -1, 86 .lna_ctrl = -1,
84 }, 87 },
88 [SMS1XXX_BOARD_SIANO_NICE] = {
89 /* 11 */
90 .name = "Siano Nice Digital Receiver",
91 .type = SMS_NOVA_B0,
92 },
93 [SMS1XXX_BOARD_SIANO_VENICE] = {
94 /* 12 */
95 .name = "Siano Venice Digital Receiver",
96 .type = SMS_VEGA,
97 },
85}; 98};
86 99
87struct sms_board *sms_get_board(int id) 100struct sms_board *sms_get_board(int id)
@@ -91,12 +104,179 @@ struct sms_board *sms_get_board(int id)
91 return &sms_boards[id]; 104 return &sms_boards[id];
92} 105}
93EXPORT_SYMBOL_GPL(sms_get_board); 106EXPORT_SYMBOL_GPL(sms_get_board);
107static inline void sms_gpio_assign_11xx_default_led_config(
108 struct smscore_gpio_config *pGpioConfig) {
109 pGpioConfig->Direction = SMS_GPIO_DIRECTION_OUTPUT;
110 pGpioConfig->InputCharacteristics =
111 SMS_GPIO_INPUT_CHARACTERISTICS_NORMAL;
112 pGpioConfig->OutputDriving = SMS_GPIO_OUTPUT_DRIVING_4mA;
113 pGpioConfig->OutputSlewRate = SMS_GPIO_OUTPUT_SLEW_RATE_0_45_V_NS;
114 pGpioConfig->PullUpDown = SMS_GPIO_PULL_UP_DOWN_NONE;
115}
116
117int sms_board_event(struct smscore_device_t *coredev,
118 enum SMS_BOARD_EVENTS gevent) {
119 int board_id = smscore_get_board_id(coredev);
120 struct sms_board *board = sms_get_board(board_id);
121 struct smscore_gpio_config MyGpioConfig;
122
123 sms_gpio_assign_11xx_default_led_config(&MyGpioConfig);
124
125 switch (gevent) {
126 case BOARD_EVENT_POWER_INIT: /* including hotplug */
127 switch (board_id) {
128 case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
129 /* set I/O and turn off all LEDs */
130 smscore_gpio_configure(coredev,
131 board->board_cfg.leds_power,
132 &MyGpioConfig);
133 smscore_gpio_set_level(coredev,
134 board->board_cfg.leds_power, 0);
135 smscore_gpio_configure(coredev, board->board_cfg.led0,
136 &MyGpioConfig);
137 smscore_gpio_set_level(coredev,
138 board->board_cfg.led0, 0);
139 smscore_gpio_configure(coredev, board->board_cfg.led1,
140 &MyGpioConfig);
141 smscore_gpio_set_level(coredev,
142 board->board_cfg.led1, 0);
143 break;
144 case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
145 case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
146 /* set I/O and turn off LNA */
147 smscore_gpio_configure(coredev,
148 board->board_cfg.foreign_lna0_ctrl,
149 &MyGpioConfig);
150 smscore_gpio_set_level(coredev,
151 board->board_cfg.foreign_lna0_ctrl,
152 0);
153 break;
154 }
155 break; /* BOARD_EVENT_BIND */
156
157 case BOARD_EVENT_POWER_SUSPEND:
158 switch (board_id) {
159 case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
160 smscore_gpio_set_level(coredev,
161 board->board_cfg.leds_power, 0);
162 smscore_gpio_set_level(coredev,
163 board->board_cfg.led0, 0);
164 smscore_gpio_set_level(coredev,
165 board->board_cfg.led1, 0);
166 break;
167 case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
168 case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
169 smscore_gpio_set_level(coredev,
170 board->board_cfg.foreign_lna0_ctrl,
171 0);
172 break;
173 }
174 break; /* BOARD_EVENT_POWER_SUSPEND */
175
176 case BOARD_EVENT_POWER_RESUME:
177 switch (board_id) {
178 case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
179 smscore_gpio_set_level(coredev,
180 board->board_cfg.leds_power, 1);
181 smscore_gpio_set_level(coredev,
182 board->board_cfg.led0, 1);
183 smscore_gpio_set_level(coredev,
184 board->board_cfg.led1, 0);
185 break;
186 case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
187 case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
188 smscore_gpio_set_level(coredev,
189 board->board_cfg.foreign_lna0_ctrl,
190 1);
191 break;
192 }
193 break; /* BOARD_EVENT_POWER_RESUME */
194
195 case BOARD_EVENT_BIND:
196 switch (board_id) {
197 case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
198 smscore_gpio_set_level(coredev,
199 board->board_cfg.leds_power, 1);
200 smscore_gpio_set_level(coredev,
201 board->board_cfg.led0, 1);
202 smscore_gpio_set_level(coredev,
203 board->board_cfg.led1, 0);
204 break;
205 case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2:
206 case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD:
207 smscore_gpio_set_level(coredev,
208 board->board_cfg.foreign_lna0_ctrl,
209 1);
210 break;
211 }
212 break; /* BOARD_EVENT_BIND */
213
214 case BOARD_EVENT_SCAN_PROG:
215 break; /* BOARD_EVENT_SCAN_PROG */
216 case BOARD_EVENT_SCAN_COMP:
217 break; /* BOARD_EVENT_SCAN_COMP */
218 case BOARD_EVENT_EMERGENCY_WARNING_SIGNAL:
219 break; /* BOARD_EVENT_EMERGENCY_WARNING_SIGNAL */
220 case BOARD_EVENT_FE_LOCK:
221 switch (board_id) {
222 case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
223 smscore_gpio_set_level(coredev,
224 board->board_cfg.led1, 1);
225 break;
226 }
227 break; /* BOARD_EVENT_FE_LOCK */
228 case BOARD_EVENT_FE_UNLOCK:
229 switch (board_id) {
230 case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
231 smscore_gpio_set_level(coredev,
232 board->board_cfg.led1, 0);
233 break;
234 }
235 break; /* BOARD_EVENT_FE_UNLOCK */
236 case BOARD_EVENT_DEMOD_LOCK:
237 break; /* BOARD_EVENT_DEMOD_LOCK */
238 case BOARD_EVENT_DEMOD_UNLOCK:
239 break; /* BOARD_EVENT_DEMOD_UNLOCK */
240 case BOARD_EVENT_RECEPTION_MAX_4:
241 break; /* BOARD_EVENT_RECEPTION_MAX_4 */
242 case BOARD_EVENT_RECEPTION_3:
243 break; /* BOARD_EVENT_RECEPTION_3 */
244 case BOARD_EVENT_RECEPTION_2:
245 break; /* BOARD_EVENT_RECEPTION_2 */
246 case BOARD_EVENT_RECEPTION_1:
247 break; /* BOARD_EVENT_RECEPTION_1 */
248 case BOARD_EVENT_RECEPTION_LOST_0:
249 break; /* BOARD_EVENT_RECEPTION_LOST_0 */
250 case BOARD_EVENT_MULTIPLEX_OK:
251 switch (board_id) {
252 case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
253 smscore_gpio_set_level(coredev,
254 board->board_cfg.led1, 1);
255 break;
256 }
257 break; /* BOARD_EVENT_MULTIPLEX_OK */
258 case BOARD_EVENT_MULTIPLEX_ERRORS:
259 switch (board_id) {
260 case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM:
261 smscore_gpio_set_level(coredev,
262 board->board_cfg.led1, 0);
263 break;
264 }
265 break; /* BOARD_EVENT_MULTIPLEX_ERRORS */
266
267 default:
268 sms_err("Unknown SMS board event");
269 break;
270 }
271 return 0;
272}
273EXPORT_SYMBOL_GPL(sms_board_event);
94 274
95static int sms_set_gpio(struct smscore_device_t *coredev, int pin, int enable) 275static int sms_set_gpio(struct smscore_device_t *coredev, int pin, int enable)
96{ 276{
97 int lvl, ret; 277 int lvl, ret;
98 u32 gpio; 278 u32 gpio;
99 struct smscore_gpio_config gpioconfig = { 279 struct smscore_config_gpio gpioconfig = {
100 .direction = SMS_GPIO_DIRECTION_OUTPUT, 280 .direction = SMS_GPIO_DIRECTION_OUTPUT,
101 .pullupdown = SMS_GPIO_PULLUPDOWN_NONE, 281 .pullupdown = SMS_GPIO_PULLUPDOWN_NONE,
102 .inputcharacteristics = SMS_GPIO_INPUTCHARACTERISTICS_NORMAL, 282 .inputcharacteristics = SMS_GPIO_INPUTCHARACTERISTICS_NORMAL,
diff --git a/drivers/media/dvb/siano/sms-cards.h b/drivers/media/dvb/siano/sms-cards.h
index 64d74c59c33f..38f062f6ad68 100644
--- a/drivers/media/dvb/siano/sms-cards.h
+++ b/drivers/media/dvb/siano/sms-cards.h
@@ -22,6 +22,7 @@
22 22
23#include <linux/usb.h> 23#include <linux/usb.h>
24#include "smscoreapi.h" 24#include "smscoreapi.h"
25#include "smsir.h"
25 26
26#define SMS_BOARD_UNKNOWN 0 27#define SMS_BOARD_UNKNOWN 0
27#define SMS1XXX_BOARD_SIANO_STELLAR 1 28#define SMS1XXX_BOARD_SIANO_STELLAR 1
@@ -34,10 +35,47 @@
34#define SMS1XXX_BOARD_HAUPPAUGE_WINDHAM 8 35#define SMS1XXX_BOARD_HAUPPAUGE_WINDHAM 8
35#define SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD 9 36#define SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD 9
36#define SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2 10 37#define SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2 10
38#define SMS1XXX_BOARD_SIANO_NICE 11
39#define SMS1XXX_BOARD_SIANO_VENICE 12
40
41struct sms_board_gpio_cfg {
42 int lna_vhf_exist;
43 int lna_vhf_ctrl;
44 int lna_uhf_exist;
45 int lna_uhf_ctrl;
46 int lna_uhf_d_ctrl;
47 int lna_sband_exist;
48 int lna_sband_ctrl;
49 int lna_sband_d_ctrl;
50 int foreign_lna0_ctrl;
51 int foreign_lna1_ctrl;
52 int foreign_lna2_ctrl;
53 int rf_switch_vhf;
54 int rf_switch_uhf;
55 int rf_switch_sband;
56 int leds_power;
57 int led0;
58 int led1;
59 int led2;
60 int led3;
61 int led4;
62 int ir;
63 int eeprom_wp;
64 int mrc_sense;
65 int mrc_pdn_resetn;
66 int mrc_gp0; /* mrcs spi int */
67 int mrc_gp1;
68 int mrc_gp2;
69 int mrc_gp3;
70 int mrc_gp4;
71 int host_spi_gsp_ts_int;
72};
37 73
38struct sms_board { 74struct sms_board {
39 enum sms_device_type_st type; 75 enum sms_device_type_st type;
40 char *name, *fw[DEVICE_MODE_MAX]; 76 char *name, *fw[DEVICE_MODE_MAX];
77 struct sms_board_gpio_cfg board_cfg;
78 enum ir_kb_type ir_kb_type;
41 79
42 /* gpios */ 80 /* gpios */
43 int led_power, led_hi, led_lo, lna_ctrl, rf_switch; 81 int led_power, led_hi, led_lo, lna_ctrl, rf_switch;
@@ -45,6 +83,32 @@ struct sms_board {
45 83
46struct sms_board *sms_get_board(int id); 84struct sms_board *sms_get_board(int id);
47 85
86extern struct smscore_device_t *coredev;
87
88enum SMS_BOARD_EVENTS {
89 BOARD_EVENT_POWER_INIT,
90 BOARD_EVENT_POWER_SUSPEND,
91 BOARD_EVENT_POWER_RESUME,
92 BOARD_EVENT_BIND,
93 BOARD_EVENT_SCAN_PROG,
94 BOARD_EVENT_SCAN_COMP,
95 BOARD_EVENT_EMERGENCY_WARNING_SIGNAL,
96 BOARD_EVENT_FE_LOCK,
97 BOARD_EVENT_FE_UNLOCK,
98 BOARD_EVENT_DEMOD_LOCK,
99 BOARD_EVENT_DEMOD_UNLOCK,
100 BOARD_EVENT_RECEPTION_MAX_4,
101 BOARD_EVENT_RECEPTION_3,
102 BOARD_EVENT_RECEPTION_2,
103 BOARD_EVENT_RECEPTION_1,
104 BOARD_EVENT_RECEPTION_LOST_0,
105 BOARD_EVENT_MULTIPLEX_OK,
106 BOARD_EVENT_MULTIPLEX_ERRORS
107};
108
109int sms_board_event(struct smscore_device_t *coredev,
110 enum SMS_BOARD_EVENTS gevent);
111
48int sms_board_setup(struct smscore_device_t *coredev); 112int sms_board_setup(struct smscore_device_t *coredev);
49 113
50#define SMS_LED_OFF 0 114#define SMS_LED_OFF 0
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index 7bd4d1dee2b3..32be382f0e97 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -30,9 +30,13 @@
30#include <linux/io.h> 30#include <linux/io.h>
31 31
32#include <linux/firmware.h> 32#include <linux/firmware.h>
33#include <linux/wait.h>
34#include <asm/byteorder.h>
33 35
34#include "smscoreapi.h" 36#include "smscoreapi.h"
35#include "sms-cards.h" 37#include "sms-cards.h"
38#include "smsir.h"
39#include "smsendian.h"
36 40
37static int sms_dbg; 41static int sms_dbg;
38module_param_named(debug, sms_dbg, int, 0644); 42module_param_named(debug, sms_dbg, int, 0644);
@@ -58,42 +62,6 @@ struct smscore_client_t {
58 onremove_t onremove_handler; 62 onremove_t onremove_handler;
59}; 63};
60 64
61struct smscore_device_t {
62 struct list_head entry;
63
64 struct list_head clients;
65 struct list_head subclients;
66 spinlock_t clientslock;
67
68 struct list_head buffers;
69 spinlock_t bufferslock;
70 int num_buffers;
71
72 void *common_buffer;
73 int common_buffer_size;
74 dma_addr_t common_buffer_phys;
75
76 void *context;
77 struct device *device;
78
79 char devpath[32];
80 unsigned long device_flags;
81
82 setmode_t setmode_handler;
83 detectmode_t detectmode_handler;
84 sendrequest_t sendrequest_handler;
85 preload_t preload_handler;
86 postload_t postload_handler;
87
88 int mode, modes_supported;
89
90 struct completion version_ex_done, data_download_done, trigger_done;
91 struct completion init_device_done, reload_start_done, resume_done;
92
93 int board_id;
94 int led_state;
95};
96
97void smscore_set_board_id(struct smscore_device_t *core, int id) 65void smscore_set_board_id(struct smscore_device_t *core, int id)
98{ 66{
99 core->board_id = id; 67 core->board_id = id;
@@ -384,6 +352,13 @@ int smscore_register_device(struct smsdevice_params_t *params,
384 init_completion(&dev->init_device_done); 352 init_completion(&dev->init_device_done);
385 init_completion(&dev->reload_start_done); 353 init_completion(&dev->reload_start_done);
386 init_completion(&dev->resume_done); 354 init_completion(&dev->resume_done);
355 init_completion(&dev->gpio_configuration_done);
356 init_completion(&dev->gpio_set_level_done);
357 init_completion(&dev->gpio_get_level_done);
358 init_completion(&dev->ir_init_done);
359
360 /* Buffer management */
361 init_waitqueue_head(&dev->buffer_mng_waitq);
387 362
388 /* alloc common buffer */ 363 /* alloc common buffer */
389 dev->common_buffer_size = params->buffer_size * params->num_buffers; 364 dev->common_buffer_size = params->buffer_size * params->num_buffers;
@@ -439,6 +414,71 @@ int smscore_register_device(struct smsdevice_params_t *params,
439} 414}
440EXPORT_SYMBOL_GPL(smscore_register_device); 415EXPORT_SYMBOL_GPL(smscore_register_device);
441 416
417
418static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
419 void *buffer, size_t size, struct completion *completion) {
420 int rc = coredev->sendrequest_handler(coredev->context, buffer, size);
421 if (rc < 0) {
422 sms_info("sendrequest returned error %d", rc);
423 return rc;
424 }
425
426 return wait_for_completion_timeout(completion,
427 msecs_to_jiffies(SMS_PROTOCOL_MAX_RAOUNDTRIP_MS)) ?
428 0 : -ETIME;
429}
430
431/**
432 * Starts & enables IR operations
433 *
434 * @return 0 on success, < 0 on error.
435 */
436static int smscore_init_ir(struct smscore_device_t *coredev)
437{
438 int ir_io;
439 int rc;
440 void *buffer;
441
442 coredev->ir.input_dev = NULL;
443 ir_io = sms_get_board(smscore_get_board_id(coredev))->board_cfg.ir;
444 if (ir_io) {/* only if IR port exist we use IR sub-module */
445 sms_info("IR loading");
446 rc = sms_ir_init(coredev);
447
448 if (rc != 0)
449 sms_err("Error initialization DTV IR sub-module");
450 else {
451 buffer = kmalloc(sizeof(struct SmsMsgData_ST2) +
452 SMS_DMA_ALIGNMENT,
453 GFP_KERNEL | GFP_DMA);
454 if (buffer) {
455 struct SmsMsgData_ST2 *msg =
456 (struct SmsMsgData_ST2 *)
457 SMS_ALIGN_ADDRESS(buffer);
458
459 SMS_INIT_MSG(&msg->xMsgHeader,
460 MSG_SMS_START_IR_REQ,
461 sizeof(struct SmsMsgData_ST2));
462 msg->msgData[0] = coredev->ir.controller;
463 msg->msgData[1] = coredev->ir.timeout;
464
465 smsendian_handle_tx_message(
466 (struct SmsMsgHdr_ST2 *)msg);
467 rc = smscore_sendrequest_and_wait(coredev, msg,
468 msg->xMsgHeader. msgLength,
469 &coredev->ir_init_done);
470
471 kfree(buffer);
472 } else
473 sms_err
474 ("Sending IR initialization message failed");
475 }
476 } else
477 sms_info("IR port has not been detected");
478
479 return 0;
480}
481
442/** 482/**
443 * sets initial device mode and notifies client hotplugs that device is ready 483 * sets initial device mode and notifies client hotplugs that device is ready
444 * 484 *
@@ -459,6 +499,7 @@ int smscore_start_device(struct smscore_device_t *coredev)
459 kmutex_lock(&g_smscore_deviceslock); 499 kmutex_lock(&g_smscore_deviceslock);
460 500
461 rc = smscore_notify_callbacks(coredev, coredev->device, 1); 501 rc = smscore_notify_callbacks(coredev, coredev->device, 1);
502 smscore_init_ir(coredev);
462 503
463 sms_info("device %p started, rc %d", coredev, rc); 504 sms_info("device %p started, rc %d", coredev, rc);
464 505
@@ -468,29 +509,19 @@ int smscore_start_device(struct smscore_device_t *coredev)
468} 509}
469EXPORT_SYMBOL_GPL(smscore_start_device); 510EXPORT_SYMBOL_GPL(smscore_start_device);
470 511
471static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
472 void *buffer, size_t size,
473 struct completion *completion)
474{
475 int rc = coredev->sendrequest_handler(coredev->context, buffer, size);
476 if (rc < 0) {
477 sms_info("sendrequest returned error %d", rc);
478 return rc;
479 }
480
481 return wait_for_completion_timeout(completion,
482 msecs_to_jiffies(10000)) ?
483 0 : -ETIME;
484}
485 512
486static int smscore_load_firmware_family2(struct smscore_device_t *coredev, 513static int smscore_load_firmware_family2(struct smscore_device_t *coredev,
487 void *buffer, size_t size) 514 void *buffer, size_t size)
488{ 515{
489 struct SmsFirmware_ST *firmware = (struct SmsFirmware_ST *) buffer; 516 struct SmsFirmware_ST *firmware = (struct SmsFirmware_ST *) buffer;
490 struct SmsMsgHdr_ST *msg; 517 struct SmsMsgHdr_ST *msg;
491 u32 mem_address = firmware->StartAddress; 518 u32 mem_address;
492 u8 *payload = firmware->Payload; 519 u8 *payload = firmware->Payload;
493 int rc = 0; 520 int rc = 0;
521 firmware->StartAddress = le32_to_cpu(firmware->StartAddress);
522 firmware->Length = le32_to_cpu(firmware->Length);
523
524 mem_address = firmware->StartAddress;
494 525
495 sms_info("loading FW to addr 0x%x size %d", 526 sms_info("loading FW to addr 0x%x size %d",
496 mem_address, firmware->Length); 527 mem_address, firmware->Length);
@@ -657,6 +688,9 @@ void smscore_unregister_device(struct smscore_device_t *coredev)
657 688
658 kmutex_lock(&g_smscore_deviceslock); 689 kmutex_lock(&g_smscore_deviceslock);
659 690
691 /* Release input device (IR) resources */
692 sms_ir_exit(coredev);
693
660 smscore_notify_clients(coredev); 694 smscore_notify_clients(coredev);
661 smscore_notify_callbacks(coredev, NULL, 0); 695 smscore_notify_callbacks(coredev, NULL, 0);
662 696
@@ -664,7 +698,9 @@ void smscore_unregister_device(struct smscore_device_t *coredev)
664 * onresponse must no longer be called */ 698 * onresponse must no longer be called */
665 699
666 while (1) { 700 while (1) {
667 while ((cb = smscore_getbuffer(coredev))) { 701 while (!list_empty(&coredev->buffers)) {
702 cb = (struct smscore_buffer_t *) coredev->buffers.next;
703 list_del(&cb->entry);
668 kfree(cb); 704 kfree(cb);
669 num_buffers++; 705 num_buffers++;
670 } 706 }
@@ -685,8 +721,10 @@ void smscore_unregister_device(struct smscore_device_t *coredev)
685 721
686 if (coredev->common_buffer) 722 if (coredev->common_buffer)
687 dma_free_coherent(NULL, coredev->common_buffer_size, 723 dma_free_coherent(NULL, coredev->common_buffer_size,
688 coredev->common_buffer, 724 coredev->common_buffer, coredev->common_buffer_phys);
689 coredev->common_buffer_phys); 725
726 if (coredev->fw_buf != NULL)
727 kfree(coredev->fw_buf);
690 728
691 list_del(&coredev->entry); 729 list_del(&coredev->entry);
692 kfree(coredev); 730 kfree(coredev);
@@ -746,7 +784,7 @@ static char *smscore_fw_lkup[][SMS_NUM_OF_DEVICE_TYPES] = {
746 /*BDA*/ 784 /*BDA*/
747 {"none", "dvb_nova_12mhz.inp", "dvb_nova_12mhz_b0.inp", "none"}, 785 {"none", "dvb_nova_12mhz.inp", "dvb_nova_12mhz_b0.inp", "none"},
748 /*ISDBT*/ 786 /*ISDBT*/
749 {"none", "isdbt_nova_12mhz.inp", "dvb_nova_12mhz.inp", "none"}, 787 {"none", "isdbt_nova_12mhz.inp", "isdbt_nova_12mhz_b0.inp", "none"},
750 /*ISDBTBDA*/ 788 /*ISDBTBDA*/
751 {"none", "isdbt_nova_12mhz.inp", "isdbt_nova_12mhz_b0.inp", "none"}, 789 {"none", "isdbt_nova_12mhz.inp", "isdbt_nova_12mhz_b0.inp", "none"},
752 /*CMMB*/ 790 /*CMMB*/
@@ -870,7 +908,7 @@ int smscore_set_device_mode(struct smscore_device_t *coredev, int mode)
870 coredev->device_flags &= ~SMS_DEVICE_NOT_READY; 908 coredev->device_flags &= ~SMS_DEVICE_NOT_READY;
871 } 909 }
872 910
873 if (rc != 0) 911 if (rc < 0)
874 sms_err("return error code %d.", rc); 912 sms_err("return error code %d.", rc);
875 return rc; 913 return rc;
876} 914}
@@ -940,14 +978,11 @@ smscore_client_t *smscore_find_client(struct smscore_device_t *coredev,
940 * 978 *
941 */ 979 */
942void smscore_onresponse(struct smscore_device_t *coredev, 980void smscore_onresponse(struct smscore_device_t *coredev,
943 struct smscore_buffer_t *cb) 981 struct smscore_buffer_t *cb) {
944{ 982 struct SmsMsgHdr_ST *phdr = (struct SmsMsgHdr_ST *) ((u8 *) cb->p
945 struct SmsMsgHdr_ST *phdr = 983 + cb->offset);
946 (struct SmsMsgHdr_ST *)((u8 *) cb->p + cb->offset); 984 struct smscore_client_t *client;
947 struct smscore_client_t *client =
948 smscore_find_client(coredev, phdr->msgType, phdr->msgDstId);
949 int rc = -EBUSY; 985 int rc = -EBUSY;
950
951 static unsigned long last_sample_time; /* = 0; */ 986 static unsigned long last_sample_time; /* = 0; */
952 static int data_total; /* = 0; */ 987 static int data_total; /* = 0; */
953 unsigned long time_now = jiffies_to_msecs(jiffies); 988 unsigned long time_now = jiffies_to_msecs(jiffies);
@@ -965,6 +1000,16 @@ void smscore_onresponse(struct smscore_device_t *coredev,
965 } 1000 }
966 1001
967 data_total += cb->size; 1002 data_total += cb->size;
1003 /* Do we need to re-route? */
1004 if ((phdr->msgType == MSG_SMS_HO_PER_SLICES_IND) ||
1005 (phdr->msgType == MSG_SMS_TRANSMISSION_IND)) {
1006 if (coredev->mode == DEVICE_MODE_DVBT_BDA)
1007 phdr->msgDstId = DVBT_BDA_CONTROL_MSG_ID;
1008 }
1009
1010
1011 client = smscore_find_client(coredev, phdr->msgType, phdr->msgDstId);
1012
968 /* If no client registered for type & id, 1013 /* If no client registered for type & id,
969 * check for control client where type is not registered */ 1014 * check for control client where type is not registered */
970 if (client) 1015 if (client)
@@ -1009,6 +1054,35 @@ void smscore_onresponse(struct smscore_device_t *coredev,
1009 case MSG_SMS_SLEEP_RESUME_COMP_IND: 1054 case MSG_SMS_SLEEP_RESUME_COMP_IND:
1010 complete(&coredev->resume_done); 1055 complete(&coredev->resume_done);
1011 break; 1056 break;
1057 case MSG_SMS_GPIO_CONFIG_EX_RES:
1058 sms_debug("MSG_SMS_GPIO_CONFIG_EX_RES");
1059 complete(&coredev->gpio_configuration_done);
1060 break;
1061 case MSG_SMS_GPIO_SET_LEVEL_RES:
1062 sms_debug("MSG_SMS_GPIO_SET_LEVEL_RES");
1063 complete(&coredev->gpio_set_level_done);
1064 break;
1065 case MSG_SMS_GPIO_GET_LEVEL_RES:
1066 {
1067 u32 *msgdata = (u32 *) phdr;
1068 coredev->gpio_get_res = msgdata[1];
1069 sms_debug("MSG_SMS_GPIO_GET_LEVEL_RES gpio level %d",
1070 coredev->gpio_get_res);
1071 complete(&coredev->gpio_get_level_done);
1072 break;
1073 }
1074 case MSG_SMS_START_IR_RES:
1075 complete(&coredev->ir_init_done);
1076 break;
1077 case MSG_SMS_IR_SAMPLES_IND:
1078 sms_ir_event(coredev,
1079 (const char *)
1080 ((char *)phdr
1081 + sizeof(struct SmsMsgHdr_ST)),
1082 (int)phdr->msgLength
1083 - sizeof(struct SmsMsgHdr_ST));
1084 break;
1085
1012 default: 1086 default:
1013 break; 1087 break;
1014 } 1088 }
@@ -1030,12 +1104,24 @@ struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev)
1030 struct smscore_buffer_t *cb = NULL; 1104 struct smscore_buffer_t *cb = NULL;
1031 unsigned long flags; 1105 unsigned long flags;
1032 1106
1107 DEFINE_WAIT(wait);
1108
1033 spin_lock_irqsave(&coredev->bufferslock, flags); 1109 spin_lock_irqsave(&coredev->bufferslock, flags);
1034 1110
1035 if (!list_empty(&coredev->buffers)) { 1111 /* This function must return a valid buffer, since the buffer list is
1036 cb = (struct smscore_buffer_t *) coredev->buffers.next; 1112 * finite, we check that there is an available buffer, if not, we wait
1037 list_del(&cb->entry); 1113 * until such buffer become available.
1038 } 1114 */
1115
1116 prepare_to_wait(&coredev->buffer_mng_waitq, &wait, TASK_INTERRUPTIBLE);
1117
1118 if (list_empty(&coredev->buffers))
1119 schedule();
1120
1121 finish_wait(&coredev->buffer_mng_waitq, &wait);
1122
1123 cb = (struct smscore_buffer_t *) coredev->buffers.next;
1124 list_del(&cb->entry);
1039 1125
1040 spin_unlock_irqrestore(&coredev->bufferslock, flags); 1126 spin_unlock_irqrestore(&coredev->bufferslock, flags);
1041 1127
@@ -1052,8 +1138,8 @@ EXPORT_SYMBOL_GPL(smscore_getbuffer);
1052 * 1138 *
1053 */ 1139 */
1054void smscore_putbuffer(struct smscore_device_t *coredev, 1140void smscore_putbuffer(struct smscore_device_t *coredev,
1055 struct smscore_buffer_t *cb) 1141 struct smscore_buffer_t *cb) {
1056{ 1142 wake_up_interruptible(&coredev->buffer_mng_waitq);
1057 list_add_locked(&cb->entry, &coredev->buffers, &coredev->bufferslock); 1143 list_add_locked(&cb->entry, &coredev->buffers, &coredev->bufferslock);
1058} 1144}
1059EXPORT_SYMBOL_GPL(smscore_putbuffer); 1145EXPORT_SYMBOL_GPL(smscore_putbuffer);
@@ -1210,8 +1296,9 @@ int smsclient_sendrequest(struct smscore_client_t *client,
1210EXPORT_SYMBOL_GPL(smsclient_sendrequest); 1296EXPORT_SYMBOL_GPL(smsclient_sendrequest);
1211 1297
1212 1298
1299/* old GPIO managments implementation */
1213int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin, 1300int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin,
1214 struct smscore_gpio_config *pinconfig) 1301 struct smscore_config_gpio *pinconfig)
1215{ 1302{
1216 struct { 1303 struct {
1217 struct SmsMsgHdr_ST hdr; 1304 struct SmsMsgHdr_ST hdr;
@@ -1280,35 +1367,254 @@ int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level)
1280 &msg, sizeof(msg)); 1367 &msg, sizeof(msg));
1281} 1368}
1282 1369
1283static int __init smscore_module_init(void) 1370/* new GPIO managment implementation */
1284{ 1371static int GetGpioPinParams(u32 PinNum, u32 *pTranslatedPinNum,
1285 int rc = 0; 1372 u32 *pGroupNum, u32 *pGroupCfg) {
1373
1374 *pGroupCfg = 1;
1375
1376 if (PinNum >= 0 && PinNum <= 1) {
1377 *pTranslatedPinNum = 0;
1378 *pGroupNum = 9;
1379 *pGroupCfg = 2;
1380 } else if (PinNum >= 2 && PinNum <= 6) {
1381 *pTranslatedPinNum = 2;
1382 *pGroupNum = 0;
1383 *pGroupCfg = 2;
1384 } else if (PinNum >= 7 && PinNum <= 11) {
1385 *pTranslatedPinNum = 7;
1386 *pGroupNum = 1;
1387 } else if (PinNum >= 12 && PinNum <= 15) {
1388 *pTranslatedPinNum = 12;
1389 *pGroupNum = 2;
1390 *pGroupCfg = 3;
1391 } else if (PinNum == 16) {
1392 *pTranslatedPinNum = 16;
1393 *pGroupNum = 23;
1394 } else if (PinNum >= 17 && PinNum <= 24) {
1395 *pTranslatedPinNum = 17;
1396 *pGroupNum = 3;
1397 } else if (PinNum == 25) {
1398 *pTranslatedPinNum = 25;
1399 *pGroupNum = 6;
1400 } else if (PinNum >= 26 && PinNum <= 28) {
1401 *pTranslatedPinNum = 26;
1402 *pGroupNum = 4;
1403 } else if (PinNum == 29) {
1404 *pTranslatedPinNum = 29;
1405 *pGroupNum = 5;
1406 *pGroupCfg = 2;
1407 } else if (PinNum == 30) {
1408 *pTranslatedPinNum = 30;
1409 *pGroupNum = 8;
1410 } else if (PinNum == 31) {
1411 *pTranslatedPinNum = 31;
1412 *pGroupNum = 17;
1413 } else
1414 return -1;
1286 1415
1287 INIT_LIST_HEAD(&g_smscore_notifyees); 1416 *pGroupCfg <<= 24;
1288 INIT_LIST_HEAD(&g_smscore_devices);
1289 kmutex_init(&g_smscore_deviceslock);
1290 1417
1291 INIT_LIST_HEAD(&g_smscore_registry); 1418 return 0;
1292 kmutex_init(&g_smscore_registrylock); 1419}
1420
1421int smscore_gpio_configure(struct smscore_device_t *coredev, u8 PinNum,
1422 struct smscore_gpio_config *pGpioConfig) {
1423
1424 u32 totalLen;
1425 u32 TranslatedPinNum;
1426 u32 GroupNum;
1427 u32 ElectricChar;
1428 u32 groupCfg;
1429 void *buffer;
1430 int rc;
1431
1432 struct SetGpioMsg {
1433 struct SmsMsgHdr_ST xMsgHeader;
1434 u32 msgData[6];
1435 } *pMsg;
1436
1437
1438 if (PinNum > MAX_GPIO_PIN_NUMBER)
1439 return -EINVAL;
1440
1441 if (pGpioConfig == NULL)
1442 return -EINVAL;
1443
1444 totalLen = sizeof(struct SmsMsgHdr_ST) + (sizeof(u32) * 6);
1445
1446 buffer = kmalloc(totalLen + SMS_DMA_ALIGNMENT,
1447 GFP_KERNEL | GFP_DMA);
1448 if (!buffer)
1449 return -ENOMEM;
1450
1451 pMsg = (struct SetGpioMsg *) SMS_ALIGN_ADDRESS(buffer);
1452
1453 pMsg->xMsgHeader.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
1454 pMsg->xMsgHeader.msgDstId = HIF_TASK;
1455 pMsg->xMsgHeader.msgFlags = 0;
1456 pMsg->xMsgHeader.msgLength = (u16) totalLen;
1457 pMsg->msgData[0] = PinNum;
1458
1459 if (!(coredev->device_flags & SMS_DEVICE_FAMILY2)) {
1460 pMsg->xMsgHeader.msgType = MSG_SMS_GPIO_CONFIG_REQ;
1461 if (GetGpioPinParams(PinNum, &TranslatedPinNum, &GroupNum,
1462 &groupCfg) != 0)
1463 return -EINVAL;
1464
1465 pMsg->msgData[1] = TranslatedPinNum;
1466 pMsg->msgData[2] = GroupNum;
1467 ElectricChar = (pGpioConfig->PullUpDown)
1468 | (pGpioConfig->InputCharacteristics << 2)
1469 | (pGpioConfig->OutputSlewRate << 3)
1470 | (pGpioConfig->OutputDriving << 4);
1471 pMsg->msgData[3] = ElectricChar;
1472 pMsg->msgData[4] = pGpioConfig->Direction;
1473 pMsg->msgData[5] = groupCfg;
1474 } else {
1475 pMsg->xMsgHeader.msgType = MSG_SMS_GPIO_CONFIG_EX_REQ;
1476 pMsg->msgData[1] = pGpioConfig->PullUpDown;
1477 pMsg->msgData[2] = pGpioConfig->OutputSlewRate;
1478 pMsg->msgData[3] = pGpioConfig->OutputDriving;
1479 pMsg->msgData[4] = pGpioConfig->Direction;
1480 pMsg->msgData[5] = 0;
1481 }
1482
1483 smsendian_handle_tx_message((struct SmsMsgHdr_ST *)pMsg);
1484 rc = smscore_sendrequest_and_wait(coredev, pMsg, totalLen,
1485 &coredev->gpio_configuration_done);
1486
1487 if (rc != 0) {
1488 if (rc == -ETIME)
1489 sms_err("smscore_gpio_configure timeout");
1490 else
1491 sms_err("smscore_gpio_configure error");
1492 }
1493 kfree(buffer);
1494
1495 return rc;
1496}
1497
1498int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 PinNum,
1499 u8 NewLevel) {
1500
1501 u32 totalLen;
1502 int rc;
1503 void *buffer;
1504
1505 struct SetGpioMsg {
1506 struct SmsMsgHdr_ST xMsgHeader;
1507 u32 msgData[3]; /* keep it 3 ! */
1508 } *pMsg;
1509
1510 if ((NewLevel > 1) || (PinNum > MAX_GPIO_PIN_NUMBER) ||
1511 (PinNum > MAX_GPIO_PIN_NUMBER))
1512 return -EINVAL;
1293 1513
1514 totalLen = sizeof(struct SmsMsgHdr_ST) +
1515 (3 * sizeof(u32)); /* keep it 3 ! */
1294 1516
1517 buffer = kmalloc(totalLen + SMS_DMA_ALIGNMENT,
1518 GFP_KERNEL | GFP_DMA);
1519 if (!buffer)
1520 return -ENOMEM;
1295 1521
1522 pMsg = (struct SetGpioMsg *) SMS_ALIGN_ADDRESS(buffer);
1296 1523
1524 pMsg->xMsgHeader.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
1525 pMsg->xMsgHeader.msgDstId = HIF_TASK;
1526 pMsg->xMsgHeader.msgFlags = 0;
1527 pMsg->xMsgHeader.msgType = MSG_SMS_GPIO_SET_LEVEL_REQ;
1528 pMsg->xMsgHeader.msgLength = (u16) totalLen;
1529 pMsg->msgData[0] = PinNum;
1530 pMsg->msgData[1] = NewLevel;
1297 1531
1532 /* Send message to SMS */
1533 smsendian_handle_tx_message((struct SmsMsgHdr_ST *)pMsg);
1534 rc = smscore_sendrequest_and_wait(coredev, pMsg, totalLen,
1535 &coredev->gpio_set_level_done);
1536
1537 if (rc != 0) {
1538 if (rc == -ETIME)
1539 sms_err("smscore_gpio_set_level timeout");
1540 else
1541 sms_err("smscore_gpio_set_level error");
1542 }
1543 kfree(buffer);
1298 1544
1299 return rc; 1545 return rc;
1300 sms_debug("rc %d", rc); 1546}
1547
1548int smscore_gpio_get_level(struct smscore_device_t *coredev, u8 PinNum,
1549 u8 *level) {
1550
1551 u32 totalLen;
1552 int rc;
1553 void *buffer;
1554
1555 struct SetGpioMsg {
1556 struct SmsMsgHdr_ST xMsgHeader;
1557 u32 msgData[2];
1558 } *pMsg;
1559
1560
1561 if (PinNum > MAX_GPIO_PIN_NUMBER)
1562 return -EINVAL;
1563
1564 totalLen = sizeof(struct SmsMsgHdr_ST) + (2 * sizeof(u32));
1565
1566 buffer = kmalloc(totalLen + SMS_DMA_ALIGNMENT,
1567 GFP_KERNEL | GFP_DMA);
1568 if (!buffer)
1569 return -ENOMEM;
1570
1571 pMsg = (struct SetGpioMsg *) SMS_ALIGN_ADDRESS(buffer);
1572
1573 pMsg->xMsgHeader.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
1574 pMsg->xMsgHeader.msgDstId = HIF_TASK;
1575 pMsg->xMsgHeader.msgFlags = 0;
1576 pMsg->xMsgHeader.msgType = MSG_SMS_GPIO_GET_LEVEL_REQ;
1577 pMsg->xMsgHeader.msgLength = (u16) totalLen;
1578 pMsg->msgData[0] = PinNum;
1579 pMsg->msgData[1] = 0;
1580
1581 /* Send message to SMS */
1582 smsendian_handle_tx_message((struct SmsMsgHdr_ST *)pMsg);
1583 rc = smscore_sendrequest_and_wait(coredev, pMsg, totalLen,
1584 &coredev->gpio_get_level_done);
1585
1586 if (rc != 0) {
1587 if (rc == -ETIME)
1588 sms_err("smscore_gpio_get_level timeout");
1589 else
1590 sms_err("smscore_gpio_get_level error");
1591 }
1592 kfree(buffer);
1593
1594 /* Its a race between other gpio_get_level() and the copy of the single
1595 * global 'coredev->gpio_get_res' to the function's variable 'level'
1596 */
1597 *level = coredev->gpio_get_res;
1301 1598
1302 return rc; 1599 return rc;
1303} 1600}
1304 1601
1305static void __exit smscore_module_exit(void) 1602static int __init smscore_module_init(void)
1306{ 1603{
1604 int rc = 0;
1307 1605
1606 INIT_LIST_HEAD(&g_smscore_notifyees);
1607 INIT_LIST_HEAD(&g_smscore_devices);
1608 kmutex_init(&g_smscore_deviceslock);
1308 1609
1610 INIT_LIST_HEAD(&g_smscore_registry);
1611 kmutex_init(&g_smscore_registrylock);
1309 1612
1613 return rc;
1614}
1310 1615
1311 1616static void __exit smscore_module_exit(void)
1617{
1312 kmutex_lock(&g_smscore_deviceslock); 1618 kmutex_lock(&g_smscore_deviceslock);
1313 while (!list_empty(&g_smscore_notifyees)) { 1619 while (!list_empty(&g_smscore_notifyees)) {
1314 struct smscore_device_notifyee_t *notifyee = 1620 struct smscore_device_notifyee_t *notifyee =
diff --git a/drivers/media/dvb/siano/smscoreapi.h b/drivers/media/dvb/siano/smscoreapi.h
index 548de9056e8b..f1108c64e895 100644
--- a/drivers/media/dvb/siano/smscoreapi.h
+++ b/drivers/media/dvb/siano/smscoreapi.h
@@ -1,26 +1,26 @@
1/* 1/****************************************************************
2 * Driver for the Siano SMS1xxx USB dongle 2
3 * 3Siano Mobile Silicon, Inc.
4 * author: Anatoly Greenblat 4MDTV receiver kernel modules.
5 * 5Copyright (C) 2006-2008, Uri Shkolnik, Anatoly Greenblat
6 * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc. 6
7 * 7This program is free software: you can redistribute it and/or modify
8 * This program is free software; you can redistribute it and/or modify 8it under the terms of the GNU General Public License as published by
9 * it under the terms of the GNU General Public License version 2 as 9the Free Software Foundation, either version 2 of the License, or
10 * published by the Free Software Foundation; 10(at your option) any later version.
11 * 11
12 * Software distributed under the License is distributed on an "AS IS" 12 This program is distributed in the hope that it will be useful,
13 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 13but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * 14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * See the GNU General Public License for more details. 15GNU General Public License for more details.
16 * 16
17 * You should have received a copy of the GNU General Public License 17You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18along with this program. If not, see <http://www.gnu.org/licenses/>.
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19
20 */ 20****************************************************************/
21 21
22#ifndef __smscoreapi_h__ 22#ifndef __SMS_CORE_API_H__
23#define __smscoreapi_h__ 23#define __SMS_CORE_API_H__
24 24
25#include <linux/version.h> 25#include <linux/version.h>
26#include <linux/device.h> 26#include <linux/device.h>
@@ -28,14 +28,13 @@
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/scatterlist.h> 29#include <linux/scatterlist.h>
30#include <linux/types.h> 30#include <linux/types.h>
31#include <asm/page.h>
32#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/wait.h>
33#include <linux/timer.h>
33 34
34#include "dmxdev.h" 35#include <asm/page.h>
35#include "dvbdev.h"
36#include "dvb_demux.h"
37#include "dvb_frontend.h"
38 36
37#include "smsir.h"
39 38
40#define kmutex_init(_p_) mutex_init(_p_) 39#define kmutex_init(_p_) mutex_init(_p_)
41#define kmutex_lock(_p_) mutex_lock(_p_) 40#define kmutex_lock(_p_) mutex_lock(_p_)
@@ -46,13 +45,14 @@
46#define min(a, b) (((a) < (b)) ? (a) : (b)) 45#define min(a, b) (((a) < (b)) ? (a) : (b))
47#endif 46#endif
48 47
49#define SMS_ALLOC_ALIGNMENT 128 48#define SMS_PROTOCOL_MAX_RAOUNDTRIP_MS (10000)
50#define SMS_DMA_ALIGNMENT 16 49#define SMS_ALLOC_ALIGNMENT 128
50#define SMS_DMA_ALIGNMENT 16
51#define SMS_ALIGN_ADDRESS(addr) \ 51#define SMS_ALIGN_ADDRESS(addr) \
52 ((((uintptr_t)(addr)) + (SMS_DMA_ALIGNMENT-1)) & ~(SMS_DMA_ALIGNMENT-1)) 52 ((((uintptr_t)(addr)) + (SMS_DMA_ALIGNMENT-1)) & ~(SMS_DMA_ALIGNMENT-1))
53 53
54#define SMS_DEVICE_FAMILY2 1 54#define SMS_DEVICE_FAMILY2 1
55#define SMS_ROM_NO_RESPONSE 2 55#define SMS_ROM_NO_RESPONSE 2
56#define SMS_DEVICE_NOT_READY 0x8000000 56#define SMS_DEVICE_NOT_READY 0x8000000
57 57
58enum sms_device_type_st { 58enum sms_device_type_st {
@@ -83,13 +83,13 @@ typedef void (*onremove_t)(void *context);
83struct smscore_buffer_t { 83struct smscore_buffer_t {
84 /* public members, once passed to clients can be changed freely */ 84 /* public members, once passed to clients can be changed freely */
85 struct list_head entry; 85 struct list_head entry;
86 int size; 86 int size;
87 int offset; 87 int offset;
88 88
89 /* private members, read-only for clients */ 89 /* private members, read-only for clients */
90 void *p; 90 void *p;
91 dma_addr_t phys; 91 dma_addr_t phys;
92 unsigned long offset_in_common; 92 unsigned long offset_in_common;
93}; 93};
94 94
95struct smsdevice_params_t { 95struct smsdevice_params_t {
@@ -116,10 +116,63 @@ struct smsclient_params_t {
116 int data_type; 116 int data_type;
117 onresponse_t onresponse_handler; 117 onresponse_t onresponse_handler;
118 onremove_t onremove_handler; 118 onremove_t onremove_handler;
119
120 void *context; 119 void *context;
121}; 120};
122 121
122struct smscore_device_t {
123 struct list_head entry;
124
125 struct list_head clients;
126 struct list_head subclients;
127 spinlock_t clientslock;
128
129 struct list_head buffers;
130 spinlock_t bufferslock;
131 int num_buffers;
132
133 void *common_buffer;
134 int common_buffer_size;
135 dma_addr_t common_buffer_phys;
136
137 void *context;
138 struct device *device;
139
140 char devpath[32];
141 unsigned long device_flags;
142
143 setmode_t setmode_handler;
144 detectmode_t detectmode_handler;
145 sendrequest_t sendrequest_handler;
146 preload_t preload_handler;
147 postload_t postload_handler;
148
149 int mode, modes_supported;
150
151 /* host <--> device messages */
152 struct completion version_ex_done, data_download_done, trigger_done;
153 struct completion init_device_done, reload_start_done, resume_done;
154 struct completion gpio_configuration_done, gpio_set_level_done;
155 struct completion gpio_get_level_done, ir_init_done;
156
157 /* Buffer management */
158 wait_queue_head_t buffer_mng_waitq;
159
160 /* GPIO */
161 int gpio_get_res;
162
163 /* Target hardware board */
164 int board_id;
165
166 /* Firmware */
167 u8 *fw_buf;
168 u32 fw_buf_size;
169
170 /* Infrared (IR) */
171 struct ir_t ir;
172
173 int led_state;
174};
175
123/* GPIO definitions for antenna frequency domain control (SMS8021) */ 176/* GPIO definitions for antenna frequency domain control (SMS8021) */
124#define SMS_ANTENNA_GPIO_0 1 177#define SMS_ANTENNA_GPIO_0 1
125#define SMS_ANTENNA_GPIO_1 0 178#define SMS_ANTENNA_GPIO_1 0
@@ -154,18 +207,15 @@ struct smsclient_params_t {
154#define MSG_SMS_INIT_DEVICE_RES 579 207#define MSG_SMS_INIT_DEVICE_RES 579
155#define MSG_SMS_ADD_PID_FILTER_REQ 601 208#define MSG_SMS_ADD_PID_FILTER_REQ 601
156#define MSG_SMS_ADD_PID_FILTER_RES 602 209#define MSG_SMS_ADD_PID_FILTER_RES 602
157#define MSG_SMS_REMOVE_PID_FILTER_REQ 603 210#define MSG_SMS_REMOVE_PID_FILTER_REQ 603
158#define MSG_SMS_REMOVE_PID_FILTER_RES 604 211#define MSG_SMS_REMOVE_PID_FILTER_RES 604
159#define MSG_SMS_DAB_CHANNEL 607 212#define MSG_SMS_DAB_CHANNEL 607
160#define MSG_SMS_GET_PID_FILTER_LIST_REQ 608 213#define MSG_SMS_GET_PID_FILTER_LIST_REQ 608
161#define MSG_SMS_GET_PID_FILTER_LIST_RES 609 214#define MSG_SMS_GET_PID_FILTER_LIST_RES 609
162#define MSG_SMS_GET_STATISTICS_REQ 615 215#define MSG_SMS_HO_PER_SLICES_IND 630
163#define MSG_SMS_GET_STATISTICS_RES 616 216#define MSG_SMS_SET_ANTENNA_CONFIG_REQ 651
164#define MSG_SMS_SET_ANTENNA_CONFIG_REQ 651 217#define MSG_SMS_SET_ANTENNA_CONFIG_RES 652
165#define MSG_SMS_SET_ANTENNA_CONFIG_RES 652 218#define MSG_SMS_SLEEP_RESUME_COMP_IND 655
166#define MSG_SMS_GET_STATISTICS_EX_REQ 653
167#define MSG_SMS_GET_STATISTICS_EX_RES 654
168#define MSG_SMS_SLEEP_RESUME_COMP_IND 655
169#define MSG_SMS_DATA_DOWNLOAD_REQ 660 219#define MSG_SMS_DATA_DOWNLOAD_REQ 660
170#define MSG_SMS_DATA_DOWNLOAD_RES 661 220#define MSG_SMS_DATA_DOWNLOAD_RES 661
171#define MSG_SMS_SWDOWNLOAD_TRIGGER_REQ 664 221#define MSG_SMS_SWDOWNLOAD_TRIGGER_REQ 664
@@ -190,14 +240,31 @@ struct smsclient_params_t {
190#define MSG_SMS_GPIO_CONFIG_EX_RES 713 240#define MSG_SMS_GPIO_CONFIG_EX_RES 713
191#define MSG_SMS_ISDBT_TUNE_REQ 776 241#define MSG_SMS_ISDBT_TUNE_REQ 776
192#define MSG_SMS_ISDBT_TUNE_RES 777 242#define MSG_SMS_ISDBT_TUNE_RES 777
243#define MSG_SMS_TRANSMISSION_IND 782
244#define MSG_SMS_START_IR_REQ 800
245#define MSG_SMS_START_IR_RES 801
246#define MSG_SMS_IR_SAMPLES_IND 802
247#define MSG_SMS_SIGNAL_DETECTED_IND 827
248#define MSG_SMS_NO_SIGNAL_IND 828
193 249
194#define SMS_INIT_MSG_EX(ptr, type, src, dst, len) do { \ 250#define SMS_INIT_MSG_EX(ptr, type, src, dst, len) do { \
195 (ptr)->msgType = type; (ptr)->msgSrcId = src; (ptr)->msgDstId = dst; \ 251 (ptr)->msgType = type; (ptr)->msgSrcId = src; (ptr)->msgDstId = dst; \
196 (ptr)->msgLength = len; (ptr)->msgFlags = 0; \ 252 (ptr)->msgLength = len; (ptr)->msgFlags = 0; \
197} while (0) 253} while (0)
254
198#define SMS_INIT_MSG(ptr, type, len) \ 255#define SMS_INIT_MSG(ptr, type, len) \
199 SMS_INIT_MSG_EX(ptr, type, 0, HIF_TASK, len) 256 SMS_INIT_MSG_EX(ptr, type, 0, HIF_TASK, len)
200 257
258enum SMS_DVB3_EVENTS {
259 DVB3_EVENT_INIT = 0,
260 DVB3_EVENT_SLEEP,
261 DVB3_EVENT_HOTPLUG,
262 DVB3_EVENT_FE_LOCK,
263 DVB3_EVENT_FE_UNLOCK,
264 DVB3_EVENT_UNC_OK,
265 DVB3_EVENT_UNC_ERR
266};
267
201enum SMS_DEVICE_MODE { 268enum SMS_DEVICE_MODE {
202 DEVICE_MODE_NONE = -1, 269 DEVICE_MODE_NONE = -1,
203 DEVICE_MODE_DVBT = 0, 270 DEVICE_MODE_DVBT = 0,
@@ -221,8 +288,13 @@ struct SmsMsgHdr_ST {
221}; 288};
222 289
223struct SmsMsgData_ST { 290struct SmsMsgData_ST {
224 struct SmsMsgHdr_ST xMsgHeader; 291 struct SmsMsgHdr_ST xMsgHeader;
225 u32 msgData[1]; 292 u32 msgData[1];
293};
294
295struct SmsMsgData_ST2 {
296 struct SmsMsgHdr_ST xMsgHeader;
297 u32 msgData[2];
226}; 298};
227 299
228struct SmsDataDownload_ST { 300struct SmsDataDownload_ST {
@@ -238,11 +310,12 @@ struct SmsVersionRes_ST {
238 u8 Step; /* 0 - Step A */ 310 u8 Step; /* 0 - Step A */
239 u8 MetalFix; /* 0 - Metal 0 */ 311 u8 MetalFix; /* 0 - Metal 0 */
240 312
241 u8 FirmwareId; /* 0xFF � ROM, otherwise the 313 /* FirmwareId 0xFF if ROM, otherwise the
242 * value indicated by 314 * value indicated by SMSHOSTLIB_DEVICE_MODES_E */
243 * SMSHOSTLIB_DEVICE_MODES_E */ 315 u8 FirmwareId;
244 u8 SupportedProtocols; /* Bitwise OR combination of 316 /* SupportedProtocols Bitwise OR combination of
245 * supported protocols */ 317 * supported protocols */
318 u8 SupportedProtocols;
246 319
247 u8 VersionMajor; 320 u8 VersionMajor;
248 u8 VersionMinor; 321 u8 VersionMinor;
@@ -264,86 +337,219 @@ struct SmsFirmware_ST {
264 u8 Payload[1]; 337 u8 Payload[1];
265}; 338};
266 339
267struct SMSHOSTLIB_STATISTICS_ST { 340/* Statistics information returned as response for
268 u32 Reserved; /* Reserved */ 341 * SmsHostApiGetStatistics_Req */
342struct SMSHOSTLIB_STATISTICS_S {
343 u32 Reserved; /* Reserved */
269 344
270 /* Common parameters */ 345 /* Common parameters */
271 u32 IsRfLocked; /* 0 - not locked, 1 - locked */ 346 u32 IsRfLocked; /* 0 - not locked, 1 - locked */
272 u32 IsDemodLocked; /* 0 - not locked, 1 - locked */ 347 u32 IsDemodLocked; /* 0 - not locked, 1 - locked */
273 u32 IsExternalLNAOn; /* 0 - external LNA off, 1 - external LNA on */ 348 u32 IsExternalLNAOn; /* 0 - external LNA off, 1 - external LNA on */
274 349
275 /* Reception quality */ 350 /* Reception quality */
276 s32 SNR; /* dB */ 351 s32 SNR; /* dB */
277 u32 BER; /* Post Viterbi BER [1E-5] */ 352 u32 BER; /* Post Viterbi BER [1E-5] */
278 u32 FIB_CRC; /* CRC errors percentage, valid only for DAB */ 353 u32 FIB_CRC; /* CRC errors percentage, valid only for DAB */
279 u32 TS_PER; /* Transport stream PER, 0xFFFFFFFF indicate N/A, 354 u32 TS_PER; /* Transport stream PER,
280 * valid only for DVB-T/H */ 355 0xFFFFFFFF indicate N/A, valid only for DVB-T/H */
281 u32 MFER; /* DVB-H frame error rate in percentage, 356 u32 MFER; /* DVB-H frame error rate in percentage,
282 * 0xFFFFFFFF indicate N/A, valid only for DVB-H */ 357 0xFFFFFFFF indicate N/A, valid only for DVB-H */
283 s32 RSSI; /* dBm */ 358 s32 RSSI; /* dBm */
284 s32 InBandPwr; /* In band power in dBM */ 359 s32 InBandPwr; /* In band power in dBM */
285 s32 CarrierOffset; /* Carrier Offset in bin/1024 */ 360 s32 CarrierOffset; /* Carrier Offset in bin/1024 */
286 361
287 /* Transmission parameters, valid only for DVB-T/H */ 362 /* Transmission parameters */
288 u32 Frequency; /* Frequency in Hz */ 363 u32 Frequency; /* Frequency in Hz */
289 u32 Bandwidth; /* Bandwidth in MHz */ 364 u32 Bandwidth; /* Bandwidth in MHz, valid only for DVB-T/H */
290 u32 TransmissionMode; /* Transmission Mode, for DAB modes 1-4, 365 u32 TransmissionMode; /* Transmission Mode, for DAB modes 1-4,
291 * for DVB-T/H FFT mode carriers in Kilos */ 366 for DVB-T/H FFT mode carriers in Kilos */
292 u32 ModemState; /* from SMS_DvbModemState_ET */ 367 u32 ModemState; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET,
293 u32 GuardInterval; /* Guard Interval, 1 divided by value */ 368 valid only for DVB-T/H */
294 u32 CodeRate; /* Code Rate from SMS_DvbModemState_ET */ 369 u32 GuardInterval; /* Guard Interval from
295 u32 LPCodeRate; /* Low Priority Code Rate from SMS_DvbModemState_ET */ 370 SMSHOSTLIB_GUARD_INTERVALS_ET, valid only for DVB-T/H */
296 u32 Hierarchy; /* Hierarchy from SMS_Hierarchy_ET */ 371 u32 CodeRate; /* Code Rate from SMSHOSTLIB_CODE_RATE_ET,
297 u32 Constellation; /* Constellation from SMS_Constellation_ET */ 372 valid only for DVB-T/H */
373 u32 LPCodeRate; /* Low Priority Code Rate from
374 SMSHOSTLIB_CODE_RATE_ET, valid only for DVB-T/H */
375 u32 Hierarchy; /* Hierarchy from SMSHOSTLIB_HIERARCHY_ET,
376 valid only for DVB-T/H */
377 u32 Constellation; /* Constellation from
378 SMSHOSTLIB_CONSTELLATION_ET, valid only for DVB-T/H */
298 379
299 /* Burst parameters, valid only for DVB-H */ 380 /* Burst parameters, valid only for DVB-H */
300 u32 BurstSize; /* Current burst size in bytes */ 381 u32 BurstSize; /* Current burst size in bytes,
301 u32 BurstDuration; /* Current burst duration in mSec */ 382 valid only for DVB-H */
302 u32 BurstCycleTime; /* Current burst cycle time in mSec */ 383 u32 BurstDuration; /* Current burst duration in mSec,
303 u32 CalculatedBurstCycleTime; /* Current burst cycle time in mSec, 384 valid only for DVB-H */
304 * as calculated by demodulator */ 385 u32 BurstCycleTime; /* Current burst cycle time in mSec,
305 u32 NumOfRows; /* Number of rows in MPE table */ 386 valid only for DVB-H */
306 u32 NumOfPaddCols; /* Number of padding columns in MPE table */ 387 u32 CalculatedBurstCycleTime;/* Current burst cycle time in mSec,
307 u32 NumOfPunctCols; /* Number of puncturing columns in MPE table */ 388 as calculated by demodulator, valid only for DVB-H */
308 /* Burst parameters */ 389 u32 NumOfRows; /* Number of rows in MPE table,
309 u32 ErrorTSPackets; /* Number of erroneous transport-stream packets */ 390 valid only for DVB-H */
310 u32 TotalTSPackets; /* Total number of transport-stream packets */ 391 u32 NumOfPaddCols; /* Number of padding columns in MPE table,
311 u32 NumOfValidMpeTlbs; /* Number of MPE tables which do not include 392 valid only for DVB-H */
312 * errors after MPE RS decoding */ 393 u32 NumOfPunctCols; /* Number of puncturing columns in MPE table,
313 u32 NumOfInvalidMpeTlbs; /* Number of MPE tables which include errors 394 valid only for DVB-H */
314 * after MPE RS decoding */ 395 u32 ErrorTSPackets; /* Number of erroneous
315 u32 NumOfCorrectedMpeTlbs; /* Number of MPE tables which were corrected 396 transport-stream packets */
316 * by MPE RS decoding */ 397 u32 TotalTSPackets; /* Total number of transport-stream packets */
317 398 u32 NumOfValidMpeTlbs; /* Number of MPE tables which do not include
399 errors after MPE RS decoding */
400 u32 NumOfInvalidMpeTlbs;/* Number of MPE tables which include errors
401 after MPE RS decoding */
402 u32 NumOfCorrectedMpeTlbs;/* Number of MPE tables which were
403 corrected by MPE RS decoding */
318 /* Common params */ 404 /* Common params */
319 u32 BERErrorCount; /* Number of errornous SYNC bits. */ 405 u32 BERErrorCount; /* Number of errornous SYNC bits. */
320 u32 BERBitCount; /* Total number of SYNC bits. */ 406 u32 BERBitCount; /* Total number of SYNC bits. */
321 407
322 /* Interface information */ 408 /* Interface information */
323 u32 SmsToHostTxErrors; /* Total number of transmission errors. */ 409 u32 SmsToHostTxErrors; /* Total number of transmission errors. */
324 410
325 /* DAB/T-DMB */ 411 /* DAB/T-DMB */
326 u32 PreBER; /* DAB/T-DMB only: Pre Viterbi BER [1E-5] */ 412 u32 PreBER; /* DAB/T-DMB only: Pre Viterbi BER [1E-5] */
327 413
328 /* DVB-H TPS parameters */ 414 /* DVB-H TPS parameters */
329 u32 CellId; /* TPS Cell ID in bits 15..0, bits 31..16 zero; 415 u32 CellId; /* TPS Cell ID in bits 15..0, bits 31..16 zero;
330 * if set to 0xFFFFFFFF cell_id not yet recovered */ 416 if set to 0xFFFFFFFF cell_id not yet recovered */
417 u32 DvbhSrvIndHP; /* DVB-H service indication info, bit 1 -
418 Time Slicing indicator, bit 0 - MPE-FEC indicator */
419 u32 DvbhSrvIndLP; /* DVB-H service indication info, bit 1 -
420 Time Slicing indicator, bit 0 - MPE-FEC indicator */
331 421
422 u32 NumMPEReceived; /* DVB-H, Num MPE section received */
423
424 u32 ReservedFields[10]; /* Reserved */
332}; 425};
333 426
334struct SmsMsgStatisticsInfo_ST { 427struct PID_STATISTICS_DATA_S {
335 u32 RequestResult; 428 struct PID_BURST_S {
429 u32 size;
430 u32 padding_cols;
431 u32 punct_cols;
432 u32 duration;
433 u32 cycle;
434 u32 calc_cycle;
435 } burst;
436
437 u32 tot_tbl_cnt;
438 u32 invalid_tbl_cnt;
439 u32 tot_cor_tbl;
440};
336 441
337 struct SMSHOSTLIB_STATISTICS_ST Stat; 442struct PID_DATA_S {
443 u32 pid;
444 u32 num_rows;
445 struct PID_STATISTICS_DATA_S pid_statistics;
446};
338 447
339 /* Split the calc of the SNR in DAB */ 448#define CORRECT_STAT_RSSI(_stat) ((_stat).RSSI *= -1)
340 u32 Signal; /* dB */ 449#define CORRECT_STAT_BANDWIDTH(_stat) (_stat.Bandwidth = 8 - _stat.Bandwidth)
341 u32 Noise; /* dB */ 450#define CORRECT_STAT_TRANSMISSON_MODE(_stat) \
451 if (_stat.TransmissionMode == 0) \
452 _stat.TransmissionMode = 2; \
453 else if (_stat.TransmissionMode == 1) \
454 _stat.TransmissionMode = 8; \
455 else \
456 _stat.TransmissionMode = 4;
457
458struct TRANSMISSION_STATISTICS_S {
459 u32 Frequency; /* Frequency in Hz */
460 u32 Bandwidth; /* Bandwidth in MHz */
461 u32 TransmissionMode; /* FFT mode carriers in Kilos */
462 u32 GuardInterval; /* Guard Interval from
463 SMSHOSTLIB_GUARD_INTERVALS_ET */
464 u32 CodeRate; /* Code Rate from SMSHOSTLIB_CODE_RATE_ET */
465 u32 LPCodeRate; /* Low Priority Code Rate from
466 SMSHOSTLIB_CODE_RATE_ET */
467 u32 Hierarchy; /* Hierarchy from SMSHOSTLIB_HIERARCHY_ET */
468 u32 Constellation; /* Constellation from
469 SMSHOSTLIB_CONSTELLATION_ET */
342 470
471 /* DVB-H TPS parameters */
472 u32 CellId; /* TPS Cell ID in bits 15..0, bits 31..16 zero;
473 if set to 0xFFFFFFFF cell_id not yet recovered */
474 u32 DvbhSrvIndHP; /* DVB-H service indication info, bit 1 -
475 Time Slicing indicator, bit 0 - MPE-FEC indicator */
476 u32 DvbhSrvIndLP; /* DVB-H service indication info, bit 1 -
477 Time Slicing indicator, bit 0 - MPE-FEC indicator */
478 u32 IsDemodLocked; /* 0 - not locked, 1 - locked */
343}; 479};
344 480
481struct RECEPTION_STATISTICS_S {
482 u32 IsRfLocked; /* 0 - not locked, 1 - locked */
483 u32 IsDemodLocked; /* 0 - not locked, 1 - locked */
484 u32 IsExternalLNAOn; /* 0 - external LNA off, 1 - external LNA on */
485
486 u32 ModemState; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */
487 s32 SNR; /* dB */
488 u32 BER; /* Post Viterbi BER [1E-5] */
489 u32 BERErrorCount; /* Number of erronous SYNC bits. */
490 u32 BERBitCount; /* Total number of SYNC bits. */
491 u32 TS_PER; /* Transport stream PER,
492 0xFFFFFFFF indicate N/A */
493 u32 MFER; /* DVB-H frame error rate in percentage,
494 0xFFFFFFFF indicate N/A, valid only for DVB-H */
495 s32 RSSI; /* dBm */
496 s32 InBandPwr; /* In band power in dBM */
497 s32 CarrierOffset; /* Carrier Offset in bin/1024 */
498 u32 ErrorTSPackets; /* Number of erroneous
499 transport-stream packets */
500 u32 TotalTSPackets; /* Total number of transport-stream packets */
501
502 s32 MRC_SNR; /* dB */
503 s32 MRC_RSSI; /* dBm */
504 s32 MRC_InBandPwr; /* In band power in dBM */
505};
345 506
346struct smscore_gpio_config { 507
508/* Statistics information returned as response for
509 * SmsHostApiGetStatisticsEx_Req for DVB applications, SMS1100 and up */
510struct SMSHOSTLIB_STATISTICS_DVB_S {
511 /* Reception */
512 struct RECEPTION_STATISTICS_S ReceptionData;
513
514 /* Transmission parameters */
515 struct TRANSMISSION_STATISTICS_S TransmissionData;
516
517 /* Burst parameters, valid only for DVB-H */
518#define SRVM_MAX_PID_FILTERS 8
519 struct PID_DATA_S PidData[SRVM_MAX_PID_FILTERS];
520};
521
522struct SRVM_SIGNAL_STATUS_S {
523 u32 result;
524 u32 snr;
525 u32 tsPackets;
526 u32 etsPackets;
527 u32 constellation;
528 u32 hpCode;
529 u32 tpsSrvIndLP;
530 u32 tpsSrvIndHP;
531 u32 cellId;
532 u32 reason;
533
534 s32 inBandPower;
535 u32 requestId;
536};
537
538struct SMSHOSTLIB_I2C_REQ_ST {
539 u32 DeviceAddress; /* I2c device address */
540 u32 WriteCount; /* number of bytes to write */
541 u32 ReadCount; /* number of bytes to read */
542 u8 Data[1];
543};
544
545struct SMSHOSTLIB_I2C_RES_ST {
546 u32 Status; /* non-zero value in case of failure */
547 u32 ReadCount; /* number of bytes read */
548 u8 Data[1];
549};
550
551
552struct smscore_config_gpio {
347#define SMS_GPIO_DIRECTION_INPUT 0 553#define SMS_GPIO_DIRECTION_INPUT 0
348#define SMS_GPIO_DIRECTION_OUTPUT 1 554#define SMS_GPIO_DIRECTION_OUTPUT 1
349 u8 direction; 555 u8 direction;
@@ -369,6 +575,47 @@ struct smscore_gpio_config {
369 u8 outputdriving; 575 u8 outputdriving;
370}; 576};
371 577
578struct smscore_gpio_config {
579#define SMS_GPIO_DIRECTION_INPUT 0
580#define SMS_GPIO_DIRECTION_OUTPUT 1
581 u8 Direction;
582
583#define SMS_GPIO_PULL_UP_DOWN_NONE 0
584#define SMS_GPIO_PULL_UP_DOWN_PULLDOWN 1
585#define SMS_GPIO_PULL_UP_DOWN_PULLUP 2
586#define SMS_GPIO_PULL_UP_DOWN_KEEPER 3
587 u8 PullUpDown;
588
589#define SMS_GPIO_INPUT_CHARACTERISTICS_NORMAL 0
590#define SMS_GPIO_INPUT_CHARACTERISTICS_SCHMITT 1
591 u8 InputCharacteristics;
592
593#define SMS_GPIO_OUTPUT_SLEW_RATE_SLOW 1 /* 10xx */
594#define SMS_GPIO_OUTPUT_SLEW_RATE_FAST 0 /* 10xx */
595
596
597#define SMS_GPIO_OUTPUT_SLEW_RATE_0_45_V_NS 0 /* 11xx */
598#define SMS_GPIO_OUTPUT_SLEW_RATE_0_9_V_NS 1 /* 11xx */
599#define SMS_GPIO_OUTPUT_SLEW_RATE_1_7_V_NS 2 /* 11xx */
600#define SMS_GPIO_OUTPUT_SLEW_RATE_3_3_V_NS 3 /* 11xx */
601 u8 OutputSlewRate;
602
603#define SMS_GPIO_OUTPUT_DRIVING_S_4mA 0 /* 10xx */
604#define SMS_GPIO_OUTPUT_DRIVING_S_8mA 1 /* 10xx */
605#define SMS_GPIO_OUTPUT_DRIVING_S_12mA 2 /* 10xx */
606#define SMS_GPIO_OUTPUT_DRIVING_S_16mA 3 /* 10xx */
607
608#define SMS_GPIO_OUTPUT_DRIVING_1_5mA 0 /* 11xx */
609#define SMS_GPIO_OUTPUT_DRIVING_2_8mA 1 /* 11xx */
610#define SMS_GPIO_OUTPUT_DRIVING_4mA 2 /* 11xx */
611#define SMS_GPIO_OUTPUT_DRIVING_7mA 3 /* 11xx */
612#define SMS_GPIO_OUTPUT_DRIVING_10mA 4 /* 11xx */
613#define SMS_GPIO_OUTPUT_DRIVING_11mA 5 /* 11xx */
614#define SMS_GPIO_OUTPUT_DRIVING_14mA 6 /* 11xx */
615#define SMS_GPIO_OUTPUT_DRIVING_16mA 7 /* 11xx */
616 u8 OutputDriving;
617};
618
372extern void smscore_registry_setmode(char *devpath, int mode); 619extern void smscore_registry_setmode(char *devpath, int mode);
373extern int smscore_registry_getmode(char *devpath); 620extern int smscore_registry_getmode(char *devpath);
374 621
@@ -410,10 +657,19 @@ struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev);
410extern void smscore_putbuffer(struct smscore_device_t *coredev, 657extern void smscore_putbuffer(struct smscore_device_t *coredev,
411 struct smscore_buffer_t *cb); 658 struct smscore_buffer_t *cb);
412 659
660/* old GPIO managment */
413int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin, 661int smscore_configure_gpio(struct smscore_device_t *coredev, u32 pin,
414 struct smscore_gpio_config *pinconfig); 662 struct smscore_config_gpio *pinconfig);
415int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level); 663int smscore_set_gpio(struct smscore_device_t *coredev, u32 pin, int level);
416 664
665/* new GPIO managment */
666extern int smscore_gpio_configure(struct smscore_device_t *coredev, u8 PinNum,
667 struct smscore_gpio_config *pGpioConfig);
668extern int smscore_gpio_set_level(struct smscore_device_t *coredev, u8 PinNum,
669 u8 NewLevel);
670extern int smscore_gpio_get_level(struct smscore_device_t *coredev, u8 PinNum,
671 u8 *level);
672
417void smscore_set_board_id(struct smscore_device_t *core, int id); 673void smscore_set_board_id(struct smscore_device_t *core, int id);
418int smscore_get_board_id(struct smscore_device_t *core); 674int smscore_get_board_id(struct smscore_device_t *core);
419 675
@@ -442,4 +698,4 @@ int smscore_led_state(struct smscore_device_t *core, int led);
442 dprintk(KERN_DEBUG, DBG_ADV, fmt, ##arg) 698 dprintk(KERN_DEBUG, DBG_ADV, fmt, ##arg)
443 699
444 700
445#endif /* __smscoreapi_h__ */ 701#endif /* __SMS_CORE_API_H__ */
diff --git a/drivers/media/dvb/siano/smsdvb.c b/drivers/media/dvb/siano/smsdvb.c
index ba080b95befb..3ee1c3902c56 100644
--- a/drivers/media/dvb/siano/smsdvb.c
+++ b/drivers/media/dvb/siano/smsdvb.c
@@ -1,28 +1,34 @@
1/* 1/****************************************************************
2 * Driver for the Siano SMS1xxx USB dongle 2
3 * 3Siano Mobile Silicon, Inc.
4 * Author: Uri Shkolni 4MDTV receiver kernel modules.
5 * 5Copyright (C) 2006-2008, Uri Shkolnik
6 * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc. 6
7 * 7This program is free software: you can redistribute it and/or modify
8 * This program is free software; you can redistribute it and/or modify 8it under the terms of the GNU General Public License as published by
9 * it under the terms of the GNU General Public License version 2 as 9the Free Software Foundation, either version 2 of the License, or
10 * published by the Free Software Foundation; 10(at your option) any later version.
11 * 11
12 * Software distributed under the License is distributed on an "AS IS" 12 This program is distributed in the hope that it will be useful,
13 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 13but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * 14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * See the GNU General Public License for more details. 15GNU General Public License for more details.
16 * 16
17 * You should have received a copy of the GNU General Public License 17You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18along with this program. If not, see <http://www.gnu.org/licenses/>.
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19
20 */ 20****************************************************************/
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/init.h> 23#include <linux/init.h>
24 24
25#include "dmxdev.h"
26#include "dvbdev.h"
27#include "dvb_demux.h"
28#include "dvb_frontend.h"
29
25#include "smscoreapi.h" 30#include "smscoreapi.h"
31#include "smsendian.h"
26#include "sms-cards.h" 32#include "sms-cards.h"
27 33
28DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 34DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -39,12 +45,15 @@ struct smsdvb_client_t {
39 struct dvb_frontend frontend; 45 struct dvb_frontend frontend;
40 46
41 fe_status_t fe_status; 47 fe_status_t fe_status;
42 int fe_ber, fe_snr, fe_unc, fe_signal_strength;
43 48
44 struct completion tune_done, stat_done; 49 struct completion tune_done;
45 50
46 /* todo: save freq/band instead whole struct */ 51 /* todo: save freq/band instead whole struct */
47 struct dvb_frontend_parameters fe_params; 52 struct dvb_frontend_parameters fe_params;
53
54 struct SMSHOSTLIB_STATISTICS_DVB_S sms_stat_dvb;
55 int event_fe_state;
56 int event_unc_state;
48}; 57};
49 58
50static struct list_head g_smsdvb_clients; 59static struct list_head g_smsdvb_clients;
@@ -54,11 +63,69 @@ static int sms_dbg;
54module_param_named(debug, sms_dbg, int, 0644); 63module_param_named(debug, sms_dbg, int, 0644);
55MODULE_PARM_DESC(debug, "set debug level (info=1, adv=2 (or-able))"); 64MODULE_PARM_DESC(debug, "set debug level (info=1, adv=2 (or-able))");
56 65
66/* Events that may come from DVB v3 adapter */
67static void sms_board_dvb3_event(struct smsdvb_client_t *client,
68 enum SMS_DVB3_EVENTS event) {
69
70 struct smscore_device_t *coredev = client->coredev;
71 switch (event) {
72 case DVB3_EVENT_INIT:
73 sms_debug("DVB3_EVENT_INIT");
74 sms_board_event(coredev, BOARD_EVENT_BIND);
75 break;
76 case DVB3_EVENT_SLEEP:
77 sms_debug("DVB3_EVENT_SLEEP");
78 sms_board_event(coredev, BOARD_EVENT_POWER_SUSPEND);
79 break;
80 case DVB3_EVENT_HOTPLUG:
81 sms_debug("DVB3_EVENT_HOTPLUG");
82 sms_board_event(coredev, BOARD_EVENT_POWER_INIT);
83 break;
84 case DVB3_EVENT_FE_LOCK:
85 if (client->event_fe_state != DVB3_EVENT_FE_LOCK) {
86 client->event_fe_state = DVB3_EVENT_FE_LOCK;
87 sms_debug("DVB3_EVENT_FE_LOCK");
88 sms_board_event(coredev, BOARD_EVENT_FE_LOCK);
89 }
90 break;
91 case DVB3_EVENT_FE_UNLOCK:
92 if (client->event_fe_state != DVB3_EVENT_FE_UNLOCK) {
93 client->event_fe_state = DVB3_EVENT_FE_UNLOCK;
94 sms_debug("DVB3_EVENT_FE_UNLOCK");
95 sms_board_event(coredev, BOARD_EVENT_FE_UNLOCK);
96 }
97 break;
98 case DVB3_EVENT_UNC_OK:
99 if (client->event_unc_state != DVB3_EVENT_UNC_OK) {
100 client->event_unc_state = DVB3_EVENT_UNC_OK;
101 sms_debug("DVB3_EVENT_UNC_OK");
102 sms_board_event(coredev, BOARD_EVENT_MULTIPLEX_OK);
103 }
104 break;
105 case DVB3_EVENT_UNC_ERR:
106 if (client->event_unc_state != DVB3_EVENT_UNC_ERR) {
107 client->event_unc_state = DVB3_EVENT_UNC_ERR;
108 sms_debug("DVB3_EVENT_UNC_ERR");
109 sms_board_event(coredev, BOARD_EVENT_MULTIPLEX_ERRORS);
110 }
111 break;
112
113 default:
114 sms_err("Unknown dvb3 api event");
115 break;
116 }
117}
118
57static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb) 119static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
58{ 120{
59 struct smsdvb_client_t *client = (struct smsdvb_client_t *) context; 121 struct smsdvb_client_t *client = (struct smsdvb_client_t *) context;
60 struct SmsMsgHdr_ST *phdr = 122 struct SmsMsgHdr_ST *phdr = (struct SmsMsgHdr_ST *) (((u8 *) cb->p)
61 (struct SmsMsgHdr_ST *)(((u8 *) cb->p) + cb->offset); 123 + cb->offset);
124 u32 *pMsgData = (u32 *) phdr + 1;
125 /*u32 MsgDataLen = phdr->msgLength - sizeof(struct SmsMsgHdr_ST);*/
126 bool is_status_update = false;
127
128 smsendian_handle_rx_message((struct SmsMsgData_ST *) phdr);
62 129
63 switch (phdr->msgType) { 130 switch (phdr->msgType) {
64 case MSG_SMS_DVBT_BDA_DATA: 131 case MSG_SMS_DVBT_BDA_DATA:
@@ -70,43 +137,110 @@ static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
70 complete(&client->tune_done); 137 complete(&client->tune_done);
71 break; 138 break;
72 139
73 case MSG_SMS_GET_STATISTICS_RES: 140 case MSG_SMS_SIGNAL_DETECTED_IND:
74 { 141 sms_info("MSG_SMS_SIGNAL_DETECTED_IND");
75 struct SmsMsgStatisticsInfo_ST *p = 142 client->sms_stat_dvb.TransmissionData.IsDemodLocked = true;
76 (struct SmsMsgStatisticsInfo_ST *)(phdr + 1); 143 is_status_update = true;
77 144 break;
78 if (p->Stat.IsDemodLocked) { 145
79 client->fe_status = FE_HAS_SIGNAL | 146 case MSG_SMS_NO_SIGNAL_IND:
80 FE_HAS_CARRIER | 147 sms_info("MSG_SMS_NO_SIGNAL_IND");
81 FE_HAS_VITERBI | 148 client->sms_stat_dvb.TransmissionData.IsDemodLocked = false;
82 FE_HAS_SYNC | 149 is_status_update = true;
83 FE_HAS_LOCK; 150 break;
84 151
85 client->fe_snr = p->Stat.SNR; 152 case MSG_SMS_TRANSMISSION_IND: {
86 client->fe_ber = p->Stat.BER; 153 sms_info("MSG_SMS_TRANSMISSION_IND");
87 client->fe_unc = p->Stat.BERErrorCount; 154
88 155 pMsgData++;
89 if (p->Stat.InBandPwr < -95) 156 memcpy(&client->sms_stat_dvb.TransmissionData, pMsgData,
90 client->fe_signal_strength = 0; 157 sizeof(struct TRANSMISSION_STATISTICS_S));
91 else if (p->Stat.InBandPwr > -29) 158
92 client->fe_signal_strength = 100; 159 /* Mo need to correct guard interval
93 else 160 * (as opposed to old statistics message).
94 client->fe_signal_strength = 161 */
95 (p->Stat.InBandPwr + 95) * 3 / 2; 162 CORRECT_STAT_BANDWIDTH(client->sms_stat_dvb.TransmissionData);
163 CORRECT_STAT_TRANSMISSON_MODE(
164 client->sms_stat_dvb.TransmissionData);
165 is_status_update = true;
166 break;
167 }
168 case MSG_SMS_HO_PER_SLICES_IND: {
169 struct RECEPTION_STATISTICS_S *pReceptionData =
170 &client->sms_stat_dvb.ReceptionData;
171 struct SRVM_SIGNAL_STATUS_S SignalStatusData;
172
173 /*sms_info("MSG_SMS_HO_PER_SLICES_IND");*/
174 pMsgData++;
175 SignalStatusData.result = pMsgData[0];
176 SignalStatusData.snr = pMsgData[1];
177 SignalStatusData.inBandPower = (s32) pMsgData[2];
178 SignalStatusData.tsPackets = pMsgData[3];
179 SignalStatusData.etsPackets = pMsgData[4];
180 SignalStatusData.constellation = pMsgData[5];
181 SignalStatusData.hpCode = pMsgData[6];
182 SignalStatusData.tpsSrvIndLP = pMsgData[7] & 0x03;
183 SignalStatusData.tpsSrvIndHP = pMsgData[8] & 0x03;
184 SignalStatusData.cellId = pMsgData[9] & 0xFFFF;
185 SignalStatusData.reason = pMsgData[10];
186 SignalStatusData.requestId = pMsgData[11];
187 pReceptionData->IsRfLocked = pMsgData[16];
188 pReceptionData->IsDemodLocked = pMsgData[17];
189 pReceptionData->ModemState = pMsgData[12];
190 pReceptionData->SNR = pMsgData[1];
191 pReceptionData->BER = pMsgData[13];
192 pReceptionData->RSSI = pMsgData[14];
193 CORRECT_STAT_RSSI(client->sms_stat_dvb.ReceptionData);
194
195 pReceptionData->InBandPwr = (s32) pMsgData[2];
196 pReceptionData->CarrierOffset = (s32) pMsgData[15];
197 pReceptionData->TotalTSPackets = pMsgData[3];
198 pReceptionData->ErrorTSPackets = pMsgData[4];
199
200 /* TS PER */
201 if ((SignalStatusData.tsPackets + SignalStatusData.etsPackets)
202 > 0) {
203 pReceptionData->TS_PER = (SignalStatusData.etsPackets
204 * 100) / (SignalStatusData.tsPackets
205 + SignalStatusData.etsPackets);
96 } else { 206 } else {
97 client->fe_status = 0; 207 pReceptionData->TS_PER = 0;
98 client->fe_snr =
99 client->fe_ber =
100 client->fe_unc =
101 client->fe_signal_strength = 0;
102 } 208 }
103 209
104 complete(&client->stat_done); 210 pReceptionData->BERBitCount = pMsgData[18];
105 break; 211 pReceptionData->BERErrorCount = pMsgData[19];
106 } }
107 212
213 pReceptionData->MRC_SNR = pMsgData[20];
214 pReceptionData->MRC_InBandPwr = pMsgData[21];
215 pReceptionData->MRC_RSSI = pMsgData[22];
216
217 is_status_update = true;
218 break;
219 }
220 }
108 smscore_putbuffer(client->coredev, cb); 221 smscore_putbuffer(client->coredev, cb);
109 222
223 if (is_status_update) {
224 if (client->sms_stat_dvb.ReceptionData.IsDemodLocked) {
225 client->fe_status = FE_HAS_SIGNAL | FE_HAS_CARRIER
226 | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
227 sms_board_dvb3_event(client, DVB3_EVENT_FE_LOCK);
228 if (client->sms_stat_dvb.ReceptionData.ErrorTSPackets
229 == 0)
230 sms_board_dvb3_event(client, DVB3_EVENT_UNC_OK);
231 else
232 sms_board_dvb3_event(client,
233 DVB3_EVENT_UNC_ERR);
234
235 } else {
236 /*client->fe_status =
237 (phdr->msgType == MSG_SMS_NO_SIGNAL_IND) ?
238 0 : FE_HAS_SIGNAL;*/
239 client->fe_status = 0;
240 sms_board_dvb3_event(client, DVB3_EVENT_FE_UNLOCK);
241 }
242 }
243
110 return 0; 244 return 0;
111} 245}
112 246
@@ -149,6 +283,7 @@ static int smsdvb_start_feed(struct dvb_demux_feed *feed)
149 PidMsg.xMsgHeader.msgLength = sizeof(PidMsg); 283 PidMsg.xMsgHeader.msgLength = sizeof(PidMsg);
150 PidMsg.msgData[0] = feed->pid; 284 PidMsg.msgData[0] = feed->pid;
151 285
286 smsendian_handle_tx_message((struct SmsMsgHdr_ST *)&PidMsg);
152 return smsclient_sendrequest(client->smsclient, 287 return smsclient_sendrequest(client->smsclient,
153 &PidMsg, sizeof(PidMsg)); 288 &PidMsg, sizeof(PidMsg));
154} 289}
@@ -169,6 +304,7 @@ static int smsdvb_stop_feed(struct dvb_demux_feed *feed)
169 PidMsg.xMsgHeader.msgLength = sizeof(PidMsg); 304 PidMsg.xMsgHeader.msgLength = sizeof(PidMsg);
170 PidMsg.msgData[0] = feed->pid; 305 PidMsg.msgData[0] = feed->pid;
171 306
307 smsendian_handle_tx_message((struct SmsMsgHdr_ST *)&PidMsg);
172 return smsclient_sendrequest(client->smsclient, 308 return smsclient_sendrequest(client->smsclient,
173 &PidMsg, sizeof(PidMsg)); 309 &PidMsg, sizeof(PidMsg));
174} 310}
@@ -177,7 +313,10 @@ static int smsdvb_sendrequest_and_wait(struct smsdvb_client_t *client,
177 void *buffer, size_t size, 313 void *buffer, size_t size,
178 struct completion *completion) 314 struct completion *completion)
179{ 315{
180 int rc = smsclient_sendrequest(client->smsclient, buffer, size); 316 int rc;
317
318 smsendian_handle_tx_message((struct SmsMsgHdr_ST *)buffer);
319 rc = smsclient_sendrequest(client->smsclient, buffer, size);
181 if (rc < 0) 320 if (rc < 0)
182 return rc; 321 return rc;
183 322
@@ -186,83 +325,61 @@ static int smsdvb_sendrequest_and_wait(struct smsdvb_client_t *client,
186 0 : -ETIME; 325 0 : -ETIME;
187} 326}
188 327
189static int smsdvb_send_statistics_request(struct smsdvb_client_t *client)
190{
191 struct SmsMsgHdr_ST Msg = { MSG_SMS_GET_STATISTICS_REQ,
192 DVBT_BDA_CONTROL_MSG_ID,
193 HIF_TASK, sizeof(struct SmsMsgHdr_ST), 0 };
194 int ret = smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
195 &client->stat_done);
196 if (ret < 0)
197 return ret;
198
199 if (client->fe_status & FE_HAS_LOCK)
200 sms_board_led_feedback(client->coredev,
201 (client->fe_unc == 0) ?
202 SMS_LED_HI : SMS_LED_LO);
203 else
204 sms_board_led_feedback(client->coredev, SMS_LED_OFF);
205 return ret;
206}
207
208static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat) 328static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat)
209{ 329{
210 struct smsdvb_client_t *client = 330 struct smsdvb_client_t *client;
211 container_of(fe, struct smsdvb_client_t, frontend); 331 client = container_of(fe, struct smsdvb_client_t, frontend);
212 int rc = smsdvb_send_statistics_request(client);
213 332
214 if (!rc) 333 *stat = client->fe_status;
215 *stat = client->fe_status;
216 334
217 return rc; 335 return 0;
218} 336}
219 337
220static int smsdvb_read_ber(struct dvb_frontend *fe, u32 *ber) 338static int smsdvb_read_ber(struct dvb_frontend *fe, u32 *ber)
221{ 339{
222 struct smsdvb_client_t *client = 340 struct smsdvb_client_t *client;
223 container_of(fe, struct smsdvb_client_t, frontend); 341 client = container_of(fe, struct smsdvb_client_t, frontend);
224 int rc = smsdvb_send_statistics_request(client);
225 342
226 if (!rc) 343 *ber = client->sms_stat_dvb.ReceptionData.BER;
227 *ber = client->fe_ber;
228 344
229 return rc; 345 return 0;
230} 346}
231 347
232static int smsdvb_read_signal_strength(struct dvb_frontend *fe, u16 *strength) 348static int smsdvb_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
233{ 349{
234 struct smsdvb_client_t *client = 350 struct smsdvb_client_t *client;
235 container_of(fe, struct smsdvb_client_t, frontend); 351 client = container_of(fe, struct smsdvb_client_t, frontend);
236 int rc = smsdvb_send_statistics_request(client);
237 352
238 if (!rc) 353 if (client->sms_stat_dvb.ReceptionData.InBandPwr < -95)
239 *strength = client->fe_signal_strength; 354 *strength = 0;
355 else if (client->sms_stat_dvb.ReceptionData.InBandPwr > -29)
356 *strength = 100;
357 else
358 *strength =
359 (client->sms_stat_dvb.ReceptionData.InBandPwr
360 + 95) * 3 / 2;
240 361
241 return rc; 362 return 0;
242} 363}
243 364
244static int smsdvb_read_snr(struct dvb_frontend *fe, u16 *snr) 365static int smsdvb_read_snr(struct dvb_frontend *fe, u16 *snr)
245{ 366{
246 struct smsdvb_client_t *client = 367 struct smsdvb_client_t *client;
247 container_of(fe, struct smsdvb_client_t, frontend); 368 client = container_of(fe, struct smsdvb_client_t, frontend);
248 int rc = smsdvb_send_statistics_request(client);
249 369
250 if (!rc) 370 *snr = client->sms_stat_dvb.ReceptionData.SNR;
251 *snr = client->fe_snr;
252 371
253 return rc; 372 return 0;
254} 373}
255 374
256static int smsdvb_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) 375static int smsdvb_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
257{ 376{
258 struct smsdvb_client_t *client = 377 struct smsdvb_client_t *client;
259 container_of(fe, struct smsdvb_client_t, frontend); 378 client = container_of(fe, struct smsdvb_client_t, frontend);
260 int rc = smsdvb_send_statistics_request(client);
261 379
262 if (!rc) 380 *ucblocks = client->sms_stat_dvb.ReceptionData.ErrorTSPackets;
263 *ucblocks = client->fe_unc;
264 381
265 return rc; 382 return 0;
266} 383}
267 384
268static int smsdvb_get_tune_settings(struct dvb_frontend *fe, 385static int smsdvb_get_tune_settings(struct dvb_frontend *fe,
@@ -286,12 +403,15 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe,
286 struct SmsMsgHdr_ST Msg; 403 struct SmsMsgHdr_ST Msg;
287 u32 Data[3]; 404 u32 Data[3];
288 } Msg; 405 } Msg;
289 int ret;
290 406
291 Msg.Msg.msgSrcId = DVBT_BDA_CONTROL_MSG_ID; 407 client->fe_status = FE_HAS_SIGNAL;
292 Msg.Msg.msgDstId = HIF_TASK; 408 client->event_fe_state = -1;
293 Msg.Msg.msgFlags = 0; 409 client->event_unc_state = -1;
294 Msg.Msg.msgType = MSG_SMS_RF_TUNE_REQ; 410
411 Msg.Msg.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
412 Msg.Msg.msgDstId = HIF_TASK;
413 Msg.Msg.msgFlags = 0;
414 Msg.Msg.msgType = MSG_SMS_RF_TUNE_REQ;
295 Msg.Msg.msgLength = sizeof(Msg); 415 Msg.Msg.msgLength = sizeof(Msg);
296 Msg.Data[0] = fep->frequency; 416 Msg.Data[0] = fep->frequency;
297 Msg.Data[2] = 12000000; 417 Msg.Data[2] = 12000000;
@@ -307,24 +427,6 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe,
307 default: return -EINVAL; 427 default: return -EINVAL;
308 } 428 }
309 429
310 /* Disable LNA, if any. An error is returned if no LNA is present */
311 ret = sms_board_lna_control(client->coredev, 0);
312 if (ret == 0) {
313 fe_status_t status;
314
315 /* tune with LNA off at first */
316 ret = smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
317 &client->tune_done);
318
319 smsdvb_read_status(fe, &status);
320
321 if (status & FE_HAS_LOCK)
322 return ret;
323
324 /* previous tune didnt lock - enable LNA and tune again */
325 sms_board_lna_control(client->coredev, 1);
326 }
327
328 return smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg), 430 return smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
329 &client->tune_done); 431 &client->tune_done);
330} 432}
@@ -349,8 +451,7 @@ static int smsdvb_init(struct dvb_frontend *fe)
349 struct smsdvb_client_t *client = 451 struct smsdvb_client_t *client =
350 container_of(fe, struct smsdvb_client_t, frontend); 452 container_of(fe, struct smsdvb_client_t, frontend);
351 453
352 sms_board_power(client->coredev, 1); 454 sms_board_dvb3_event(client, DVB3_EVENT_INIT);
353
354 return 0; 455 return 0;
355} 456}
356 457
@@ -359,8 +460,7 @@ static int smsdvb_sleep(struct dvb_frontend *fe)
359 struct smsdvb_client_t *client = 460 struct smsdvb_client_t *client =
360 container_of(fe, struct smsdvb_client_t, frontend); 461 container_of(fe, struct smsdvb_client_t, frontend);
361 462
362 sms_board_led_feedback(client->coredev, SMS_LED_OFF); 463 sms_board_dvb3_event(client, DVB3_EVENT_SLEEP);
363 sms_board_power(client->coredev, 0);
364 464
365 return 0; 465 return 0;
366} 466}
@@ -485,7 +585,6 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
485 client->coredev = coredev; 585 client->coredev = coredev;
486 586
487 init_completion(&client->tune_done); 587 init_completion(&client->tune_done);
488 init_completion(&client->stat_done);
489 588
490 kmutex_lock(&g_smsdvb_clientslock); 589 kmutex_lock(&g_smsdvb_clientslock);
491 590
@@ -493,8 +592,11 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
493 592
494 kmutex_unlock(&g_smsdvb_clientslock); 593 kmutex_unlock(&g_smsdvb_clientslock);
495 594
496 sms_info("success"); 595 client->event_fe_state = -1;
596 client->event_unc_state = -1;
597 sms_board_dvb3_event(client, DVB3_EVENT_HOTPLUG);
497 598
599 sms_info("success");
498 sms_board_setup(coredev); 600 sms_board_setup(coredev);
499 601
500 return 0; 602 return 0;
@@ -547,5 +649,5 @@ module_init(smsdvb_module_init);
547module_exit(smsdvb_module_exit); 649module_exit(smsdvb_module_exit);
548 650
549MODULE_DESCRIPTION("SMS DVB subsystem adaptation module"); 651MODULE_DESCRIPTION("SMS DVB subsystem adaptation module");
550MODULE_AUTHOR("Siano Mobile Silicon, INC. (uris@siano-ms.com)"); 652MODULE_AUTHOR("Siano Mobile Silicon, Inc. (uris@siano-ms.com)");
551MODULE_LICENSE("GPL"); 653MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/siano/smsendian.c b/drivers/media/dvb/siano/smsendian.c
new file mode 100644
index 000000000000..457b6d02ef85
--- /dev/null
+++ b/drivers/media/dvb/siano/smsendian.c
@@ -0,0 +1,102 @@
1/****************************************************************
2
3 Siano Mobile Silicon, Inc.
4 MDTV receiver kernel modules.
5 Copyright (C) 2006-2009, Uri Shkolnik
6
7 This program is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
19
20 ****************************************************************/
21
22#include <asm/byteorder.h>
23
24#include "smsendian.h"
25#include "smscoreapi.h"
26
27void smsendian_handle_tx_message(void *buffer)
28{
29#ifdef __BIG_ENDIAN
30 struct SmsMsgData_ST *msg = (struct SmsMsgData_ST *)buffer;
31 int i;
32 int msgWords;
33
34 switch (msg->xMsgHeader.msgType) {
35 case MSG_SMS_DATA_DOWNLOAD_REQ:
36 {
37 msg->msgData[0] = le32_to_cpu(msg->msgData[0]);
38 break;
39 }
40
41 default:
42 msgWords = (msg->xMsgHeader.msgLength -
43 sizeof(struct SmsMsgHdr_ST))/4;
44
45 for (i = 0; i < msgWords; i++)
46 msg->msgData[i] = le32_to_cpu(msg->msgData[i]);
47
48 break;
49 }
50#endif /* __BIG_ENDIAN */
51}
52EXPORT_SYMBOL_GPL(smsendian_handle_tx_message);
53
54void smsendian_handle_rx_message(void *buffer)
55{
56#ifdef __BIG_ENDIAN
57 struct SmsMsgData_ST *msg = (struct SmsMsgData_ST *)buffer;
58 int i;
59 int msgWords;
60
61 switch (msg->xMsgHeader.msgType) {
62 case MSG_SMS_GET_VERSION_EX_RES:
63 {
64 struct SmsVersionRes_ST *ver =
65 (struct SmsVersionRes_ST *) msg;
66 ver->ChipModel = le16_to_cpu(ver->ChipModel);
67 break;
68 }
69
70 case MSG_SMS_DVBT_BDA_DATA:
71 case MSG_SMS_DAB_CHANNEL:
72 case MSG_SMS_DATA_MSG:
73 {
74 break;
75 }
76
77 default:
78 {
79 msgWords = (msg->xMsgHeader.msgLength -
80 sizeof(struct SmsMsgHdr_ST))/4;
81
82 for (i = 0; i < msgWords; i++)
83 msg->msgData[i] = le32_to_cpu(msg->msgData[i]);
84
85 break;
86 }
87 }
88#endif /* __BIG_ENDIAN */
89}
90EXPORT_SYMBOL_GPL(smsendian_handle_rx_message);
91
92void smsendian_handle_message_header(void *msg)
93{
94#ifdef __BIG_ENDIAN
95 struct SmsMsgHdr_ST *phdr = (struct SmsMsgHdr_ST *)msg;
96
97 phdr->msgType = le16_to_cpu(phdr->msgType);
98 phdr->msgLength = le16_to_cpu(phdr->msgLength);
99 phdr->msgFlags = le16_to_cpu(phdr->msgFlags);
100#endif /* __BIG_ENDIAN */
101}
102EXPORT_SYMBOL_GPL(smsendian_handle_message_header);
diff --git a/drivers/media/dvb/siano/smsendian.h b/drivers/media/dvb/siano/smsendian.h
new file mode 100644
index 000000000000..1624d6fd367b
--- /dev/null
+++ b/drivers/media/dvb/siano/smsendian.h
@@ -0,0 +1,32 @@
1/****************************************************************
2
3Siano Mobile Silicon, Inc.
4MDTV receiver kernel modules.
5Copyright (C) 2006-2009, Uri Shkolnik
6
7This program is free software: you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation, either version 2 of the License, or
10(at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with this program. If not, see <http://www.gnu.org/licenses/>.
19
20****************************************************************/
21
22#ifndef __SMS_ENDIAN_H__
23#define __SMS_ENDIAN_H__
24
25#include <asm/byteorder.h>
26
27extern void smsendian_handle_tx_message(void *buffer);
28extern void smsendian_handle_rx_message(void *buffer);
29extern void smsendian_handle_message_header(void *msg);
30
31#endif /* __SMS_ENDIAN_H__ */
32
diff --git a/drivers/media/dvb/siano/smsir.c b/drivers/media/dvb/siano/smsir.c
new file mode 100644
index 000000000000..e3d776feeaca
--- /dev/null
+++ b/drivers/media/dvb/siano/smsir.c
@@ -0,0 +1,301 @@
1/****************************************************************
2
3 Siano Mobile Silicon, Inc.
4 MDTV receiver kernel modules.
5 Copyright (C) 2006-2009, Uri Shkolnik
6
7 This program is free software: you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation, either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
19
20 ****************************************************************/
21
22
23#include <linux/types.h>
24#include <linux/input.h>
25
26#include "smscoreapi.h"
27#include "smsir.h"
28#include "sms-cards.h"
29
30/* In order to add new IR remote control -
31 * 1) Add it to the <enum ir_kb_type> @ smsir,h,
32 * 2) Add its map to keyboard_layout_maps below
33 * 3) Set your board (sms-cards sub-module) to use it
34 */
35
36static struct keyboard_layout_map_t keyboard_layout_maps[] = {
37 [SMS_IR_KB_DEFAULT_TV] = {
38 .ir_protocol = IR_RC5,
39 .rc5_kbd_address = KEYBOARD_ADDRESS_TV1,
40 .keyboard_layout_map = {
41 KEY_0, KEY_1, KEY_2,
42 KEY_3, KEY_4, KEY_5,
43 KEY_6, KEY_7, KEY_8,
44 KEY_9, 0, 0, KEY_POWER,
45 KEY_MUTE, 0, 0,
46 KEY_VOLUMEUP, KEY_VOLUMEDOWN,
47 KEY_BRIGHTNESSUP,
48 KEY_BRIGHTNESSDOWN, KEY_CHANNELUP,
49 KEY_CHANNELDOWN,
50 0, 0, 0, 0, 0, 0, 0, 0,
51 0, 0, 0, 0, 0, 0, 0, 0,
52 0, 0, 0, 0, 0, 0, 0, 0,
53 0, 0, 0, 0, 0, 0, 0, 0,
54 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
55 }
56 },
57 [SMS_IR_KB_HCW_SILVER] = {
58 .ir_protocol = IR_RC5,
59 .rc5_kbd_address = KEYBOARD_ADDRESS_LIGHTING1,
60 .keyboard_layout_map = {
61 KEY_0, KEY_1, KEY_2,
62 KEY_3, KEY_4, KEY_5,
63 KEY_6, KEY_7, KEY_8,
64 KEY_9, KEY_TEXT, KEY_RED,
65 KEY_RADIO, KEY_MENU,
66 KEY_SUBTITLE,
67 KEY_MUTE, KEY_VOLUMEUP,
68 KEY_VOLUMEDOWN, KEY_PREVIOUS, 0,
69 KEY_UP, KEY_DOWN, KEY_LEFT,
70 KEY_RIGHT, KEY_VIDEO, KEY_AUDIO,
71 KEY_MHP, KEY_EPG, KEY_TV,
72 0, KEY_NEXTSONG, KEY_EXIT,
73 KEY_CHANNELUP, KEY_CHANNELDOWN,
74 KEY_CHANNEL, 0,
75 KEY_PREVIOUSSONG, KEY_ENTER,
76 KEY_SLEEP, 0, 0, KEY_BLUE,
77 0, 0, 0, 0, KEY_GREEN, 0,
78 KEY_PAUSE, 0, KEY_REWIND,
79 0, KEY_FASTFORWARD, KEY_PLAY,
80 KEY_STOP, KEY_RECORD,
81 KEY_YELLOW, 0, 0, KEY_SELECT,
82 KEY_ZOOM, KEY_POWER, 0, 0
83 }
84 },
85 { } /* Terminating entry */
86};
87
88u32 ir_pos;
89u32 ir_word;
90u32 ir_toggle;
91
92#define RC5_PUSH_BIT(dst, bit, pos) \
93 { dst <<= 1; dst |= bit; pos++; }
94
95
96static void sms_ir_rc5_event(struct smscore_device_t *coredev,
97 u32 toggle, u32 addr, u32 cmd)
98{
99 bool toggle_changed;
100 u16 keycode;
101
102 sms_log("IR RC5 word: address %d, command %d, toggle %d",
103 addr, cmd, toggle);
104
105 toggle_changed = ir_toggle != toggle;
106 /* keep toggle */
107 ir_toggle = toggle;
108
109 if (addr !=
110 keyboard_layout_maps[coredev->ir.ir_kb_type].rc5_kbd_address)
111 return; /* Check for valid address */
112
113 keycode =
114 keyboard_layout_maps
115 [coredev->ir.ir_kb_type].keyboard_layout_map[cmd];
116
117 if (!toggle_changed &&
118 (keycode != KEY_VOLUMEUP && keycode != KEY_VOLUMEDOWN))
119 return; /* accept only repeated volume, reject other keys */
120
121 sms_log("kernel input keycode (from ir) %d", keycode);
122 input_report_key(coredev->ir.input_dev, keycode, 1);
123 input_sync(coredev->ir.input_dev);
124
125}
126
127/* decode raw bit pattern to RC5 code */
128/* taken from ir-functions.c */
129static u32 ir_rc5_decode(unsigned int code)
130{
131/* unsigned int org_code = code;*/
132 unsigned int pair;
133 unsigned int rc5 = 0;
134 int i;
135
136 for (i = 0; i < 14; ++i) {
137 pair = code & 0x3;
138 code >>= 2;
139
140 rc5 <<= 1;
141 switch (pair) {
142 case 0:
143 case 2:
144 break;
145 case 1:
146 rc5 |= 1;
147 break;
148 case 3:
149/* dprintk(1, "ir-common: ir_rc5_decode(%x) bad code\n", org_code);*/
150 sms_log("bad code");
151 return 0;
152 }
153 }
154/*
155 dprintk(1, "ir-common: code=%x, rc5=%x, start=%x,
156 toggle=%x, address=%x, "
157 "instr=%x\n", rc5, org_code, RC5_START(rc5),
158 RC5_TOGGLE(rc5), RC5_ADDR(rc5), RC5_INSTR(rc5));
159*/
160 return rc5;
161}
162
163static void sms_rc5_parse_word(struct smscore_device_t *coredev)
164{
165 #define RC5_START(x) (((x)>>12)&3)
166 #define RC5_TOGGLE(x) (((x)>>11)&1)
167 #define RC5_ADDR(x) (((x)>>6)&0x1F)
168 #define RC5_INSTR(x) ((x)&0x3F)
169
170 int i, j;
171 u32 rc5_word = 0;
172
173 /* Reverse the IR word direction */
174 for (i = 0 ; i < 28 ; i++)
175 RC5_PUSH_BIT(rc5_word, (ir_word>>i)&1, j)
176
177 rc5_word = ir_rc5_decode(rc5_word);
178 /* sms_log("temp = 0x%x, rc5_code = 0x%x", ir_word, rc5_word); */
179
180 sms_ir_rc5_event(coredev,
181 RC5_TOGGLE(rc5_word),
182 RC5_ADDR(rc5_word),
183 RC5_INSTR(rc5_word));
184}
185
186
/*
 * Feed one raw IR edge sample into the RC5 bit accumulator.
 *
 * @ir_sample: signed duration of one line level -- the magnitude is
 *             the time and the sign encodes the level (negative = low,
 *             positive = high), as derived below; units presumably
 *             microseconds given RC5_DEF_BIT_TIME = 889 -- TODO confirm
 *             against the firmware interface.
 *
 * A duration close to N * RC5_DEF_BIT_TIME (within the granularity
 * window) contributes N identical bits.  A duration far beyond any
 * legal run terminates the word: a complete 28-bit word is parsed,
 * a partial one is logged as an error, and the accumulator is reset
 * either way.
 */
static void sms_rc5_accumulate_bits(struct smscore_device_t *coredev,
		s32 ir_sample)
{
	#define RC5_TIME_GRANULARITY	200
	#define RC5_DEF_BIT_TIME	889
	#define RC5_MAX_SAME_BIT_CONT	4
	#define RC5_WORD_LEN		27 /* 28 bit */

	u32 i, j;
	s32 delta_time;
	u32 time = (ir_sample > 0) ? ir_sample : (0-ir_sample); /* |sample| */
	u32 level = (ir_sample < 0) ? 0 : 1; /* line level from sign */

	/* Try the longest plausible run of identical bits first. */
	for (i = RC5_MAX_SAME_BIT_CONT; i > 0; i--) {
		delta_time = time - (i*RC5_DEF_BIT_TIME) + RC5_TIME_GRANULARITY;
		if (delta_time < 0)
			continue; /* not so many consecutive bits */
		if (delta_time > (2 * RC5_TIME_GRANULARITY)) {
			/* timeout: sample is too long even for i bits */
			if (ir_pos == (RC5_WORD_LEN-1))
				/* complete last bit */
				RC5_PUSH_BIT(ir_word, level, ir_pos)

			if (ir_pos == RC5_WORD_LEN)
				sms_rc5_parse_word(coredev);
			else if (ir_pos) /* timeout within a word */
				sms_log("IR error parsing a word");

			/* reset accumulator for the next word */
			ir_pos = 0;
			ir_word = 0;
			/* sms_log("timeout %d", time); */
			break;
		}
		/* The time is within the range of this number of bits */
		for (j = 0 ; j < i ; j++)
			RC5_PUSH_BIT(ir_word, level, ir_pos)

		break;
	}
}
227
228void sms_ir_event(struct smscore_device_t *coredev, const char *buf, int len)
229{
230 #define IR_DATA_RECEIVE_MAX_LEN 520 /* 128*4 + 4 + 4 */
231 u32 i;
232 enum ir_protocol ir_protocol =
233 keyboard_layout_maps[coredev->ir.ir_kb_type]
234 .ir_protocol;
235 s32 *samples;
236 int count = len>>2;
237
238 samples = (s32 *)buf;
239/* sms_log("IR buffer received, length = %d", count);*/
240
241 for (i = 0; i < count; i++)
242 if (ir_protocol == IR_RC5)
243 sms_rc5_accumulate_bits(coredev, samples[i]);
244 /* IR_RCMM not implemented */
245}
246
/*
 * Allocate and register the input device through which decoded IR
 * keycodes are delivered, and populate coredev->ir from the board's
 * configuration.
 *
 * Returns 0 on success, -ENOMEM if the input device cannot be
 * allocated, or -EACCES if registration fails (device freed first).
 */
int sms_ir_init(struct smscore_device_t *coredev)
{
	struct input_dev *input_dev;

	sms_log("Allocating input device");
	input_dev = input_allocate_device();
	if (!input_dev) {
		sms_err("Not enough memory");
		return -ENOMEM;
	}

	coredev->ir.input_dev = input_dev;
	/* the board id selects which layout/protocol table applies */
	coredev->ir.ir_kb_type =
		sms_get_board(smscore_get_board_id(coredev))->ir_kb_type;
	coredev->ir.keyboard_layout_map =
		keyboard_layout_maps[coredev->ir.ir_kb_type].
		keyboard_layout_map;
	sms_log("IR remote keyboard type is %d", coredev->ir.ir_kb_type);

	coredev->ir.controller = 0;	/* Todo: vega/nova SPI number */
	coredev->ir.timeout = IR_DEFAULT_TIMEOUT;
	sms_log("IR port %d, timeout %d ms",
		coredev->ir.controller, coredev->ir.timeout);

	snprintf(coredev->ir.name,
		IR_DEV_NAME_MAX_LEN,
		"SMS IR w/kbd type %d",
		coredev->ir.ir_kb_type);
	/* name buffer lives in coredev->ir, so both pointers stay valid */
	input_dev->name = coredev->ir.name;
	input_dev->phys = coredev->ir.name;
	input_dev->dev.parent = coredev->device;

	/* Key press events only */
	input_dev->evbit[0] = BIT_MASK(EV_KEY);
	/*
	 * NOTE(review): only BTN_0 is declared in keybit, yet the RC5
	 * decoder reports KEY_* codes from the layout map -- the input
	 * core may filter those out; verify whether each mapped keycode
	 * needs its own set_bit() here.
	 */
	input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0);

	sms_log("Input device (IR) %s is set for key events", input_dev->name);

	if (input_register_device(input_dev)) {
		sms_err("Failed to register device");
		input_free_device(input_dev);
		return -EACCES;
	}

	return 0;
}
293
294void sms_ir_exit(struct smscore_device_t *coredev)
295{
296 if (coredev->ir.input_dev)
297 input_unregister_device(coredev->ir.input_dev);
298
299 sms_log("");
300}
301
diff --git a/drivers/media/dvb/siano/smsir.h b/drivers/media/dvb/siano/smsir.h
new file mode 100644
index 000000000000..b7d703e2d338
--- /dev/null
+++ b/drivers/media/dvb/siano/smsir.h
@@ -0,0 +1,93 @@
1/****************************************************************
2
3Siano Mobile Silicon, Inc.
4MDTV receiver kernel modules.
5Copyright (C) 2006-2009, Uri Shkolnik
6
7This program is free software: you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation, either version 2 of the License, or
10(at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with this program. If not, see <http://www.gnu.org/licenses/>.
19
20****************************************************************/
21
22#ifndef __SMS_IR_H__
23#define __SMS_IR_H__
24
25#include <linux/input.h>
26
27#define IR_DEV_NAME_MAX_LEN 23 /* "SMS IR kbd type nn\0" */
28#define IR_KEYBOARD_LAYOUT_SIZE 64
29#define IR_DEFAULT_TIMEOUT 100
30
/* Board-specific remote keyboard flavours understood by the driver. */
enum ir_kb_type {
	SMS_IR_KB_DEFAULT_TV,
	SMS_IR_KB_HCW_SILVER	/* Hauppauge "silver" remote */
};

/* Standard RC5 system (device) addresses (5-bit on the wire). */
enum rc5_keyboard_address {
	KEYBOARD_ADDRESS_TV1 = 0,
	KEYBOARD_ADDRESS_TV2 = 1,
	KEYBOARD_ADDRESS_TELETEXT = 2,
	KEYBOARD_ADDRESS_VIDEO = 3,
	KEYBOARD_ADDRESS_LV1 = 4,
	KEYBOARD_ADDRESS_VCR1 = 5,
	KEYBOARD_ADDRESS_VCR2 = 6,
	KEYBOARD_ADDRESS_EXPERIMENTAL = 7,
	KEYBOARD_ADDRESS_SAT1 = 8,
	KEYBOARD_ADDRESS_CAMERA = 9,
	KEYBOARD_ADDRESS_SAT2 = 10,
	KEYBOARD_ADDRESS_CDV = 12,
	KEYBOARD_ADDRESS_CAMCORDER = 13,
	KEYBOARD_ADDRESS_PRE_AMP = 16,
	KEYBOARD_ADDRESS_TUNER = 17,
	KEYBOARD_ADDRESS_RECORDER1 = 18,
	KEYBOARD_ADDRESS_PRE_AMP1 = 19,
	KEYBOARD_ADDRESS_CD_PLAYER = 20,
	KEYBOARD_ADDRESS_PHONO = 21,
	KEYBOARD_ADDRESS_SATA = 22,
	KEYBOARD_ADDRESS_RECORDER2 = 23,
	KEYBOARD_ADDRESS_CDR = 26,
	KEYBOARD_ADDRESS_LIGHTING = 29,
	KEYBOARD_ADDRESS_LIGHTING1 = 30, /* KEYBOARD_ADDRESS_HCW_SILVER */
	KEYBOARD_ADDRESS_PHONE = 31,
	KEYBOARD_ADDRESS_NOT_RC5 = 0xFFFF /* layout is not RC5-addressed */
};

/* IR protocols selectable per keyboard layout (only RC5 implemented). */
enum ir_protocol {
	IR_RC5,
	IR_RCMM
};

/* Binds a protocol and RC5 address to a command -> keycode table. */
struct keyboard_layout_map_t {
	enum ir_protocol ir_protocol;		/* decoder to use */
	enum rc5_keyboard_address rc5_kbd_address; /* expected RC5 address */
	u16 keyboard_layout_map[IR_KEYBOARD_LAYOUT_SIZE]; /* cmd -> KEY_* */
};

struct smscore_device_t;

/* Per-device IR state, embedded in struct smscore_device_t. */
struct ir_t {
	struct input_dev *input_dev;	/* registered input device */
	enum ir_kb_type ir_kb_type;	/* board's keyboard layout index */
	char name[IR_DEV_NAME_MAX_LEN+1]; /* input device name/phys string */
	u16 *keyboard_layout_map;	/* cmd -> keycode table in use */
	u32 timeout;			/* in ms (IR_DEFAULT_TIMEOUT) */
	u32 controller;			/* IR controller/SPI number (todo) */
};

int sms_ir_init(struct smscore_device_t *coredev);
void sms_ir_exit(struct smscore_device_t *coredev);
void sms_ir_event(struct smscore_device_t *coredev,
		const char *buf, int len);
91
92#endif /* __SMS_IR_H__ */
93
diff --git a/drivers/media/dvb/siano/smssdio.c b/drivers/media/dvb/siano/smssdio.c
new file mode 100644
index 000000000000..dfaa49a53f32
--- /dev/null
+++ b/drivers/media/dvb/siano/smssdio.c
@@ -0,0 +1,357 @@
1/*
2 * smssdio.c - Siano 1xxx SDIO interface driver
3 *
4 * Copyright 2008 Pierre Ossman
5 *
6 * Based on code by Siano Mobile Silicon, Inc.,
7 * Copyright (C) 2006-2008, Uri Shkolnik
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or (at
12 * your option) any later version.
13 *
14 *
15 * This hardware is a bit odd in that all transfers should be done
16 * to/from the SMSSDIO_DATA register, yet the "increase address" bit
17 * always needs to be set.
18 *
19 * Also, buffers from the card are always aligned to 128 byte
20 * boundaries.
21 */
22
23/*
24 * General cleanup notes:
25 *
26 * - only typedefs should be name *_t
27 *
28 * - use ERR_PTR and friends for smscore_register_device()
29 *
30 * - smscore_getbuffer should zero fields
31 *
32 * Fix stop command
33 */
34
35#include <linux/moduleparam.h>
36#include <linux/firmware.h>
37#include <linux/delay.h>
38#include <linux/mmc/card.h>
39#include <linux/mmc/sdio_func.h>
40#include <linux/mmc/sdio_ids.h>
41
42#include "smscoreapi.h"
43#include "sms-cards.h"
44
45/* Registers */
46
47#define SMSSDIO_DATA 0x00
48#define SMSSDIO_INT 0x04
49
/* SDIO vendor/device IDs this driver binds to, with their board ids. */
static const struct sdio_device_id smssdio_ids[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR),
	 .driver_data = SMS1XXX_BOARD_SIANO_STELLAR},
	{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_A0),
	 .driver_data = SMS1XXX_BOARD_SIANO_NOVA_A},
	{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_B0),
	 .driver_data = SMS1XXX_BOARD_SIANO_NOVA_B},
	{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VEGA_A0),
	 .driver_data = SMS1XXX_BOARD_SIANO_VEGA},
	{SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VENICE),
	 .driver_data = SMS1XXX_BOARD_SIANO_VEGA},
	{ /* end: all zeroes */ },
};

MODULE_DEVICE_TABLE(sdio, smssdio_ids);

/* Per-SDIO-function driver state. */
struct smssdio_device {
	struct sdio_func *func;		/* underlying SDIO function */

	struct smscore_device_t *coredev;	/* Siano core handle */

	/* header block of a split message, held until the rest arrives */
	struct smscore_buffer_t *split_cb;
};
73
74/*******************************************************************/
75/* Siano core callbacks */
76/*******************************************************************/
77
78static int smssdio_sendrequest(void *context, void *buffer, size_t size)
79{
80 int ret;
81 struct smssdio_device *smsdev;
82
83 smsdev = context;
84
85 sdio_claim_host(smsdev->func);
86
87 while (size >= smsdev->func->cur_blksize) {
88 ret = sdio_write_blocks(smsdev->func, SMSSDIO_DATA, buffer, 1);
89 if (ret)
90 goto out;
91
92 buffer += smsdev->func->cur_blksize;
93 size -= smsdev->func->cur_blksize;
94 }
95
96 if (size) {
97 ret = sdio_write_bytes(smsdev->func, SMSSDIO_DATA,
98 buffer, size);
99 }
100
101out:
102 sdio_release_host(smsdev->func);
103
104 return ret;
105}
106
107/*******************************************************************/
108/* SDIO callbacks */
109/*******************************************************************/
110
111static void smssdio_interrupt(struct sdio_func *func)
112{
113 int ret, isr;
114
115 struct smssdio_device *smsdev;
116 struct smscore_buffer_t *cb;
117 struct SmsMsgHdr_ST *hdr;
118 size_t size;
119
120 smsdev = sdio_get_drvdata(func);
121
122 /*
123 * The interrupt register has no defined meaning. It is just
124 * a way of turning of the level triggered interrupt.
125 */
126 isr = sdio_readb(func, SMSSDIO_INT, &ret);
127 if (ret) {
128 dev_err(&smsdev->func->dev,
129 "Unable to read interrupt register!\n");
130 return;
131 }
132
133 if (smsdev->split_cb == NULL) {
134 cb = smscore_getbuffer(smsdev->coredev);
135 if (!cb) {
136 dev_err(&smsdev->func->dev,
137 "Unable to allocate data buffer!\n");
138 return;
139 }
140
141 ret = sdio_read_blocks(smsdev->func, cb->p, SMSSDIO_DATA, 1);
142 if (ret) {
143 dev_err(&smsdev->func->dev,
144 "Error %d reading initial block!\n", ret);
145 return;
146 }
147
148 hdr = cb->p;
149
150 if (hdr->msgFlags & MSG_HDR_FLAG_SPLIT_MSG) {
151 smsdev->split_cb = cb;
152 return;
153 }
154
155 size = hdr->msgLength - smsdev->func->cur_blksize;
156 } else {
157 cb = smsdev->split_cb;
158 hdr = cb->p;
159
160 size = hdr->msgLength - sizeof(struct SmsMsgHdr_ST);
161
162 smsdev->split_cb = NULL;
163 }
164
165 if (hdr->msgLength > smsdev->func->cur_blksize) {
166 void *buffer;
167
168 size = ALIGN(size, 128);
169 buffer = cb->p + hdr->msgLength;
170
171 BUG_ON(smsdev->func->cur_blksize != 128);
172
173 /*
174 * First attempt to transfer all of it in one go...
175 */
176 ret = sdio_read_blocks(smsdev->func, buffer,
177 SMSSDIO_DATA, size / 128);
178 if (ret && ret != -EINVAL) {
179 smscore_putbuffer(smsdev->coredev, cb);
180 dev_err(&smsdev->func->dev,
181 "Error %d reading data from card!\n", ret);
182 return;
183 }
184
185 /*
186 * ..then fall back to one block at a time if that is
187 * not possible...
188 *
189 * (we have to do this manually because of the
190 * problem with the "increase address" bit)
191 */
192 if (ret == -EINVAL) {
193 while (size) {
194 ret = sdio_read_blocks(smsdev->func,
195 buffer, SMSSDIO_DATA, 1);
196 if (ret) {
197 smscore_putbuffer(smsdev->coredev, cb);
198 dev_err(&smsdev->func->dev,
199 "Error %d reading "
200 "data from card!\n", ret);
201 return;
202 }
203
204 buffer += smsdev->func->cur_blksize;
205 if (size > smsdev->func->cur_blksize)
206 size -= smsdev->func->cur_blksize;
207 else
208 size = 0;
209 }
210 }
211 }
212
213 cb->size = hdr->msgLength;
214 cb->offset = 0;
215
216 smscore_onresponse(smsdev->coredev, cb);
217}
218
/*
 * Bind to an SDIO function: register with the Siano core, enable the
 * function with a 128-byte block size, hook the IRQ and start the
 * device.
 *
 * Returns 0 on success or a negative errno; partially acquired
 * resources are unwound through the goto chain at the bottom.
 */
static int smssdio_probe(struct sdio_func *func,
			 const struct sdio_device_id *id)
{
	int ret;

	int board_id;
	struct smssdio_device *smsdev;
	struct smsdevice_params_t params;

	board_id = id->driver_data;

	smsdev = kzalloc(sizeof(struct smssdio_device), GFP_KERNEL);
	if (!smsdev)
		return -ENOMEM;

	smsdev->func = func;

	memset(&params, 0, sizeof(struct smsdevice_params_t));

	params.device = &func->dev;
	params.buffer_size = 0x5000;	/* ?? */
	params.num_buffers = 22;	/* ?? */
	params.context = smsdev;

	snprintf(params.devpath, sizeof(params.devpath),
		 "sdio\\%s", sdio_func_id(func));

	params.sendrequest_handler = smssdio_sendrequest;

	params.device_type = sms_get_board(board_id)->type;

	if (params.device_type != SMS_STELLAR)
		params.flags |= SMS_DEVICE_FAMILY2;
	else {
		/*
		 * FIXME: Stellar needs special handling...
		 */
		ret = -ENODEV;
		goto free;
	}

	ret = smscore_register_device(&params, &smsdev->coredev);
	if (ret < 0)
		goto free;

	smscore_set_board_id(smsdev->coredev, board_id);

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret)
		goto release;

	/* hardware expects 128-byte blocks (see BUG_ON in the IRQ path) */
	ret = sdio_set_block_size(func, 128);
	if (ret)
		goto disable;

	ret = sdio_claim_irq(func, smssdio_interrupt);
	if (ret)
		goto disable;

	sdio_set_drvdata(func, smsdev);

	sdio_release_host(func);

	ret = smscore_start_device(smsdev->coredev);
	if (ret < 0)
		goto reclaim;

	return 0;

	/* unwind: each label releases what was acquired after the previous */
reclaim:
	sdio_claim_host(func);
	sdio_release_irq(func);
disable:
	sdio_disable_func(func);
release:
	sdio_release_host(func);
	smscore_unregister_device(smsdev->coredev);
free:
	kfree(smsdev);

	return ret;
}
303
304static void smssdio_remove(struct sdio_func *func)
305{
306 struct smssdio_device *smsdev;
307
308 smsdev = sdio_get_drvdata(func);
309
310 /* FIXME: racy! */
311 if (smsdev->split_cb)
312 smscore_putbuffer(smsdev->coredev, smsdev->split_cb);
313
314 smscore_unregister_device(smsdev->coredev);
315
316 sdio_claim_host(func);
317 sdio_release_irq(func);
318 sdio_disable_func(func);
319 sdio_release_host(func);
320
321 kfree(smsdev);
322}
323
324static struct sdio_driver smssdio_driver = {
325 .name = "smssdio",
326 .id_table = smssdio_ids,
327 .probe = smssdio_probe,
328 .remove = smssdio_remove,
329};
330
331/*******************************************************************/
332/* Module functions */
333/*******************************************************************/
334
335int smssdio_module_init(void)
336{
337 int ret = 0;
338
339 printk(KERN_INFO "smssdio: Siano SMS1xxx SDIO driver\n");
340 printk(KERN_INFO "smssdio: Copyright Pierre Ossman\n");
341
342 ret = sdio_register_driver(&smssdio_driver);
343
344 return ret;
345}
346
347void smssdio_module_exit(void)
348{
349 sdio_unregister_driver(&smssdio_driver);
350}
351
352module_init(smssdio_module_init);
353module_exit(smssdio_module_exit);
354
355MODULE_DESCRIPTION("Siano SMS1xxx SDIO driver");
356MODULE_AUTHOR("Pierre Ossman");
357MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 71c65f544c07..cb8a358b7310 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -1,23 +1,23 @@
1/* 1/****************************************************************
2 * Driver for the Siano SMS1xxx USB dongle 2
3 * 3Siano Mobile Silicon, Inc.
4 * author: Anatoly Greenblat 4MDTV receiver kernel modules.
5 * 5Copyright (C) 2005-2009, Uri Shkolnik, Anatoly Greenblat
6 * Copyright (c), 2005-2008 Siano Mobile Silicon, Inc. 6
7 * 7This program is free software: you can redistribute it and/or modify
8 * This program is free software; you can redistribute it and/or modify 8it under the terms of the GNU General Public License as published by
9 * it under the terms of the GNU General Public License version 2 as 9the Free Software Foundation, either version 2 of the License, or
10 * published by the Free Software Foundation; 10(at your option) any later version.
11 * 11
12 * Software distributed under the License is distributed on an "AS IS" 12 This program is distributed in the hope that it will be useful,
13 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. 13but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * 14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * See the GNU General Public License for more details. 15GNU General Public License for more details.
16 * 16
17 * You should have received a copy of the GNU General Public License 17You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18along with this program. If not, see <http://www.gnu.org/licenses/>.
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19
20 */ 20****************************************************************/
21 21
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/init.h> 23#include <linux/init.h>
@@ -26,6 +26,7 @@
26 26
27#include "smscoreapi.h" 27#include "smscoreapi.h"
28#include "sms-cards.h" 28#include "sms-cards.h"
29#include "smsendian.h"
29 30
30static int sms_dbg; 31static int sms_dbg;
31module_param_named(debug, sms_dbg, int, 0644); 32module_param_named(debug, sms_dbg, int, 0644);
@@ -64,15 +65,16 @@ static void smsusb_onresponse(struct urb *urb)
64 struct smsusb_urb_t *surb = (struct smsusb_urb_t *) urb->context; 65 struct smsusb_urb_t *surb = (struct smsusb_urb_t *) urb->context;
65 struct smsusb_device_t *dev = surb->dev; 66 struct smsusb_device_t *dev = surb->dev;
66 67
67 if (urb->status < 0) { 68 if (urb->status == -ESHUTDOWN) {
68 sms_err("error, urb status %d, %d bytes", 69 sms_err("error, urb status %d (-ESHUTDOWN), %d bytes",
69 urb->status, urb->actual_length); 70 urb->status, urb->actual_length);
70 return; 71 return;
71 } 72 }
72 73
73 if (urb->actual_length > 0) { 74 if ((urb->actual_length > 0) && (urb->status == 0)) {
74 struct SmsMsgHdr_ST *phdr = (struct SmsMsgHdr_ST *) surb->cb->p; 75 struct SmsMsgHdr_ST *phdr = (struct SmsMsgHdr_ST *)surb->cb->p;
75 76
77 smsendian_handle_message_header(phdr);
76 if (urb->actual_length >= phdr->msgLength) { 78 if (urb->actual_length >= phdr->msgLength) {
77 surb->cb->size = phdr->msgLength; 79 surb->cb->size = phdr->msgLength;
78 80
@@ -109,7 +111,10 @@ static void smsusb_onresponse(struct urb *urb)
109 "msglen %d actual %d", 111 "msglen %d actual %d",
110 phdr->msgLength, urb->actual_length); 112 phdr->msgLength, urb->actual_length);
111 } 113 }
112 } 114 } else
115 sms_err("error, urb status %d, %d bytes",
116 urb->status, urb->actual_length);
117
113 118
114exit_and_resubmit: 119exit_and_resubmit:
115 smsusb_submit_urb(dev, surb); 120 smsusb_submit_urb(dev, surb);
@@ -176,6 +181,7 @@ static int smsusb_sendrequest(void *context, void *buffer, size_t size)
176 struct smsusb_device_t *dev = (struct smsusb_device_t *) context; 181 struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
177 int dummy; 182 int dummy;
178 183
184 smsendian_handle_message_header((struct SmsMsgHdr_ST *)buffer);
179 return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2), 185 return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
180 buffer, size, &dummy, 1000); 186 buffer, size, &dummy, 1000);
181} 187}
@@ -333,8 +339,8 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
333 case SMS_VEGA: 339 case SMS_VEGA:
334 dev->buffer_size = USB2_BUFFER_SIZE; 340 dev->buffer_size = USB2_BUFFER_SIZE;
335 dev->response_alignment = 341 dev->response_alignment =
336 dev->udev->ep_in[1]->desc.wMaxPacketSize - 342 le16_to_cpu(dev->udev->ep_in[1]->desc.wMaxPacketSize) -
337 sizeof(struct SmsMsgHdr_ST); 343 sizeof(struct SmsMsgHdr_ST);
338 344
339 params.flags |= SMS_DEVICE_FAMILY2; 345 params.flags |= SMS_DEVICE_FAMILY2;
340 break; 346 break;
@@ -479,7 +485,6 @@ static int smsusb_resume(struct usb_interface *intf)
479} 485}
480 486
481struct usb_device_id smsusb_id_table[] = { 487struct usb_device_id smsusb_id_table[] = {
482#ifdef CONFIG_DVB_SIANO_SMS1XXX_SMS_IDS
483 { USB_DEVICE(0x187f, 0x0010), 488 { USB_DEVICE(0x187f, 0x0010),
484 .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, 489 .driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
485 { USB_DEVICE(0x187f, 0x0100), 490 { USB_DEVICE(0x187f, 0x0100),
@@ -490,7 +495,6 @@ struct usb_device_id smsusb_id_table[] = {
490 .driver_info = SMS1XXX_BOARD_SIANO_NOVA_B }, 495 .driver_info = SMS1XXX_BOARD_SIANO_NOVA_B },
491 { USB_DEVICE(0x187f, 0x0300), 496 { USB_DEVICE(0x187f, 0x0300),
492 .driver_info = SMS1XXX_BOARD_SIANO_VEGA }, 497 .driver_info = SMS1XXX_BOARD_SIANO_VEGA },
493#endif
494 { USB_DEVICE(0x2040, 0x1700), 498 { USB_DEVICE(0x2040, 0x1700),
495 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_CATAMOUNT }, 499 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_CATAMOUNT },
496 { USB_DEVICE(0x2040, 0x1800), 500 { USB_DEVICE(0x2040, 0x1800),
@@ -521,8 +525,13 @@ struct usb_device_id smsusb_id_table[] = {
521 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, 525 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
522 { USB_DEVICE(0x2040, 0x5590), 526 { USB_DEVICE(0x2040, 0x5590),
523 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, 527 .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
524 { } /* Terminating entry */ 528 { USB_DEVICE(0x187f, 0x0202),
525}; 529 .driver_info = SMS1XXX_BOARD_SIANO_NICE },
530 { USB_DEVICE(0x187f, 0x0301),
531 .driver_info = SMS1XXX_BOARD_SIANO_VENICE },
532 { } /* Terminating entry */
533 };
534
526MODULE_DEVICE_TABLE(usb, smsusb_id_table); 535MODULE_DEVICE_TABLE(usb, smsusb_id_table);
527 536
528static struct usb_driver smsusb_driver = { 537static struct usb_driver smsusb_driver = {
@@ -548,14 +557,14 @@ int smsusb_module_init(void)
548 557
549void smsusb_module_exit(void) 558void smsusb_module_exit(void)
550{ 559{
551 sms_debug("");
552 /* Regular USB Cleanup */ 560 /* Regular USB Cleanup */
553 usb_deregister(&smsusb_driver); 561 usb_deregister(&smsusb_driver);
562 sms_info("end");
554} 563}
555 564
556module_init(smsusb_module_init); 565module_init(smsusb_module_init);
557module_exit(smsusb_module_exit); 566module_exit(smsusb_module_exit);
558 567
559MODULE_DESCRIPTION("Driver for the Siano SMS1XXX USB dongle"); 568MODULE_DESCRIPTION("Driver for the Siano SMS1xxx USB dongle");
560MODULE_AUTHOR("Siano Mobile Silicon, INC. (uris@siano-ms.com)"); 569MODULE_AUTHOR("Siano Mobile Silicon, INC. (uris@siano-ms.com)");
561MODULE_LICENSE("GPL"); 570MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index e4d0900d5121..53884814161c 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -89,6 +89,7 @@
89 89
90static void p_to_t(u8 const *buf, long int length, u16 pid, 90static void p_to_t(u8 const *buf, long int length, u16 pid,
91 u8 *counter, struct dvb_demux_feed *feed); 91 u8 *counter, struct dvb_demux_feed *feed);
92static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, size_t len);
92 93
93 94
94int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len) 95int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len)
@@ -192,8 +193,6 @@ int av7110_av_start_play(struct av7110 *av7110, int av)
192 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AV_PES, 0); 193 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AV_PES, 0);
193 break; 194 break;
194 } 195 }
195 if (!ret)
196 ret = av7110->playing;
197 return ret; 196 return ret;
198} 197}
199 198
@@ -437,6 +436,45 @@ static void play_audio_cb(u8 *buf, int count, void *priv)
437 aux_ring_buffer_write(&av7110->aout, buf, count); 436 aux_ring_buffer_write(&av7110->aout, buf, count);
438} 437}
439 438
439
440#define FREE_COND_TS (dvb_ringbuffer_free(rb) >= 4096)
441
442static ssize_t ts_play(struct av7110 *av7110, const char __user *buf,
443 unsigned long count, int nonblock, int type)
444{
445 struct dvb_ringbuffer *rb;
446 u8 *kb;
447 unsigned long todo = count;
448
449 dprintk(2, "%s: type %d cnt %lu\n", __func__, type, count);
450
451 rb = (type) ? &av7110->avout : &av7110->aout;
452 kb = av7110->kbuf[type];
453
454 if (!kb)
455 return -ENOBUFS;
456
457 if (nonblock && !FREE_COND_TS)
458 return -EWOULDBLOCK;
459
460 while (todo >= TS_SIZE) {
461 if (!FREE_COND_TS) {
462 if (nonblock)
463 return count - todo;
464 if (wait_event_interruptible(rb->queue, FREE_COND_TS))
465 return count - todo;
466 }
467 if (copy_from_user(kb, buf, TS_SIZE))
468 return -EFAULT;
469 write_ts_to_decoder(av7110, type, kb, TS_SIZE);
470 todo -= TS_SIZE;
471 buf += TS_SIZE;
472 }
473
474 return count - todo;
475}
476
477
440#define FREE_COND (dvb_ringbuffer_free(&av7110->avout) >= 20 * 1024 && \ 478#define FREE_COND (dvb_ringbuffer_free(&av7110->avout) >= 20 * 1024 && \
441 dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024) 479 dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024)
442 480
@@ -780,11 +818,37 @@ static void p_to_t(u8 const *buf, long int length, u16 pid, u8 *counter,
780} 818}
781 819
782 820
821static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, size_t len)
822{
823 struct ipack *ipack = &av7110->ipack[type];
824
825 if (buf[1] & TRANS_ERROR) {
826 av7110_ipack_reset(ipack);
827 return -1;
828 }
829
830 if (!(buf[3] & PAYLOAD))
831 return -1;
832
833 if (buf[1] & PAY_START)
834 av7110_ipack_flush(ipack);
835
836 if (buf[3] & ADAPT_FIELD) {
837 len -= buf[4] + 1;
838 buf += buf[4] + 1;
839 if (!len)
840 return 0;
841 }
842
843 av7110_ipack_instant_repack(buf + 4, len - 4, ipack);
844 return 0;
845}
846
847
783int av7110_write_to_decoder(struct dvb_demux_feed *feed, const u8 *buf, size_t len) 848int av7110_write_to_decoder(struct dvb_demux_feed *feed, const u8 *buf, size_t len)
784{ 849{
785 struct dvb_demux *demux = feed->demux; 850 struct dvb_demux *demux = feed->demux;
786 struct av7110 *av7110 = (struct av7110 *) demux->priv; 851 struct av7110 *av7110 = (struct av7110 *) demux->priv;
787 struct ipack *ipack = &av7110->ipack[feed->pes_type];
788 852
789 dprintk(2, "av7110:%p, \n", av7110); 853 dprintk(2, "av7110:%p, \n", av7110);
790 854
@@ -804,20 +868,7 @@ int av7110_write_to_decoder(struct dvb_demux_feed *feed, const u8 *buf, size_t l
804 return -1; 868 return -1;
805 } 869 }
806 870
807 if (!(buf[3] & 0x10)) /* no payload? */ 871 return write_ts_to_decoder(av7110, feed->pes_type, buf, len);
808 return -1;
809 if (buf[1] & 0x40)
810 av7110_ipack_flush(ipack);
811
812 if (buf[3] & 0x20) { /* adaptation field? */
813 len -= buf[4] + 1;
814 buf += buf[4] + 1;
815 if (!len)
816 return 0;
817 }
818
819 av7110_ipack_instant_repack(buf + 4, len - 4, &av7110->ipack[feed->pes_type]);
820 return 0;
821} 872}
822 873
823 874
@@ -916,6 +967,7 @@ static ssize_t dvb_video_write(struct file *file, const char __user *buf,
916{ 967{
917 struct dvb_device *dvbdev = file->private_data; 968 struct dvb_device *dvbdev = file->private_data;
918 struct av7110 *av7110 = dvbdev->priv; 969 struct av7110 *av7110 = dvbdev->priv;
970 unsigned char c;
919 971
920 dprintk(2, "av7110:%p, \n", av7110); 972 dprintk(2, "av7110:%p, \n", av7110);
921 973
@@ -925,7 +977,12 @@ static ssize_t dvb_video_write(struct file *file, const char __user *buf,
925 if (av7110->videostate.stream_source != VIDEO_SOURCE_MEMORY) 977 if (av7110->videostate.stream_source != VIDEO_SOURCE_MEMORY)
926 return -EPERM; 978 return -EPERM;
927 979
928 return dvb_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1); 980 if (get_user(c, buf))
981 return -EFAULT;
982 if (c == 0x47 && count % TS_SIZE == 0)
983 return ts_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
984 else
985 return dvb_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
929} 986}
930 987
931static unsigned int dvb_audio_poll(struct file *file, poll_table *wait) 988static unsigned int dvb_audio_poll(struct file *file, poll_table *wait)
@@ -952,6 +1009,7 @@ static ssize_t dvb_audio_write(struct file *file, const char __user *buf,
952{ 1009{
953 struct dvb_device *dvbdev = file->private_data; 1010 struct dvb_device *dvbdev = file->private_data;
954 struct av7110 *av7110 = dvbdev->priv; 1011 struct av7110 *av7110 = dvbdev->priv;
1012 unsigned char c;
955 1013
956 dprintk(2, "av7110:%p, \n", av7110); 1014 dprintk(2, "av7110:%p, \n", av7110);
957 1015
@@ -959,7 +1017,13 @@ static ssize_t dvb_audio_write(struct file *file, const char __user *buf,
959 printk(KERN_ERR "not audio source memory\n"); 1017 printk(KERN_ERR "not audio source memory\n");
960 return -EPERM; 1018 return -EPERM;
961 } 1019 }
962 return dvb_aplay(av7110, buf, count, file->f_flags & O_NONBLOCK, 0); 1020
1021 if (get_user(c, buf))
1022 return -EFAULT;
1023 if (c == 0x47 && count % TS_SIZE == 0)
1024 return ts_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 0);
1025 else
1026 return dvb_aplay(av7110, buf, count, file->f_flags & O_NONBLOCK, 0);
963} 1027}
964 1028
965static u8 iframe_header[] = { 0x00, 0x00, 0x01, 0xe0, 0x00, 0x00, 0x80, 0x00, 0x00 }; 1029static u8 iframe_header[] = { 0x00, 0x00, 0x01, 0xe0, 0x00, 0x00, 0x80, 0x00, 0x00 };
@@ -1062,7 +1126,6 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1062 if (ret) 1126 if (ret)
1063 break; 1127 break;
1064 } 1128 }
1065
1066 if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY) { 1129 if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY) {
1067 if (av7110->playing == RP_AV) { 1130 if (av7110->playing == RP_AV) {
1068 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0); 1131 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0);
@@ -1122,20 +1185,16 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1122 case VIDEO_SET_DISPLAY_FORMAT: 1185 case VIDEO_SET_DISPLAY_FORMAT:
1123 { 1186 {
1124 video_displayformat_t format = (video_displayformat_t) arg; 1187 video_displayformat_t format = (video_displayformat_t) arg;
1125
1126 switch (format) { 1188 switch (format) {
1127 case VIDEO_PAN_SCAN: 1189 case VIDEO_PAN_SCAN:
1128 av7110->display_panscan = VID_PAN_SCAN_PREF; 1190 av7110->display_panscan = VID_PAN_SCAN_PREF;
1129 break; 1191 break;
1130
1131 case VIDEO_LETTER_BOX: 1192 case VIDEO_LETTER_BOX:
1132 av7110->display_panscan = VID_VC_AND_PS_PREF; 1193 av7110->display_panscan = VID_VC_AND_PS_PREF;
1133 break; 1194 break;
1134
1135 case VIDEO_CENTER_CUT_OUT: 1195 case VIDEO_CENTER_CUT_OUT:
1136 av7110->display_panscan = VID_CENTRE_CUT_PREF; 1196 av7110->display_panscan = VID_CENTRE_CUT_PREF;
1137 break; 1197 break;
1138
1139 default: 1198 default:
1140 ret = -EINVAL; 1199 ret = -EINVAL;
1141 } 1200 }
@@ -1183,7 +1242,8 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1183 1242
1184 case VIDEO_SLOWMOTION: 1243 case VIDEO_SLOWMOTION:
1185 if (av7110->playing&RP_VIDEO) { 1244 if (av7110->playing&RP_VIDEO) {
1186 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Slow, 2, 0, 0); 1245 if (av7110->trickmode != TRICK_SLOW)
1246 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Slow, 2, 0, 0);
1187 if (!ret) 1247 if (!ret)
1188 ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg); 1248 ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg);
1189 } else { 1249 } else {
@@ -1207,7 +1267,6 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1207 case VIDEO_CLEAR_BUFFER: 1267 case VIDEO_CLEAR_BUFFER:
1208 dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout); 1268 dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
1209 av7110_ipack_reset(&av7110->ipack[1]); 1269 av7110_ipack_reset(&av7110->ipack[1]);
1210
1211 if (av7110->playing == RP_AV) { 1270 if (av7110->playing == RP_AV) {
1212 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, 1271 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
1213 __Play, 2, AV_PES, 0); 1272 __Play, 2, AV_PES, 0);
@@ -1228,13 +1287,13 @@ static int dvb_video_ioctl(struct inode *inode, struct file *file,
1228 break; 1287 break;
1229 1288
1230 case VIDEO_SET_STREAMTYPE: 1289 case VIDEO_SET_STREAMTYPE:
1231
1232 break; 1290 break;
1233 1291
1234 default: 1292 default:
1235 ret = -ENOIOCTLCMD; 1293 ret = -ENOIOCTLCMD;
1236 break; 1294 break;
1237 } 1295 }
1296
1238 return ret; 1297 return ret;
1239} 1298}
1240 1299
@@ -1309,7 +1368,6 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
1309 1368
1310 case AUDIO_CHANNEL_SELECT: 1369 case AUDIO_CHANNEL_SELECT:
1311 av7110->audiostate.channel_select = (audio_channel_select_t) arg; 1370 av7110->audiostate.channel_select = (audio_channel_select_t) arg;
1312
1313 switch(av7110->audiostate.channel_select) { 1371 switch(av7110->audiostate.channel_select) {
1314 case AUDIO_STEREO: 1372 case AUDIO_STEREO:
1315 ret = audcom(av7110, AUDIO_CMD_STEREO); 1373 ret = audcom(av7110, AUDIO_CMD_STEREO);
@@ -1320,7 +1378,6 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
1320 msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220); 1378 msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220);
1321 } 1379 }
1322 break; 1380 break;
1323
1324 case AUDIO_MONO_LEFT: 1381 case AUDIO_MONO_LEFT:
1325 ret = audcom(av7110, AUDIO_CMD_MONO_L); 1382 ret = audcom(av7110, AUDIO_CMD_MONO_L);
1326 if (!ret) { 1383 if (!ret) {
@@ -1330,7 +1387,6 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
1330 msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0200); 1387 msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0200);
1331 } 1388 }
1332 break; 1389 break;
1333
1334 case AUDIO_MONO_RIGHT: 1390 case AUDIO_MONO_RIGHT:
1335 ret = audcom(av7110, AUDIO_CMD_MONO_R); 1391 ret = audcom(av7110, AUDIO_CMD_MONO_R);
1336 if (!ret) { 1392 if (!ret) {
@@ -1340,7 +1396,6 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
1340 msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0210); 1396 msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0210);
1341 } 1397 }
1342 break; 1398 break;
1343
1344 default: 1399 default:
1345 ret = -EINVAL; 1400 ret = -EINVAL;
1346 break; 1401 break;
@@ -1366,21 +1421,24 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
1366 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, 1421 ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
1367 __Play, 2, AV_PES, 0); 1422 __Play, 2, AV_PES, 0);
1368 break; 1423 break;
1369 case AUDIO_SET_ID:
1370 1424
1425 case AUDIO_SET_ID:
1371 break; 1426 break;
1427
1372 case AUDIO_SET_MIXER: 1428 case AUDIO_SET_MIXER:
1373 { 1429 {
1374 struct audio_mixer *amix = (struct audio_mixer *)parg; 1430 struct audio_mixer *amix = (struct audio_mixer *)parg;
1375
1376 ret = av7110_set_volume(av7110, amix->volume_left, amix->volume_right); 1431 ret = av7110_set_volume(av7110, amix->volume_left, amix->volume_right);
1377 break; 1432 break;
1378 } 1433 }
1434
1379 case AUDIO_SET_STREAMTYPE: 1435 case AUDIO_SET_STREAMTYPE:
1380 break; 1436 break;
1437
1381 default: 1438 default:
1382 ret = -ENOIOCTLCMD; 1439 ret = -ENOIOCTLCMD;
1383 } 1440 }
1441
1384 return ret; 1442 return ret;
1385} 1443}
1386 1444
diff --git a/drivers/media/dvb/ttpci/av7110_hw.c b/drivers/media/dvb/ttpci/av7110_hw.c
index 5e3f88911a1d..e162691b515d 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.c
+++ b/drivers/media/dvb/ttpci/av7110_hw.c
@@ -1089,7 +1089,7 @@ int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc)
1089 else { 1089 else {
1090 int i, len = dc->x0-dc->color+1; 1090 int i, len = dc->x0-dc->color+1;
1091 u8 __user *colors = (u8 __user *)dc->data; 1091 u8 __user *colors = (u8 __user *)dc->data;
1092 u8 r, g, b, blend; 1092 u8 r, g = 0, b = 0, blend = 0;
1093 ret = 0; 1093 ret = 0;
1094 for (i = 0; i<len; i++) { 1094 for (i = 0; i<len; i++) {
1095 if (get_user(r, colors + i * 4) || 1095 if (get_user(r, colors + i * 4) ||
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index 2210cff738e6..ce64c6214cc4 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -458,7 +458,7 @@ static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
458 dprintk(2, "VIDIOC_ENUMINPUT: %d\n", i->index); 458 dprintk(2, "VIDIOC_ENUMINPUT: %d\n", i->index);
459 459
460 if (av7110->analog_tuner_flags) { 460 if (av7110->analog_tuner_flags) {
461 if (i->index < 0 || i->index >= 4) 461 if (i->index >= 4)
462 return -EINVAL; 462 return -EINVAL;
463 } else { 463 } else {
464 if (i->index != 0) 464 if (i->index != 0)
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 855fe74b640b..8ea915227674 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -1413,7 +1413,7 @@ static struct v4l2_input knc1_inputs[KNC1_INPUTS] = {
1413static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i) 1413static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
1414{ 1414{
1415 dprintk(1, "VIDIOC_ENUMINPUT %d.\n", i->index); 1415 dprintk(1, "VIDIOC_ENUMINPUT %d.\n", i->index);
1416 if (i->index < 0 || i->index >= KNC1_INPUTS) 1416 if (i->index >= KNC1_INPUTS)
1417 return -EINVAL; 1417 return -EINVAL;
1418 memcpy(i, &knc1_inputs[i->index], sizeof(struct v4l2_input)); 1418 memcpy(i, &knc1_inputs[i->index], sizeof(struct v4l2_input));
1419 return 0; 1419 return 0;
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index 83e9e7750c8c..e48380c48990 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -47,6 +47,9 @@
47#include "bsru6.h" 47#include "bsru6.h"
48#include "bsbe1.h" 48#include "bsbe1.h"
49#include "tdhd1.h" 49#include "tdhd1.h"
50#include "stv6110x.h"
51#include "stv090x.h"
52#include "isl6423.h"
50 53
51static int diseqc_method; 54static int diseqc_method;
52module_param(diseqc_method, int, 0444); 55module_param(diseqc_method, int, 0444);
@@ -425,6 +428,44 @@ static u8 read_pwm(struct budget* budget)
425 return pwm; 428 return pwm;
426} 429}
427 430
431static struct stv090x_config tt1600_stv090x_config = {
432 .device = STV0903,
433 .demod_mode = STV090x_SINGLE,
434 .clk_mode = STV090x_CLK_EXT,
435
436 .xtal = 27000000,
437 .address = 0x68,
438 .ref_clk = 27000000,
439
440 .ts1_mode = STV090x_TSMODE_DVBCI,
441 .ts2_mode = STV090x_TSMODE_SERIAL_CONTINUOUS,
442
443 .repeater_level = STV090x_RPTLEVEL_16,
444
445 .tuner_init = NULL,
446 .tuner_set_mode = NULL,
447 .tuner_set_frequency = NULL,
448 .tuner_get_frequency = NULL,
449 .tuner_set_bandwidth = NULL,
450 .tuner_get_bandwidth = NULL,
451 .tuner_set_bbgain = NULL,
452 .tuner_get_bbgain = NULL,
453 .tuner_set_refclk = NULL,
454 .tuner_get_status = NULL,
455};
456
457static struct stv6110x_config tt1600_stv6110x_config = {
458 .addr = 0x60,
459 .refclk = 27000000,
460};
461
462static struct isl6423_config tt1600_isl6423_config = {
463 .current_max = SEC_CURRENT_515m,
464 .curlim = SEC_CURRENT_LIM_ON,
465 .mod_extern = 1,
466 .addr = 0x08,
467};
468
428static void frontend_init(struct budget *budget) 469static void frontend_init(struct budget *budget)
429{ 470{
430 (void)alps_bsbe1_config; /* avoid warning */ 471 (void)alps_bsbe1_config; /* avoid warning */
@@ -566,6 +607,48 @@ static void frontend_init(struct budget *budget)
566 } 607 }
567 break; 608 break;
568 } 609 }
610
611 case 0x101c: { /* TT S2-1600 */
612 struct stv6110x_devctl *ctl;
613 saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
614 msleep(50);
615 saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
616 msleep(250);
617
618 budget->dvb_frontend = dvb_attach(stv090x_attach,
619 &tt1600_stv090x_config,
620 &budget->i2c_adap,
621 STV090x_DEMODULATOR_0);
622
623 if (budget->dvb_frontend) {
624
625 ctl = dvb_attach(stv6110x_attach,
626 budget->dvb_frontend,
627 &tt1600_stv6110x_config,
628 &budget->i2c_adap);
629
630 tt1600_stv090x_config.tuner_init = ctl->tuner_init;
631 tt1600_stv090x_config.tuner_set_mode = ctl->tuner_set_mode;
632 tt1600_stv090x_config.tuner_set_frequency = ctl->tuner_set_frequency;
633 tt1600_stv090x_config.tuner_get_frequency = ctl->tuner_get_frequency;
634 tt1600_stv090x_config.tuner_set_bandwidth = ctl->tuner_set_bandwidth;
635 tt1600_stv090x_config.tuner_get_bandwidth = ctl->tuner_get_bandwidth;
636 tt1600_stv090x_config.tuner_set_bbgain = ctl->tuner_set_bbgain;
637 tt1600_stv090x_config.tuner_get_bbgain = ctl->tuner_get_bbgain;
638 tt1600_stv090x_config.tuner_set_refclk = ctl->tuner_set_refclk;
639 tt1600_stv090x_config.tuner_get_status = ctl->tuner_get_status;
640
641 dvb_attach(isl6423_attach,
642 budget->dvb_frontend,
643 &budget->i2c_adap,
644 &tt1600_isl6423_config);
645
646 } else {
647 dvb_frontend_detach(budget->dvb_frontend);
648 budget->dvb_frontend = NULL;
649 }
650 }
651 break;
569 } 652 }
570 653
571 if (budget->dvb_frontend == NULL) { 654 if (budget->dvb_frontend == NULL) {
@@ -641,6 +724,7 @@ MAKE_BUDGET_INFO(ttbc, "TT-Budget/WinTV-NOVA-C PCI", BUDGET_TT);
641MAKE_BUDGET_INFO(ttbt, "TT-Budget/WinTV-NOVA-T PCI", BUDGET_TT); 724MAKE_BUDGET_INFO(ttbt, "TT-Budget/WinTV-NOVA-T PCI", BUDGET_TT);
642MAKE_BUDGET_INFO(satel, "SATELCO Multimedia PCI", BUDGET_TT_HW_DISEQC); 725MAKE_BUDGET_INFO(satel, "SATELCO Multimedia PCI", BUDGET_TT_HW_DISEQC);
643MAKE_BUDGET_INFO(ttbs1401, "TT-Budget-S-1401 PCI", BUDGET_TT); 726MAKE_BUDGET_INFO(ttbs1401, "TT-Budget-S-1401 PCI", BUDGET_TT);
727MAKE_BUDGET_INFO(tt1600, "TT-Budget S2-1600 PCI", BUDGET_TT);
644MAKE_BUDGET_INFO(fsacs0, "Fujitsu Siemens Activy Budget-S PCI (rev GR/grundig frontend)", BUDGET_FS_ACTIVY); 728MAKE_BUDGET_INFO(fsacs0, "Fujitsu Siemens Activy Budget-S PCI (rev GR/grundig frontend)", BUDGET_FS_ACTIVY);
645MAKE_BUDGET_INFO(fsacs1, "Fujitsu Siemens Activy Budget-S PCI (rev AL/alps frontend)", BUDGET_FS_ACTIVY); 729MAKE_BUDGET_INFO(fsacs1, "Fujitsu Siemens Activy Budget-S PCI (rev AL/alps frontend)", BUDGET_FS_ACTIVY);
646MAKE_BUDGET_INFO(fsact, "Fujitsu Siemens Activy Budget-T PCI (rev GR/Grundig frontend)", BUDGET_FS_ACTIVY); 730MAKE_BUDGET_INFO(fsact, "Fujitsu Siemens Activy Budget-T PCI (rev GR/Grundig frontend)", BUDGET_FS_ACTIVY);
@@ -653,6 +737,7 @@ static struct pci_device_id pci_tbl[] = {
653 MAKE_EXTENSION_PCI(satel, 0x13c2, 0x1013), 737 MAKE_EXTENSION_PCI(satel, 0x13c2, 0x1013),
654 MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1016), 738 MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1016),
655 MAKE_EXTENSION_PCI(ttbs1401, 0x13c2, 0x1018), 739 MAKE_EXTENSION_PCI(ttbs1401, 0x13c2, 0x1018),
740 MAKE_EXTENSION_PCI(tt1600, 0x13c2, 0x101c),
656 MAKE_EXTENSION_PCI(fsacs1,0x1131, 0x4f60), 741 MAKE_EXTENSION_PCI(fsacs1,0x1131, 0x4f60),
657 MAKE_EXTENSION_PCI(fsacs0,0x1131, 0x4f61), 742 MAKE_EXTENSION_PCI(fsacs0,0x1131, 0x4f61),
658 MAKE_EXTENSION_PCI(fsact1, 0x1131, 0x5f60), 743 MAKE_EXTENSION_PCI(fsact1, 0x1131, 0x5f60),
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 613576202294..ed9cd7ad0604 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -33,6 +33,10 @@
33 33
34 History: 34 History:
35 35
36 Version 0.46:
37 Removed usb_dsbr100_open/close calls and radio->users counter. Also,
38 radio->muted changed to radio->status and suspend/resume calls updated.
39
36 Version 0.45: 40 Version 0.45:
37 Converted to v4l2_device. 41 Converted to v4l2_device.
38 42
@@ -100,8 +104,8 @@
100 */ 104 */
101#include <linux/version.h> /* for KERNEL_VERSION MACRO */ 105#include <linux/version.h> /* for KERNEL_VERSION MACRO */
102 106
103#define DRIVER_VERSION "v0.45" 107#define DRIVER_VERSION "v0.46"
104#define RADIO_VERSION KERNEL_VERSION(0, 4, 5) 108#define RADIO_VERSION KERNEL_VERSION(0, 4, 6)
105 109
106#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>" 110#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>"
107#define DRIVER_DESC "D-Link DSB-R100 USB FM radio driver" 111#define DRIVER_DESC "D-Link DSB-R100 USB FM radio driver"
@@ -121,13 +125,15 @@ devices, that would be 76 and 91. */
121#define FREQ_MAX 108.0 125#define FREQ_MAX 108.0
122#define FREQ_MUL 16000 126#define FREQ_MUL 16000
123 127
128/* defines for radio->status */
129#define STARTED 0
130#define STOPPED 1
131
124#define videodev_to_radio(d) container_of(d, struct dsbr100_device, videodev) 132#define videodev_to_radio(d) container_of(d, struct dsbr100_device, videodev)
125 133
126static int usb_dsbr100_probe(struct usb_interface *intf, 134static int usb_dsbr100_probe(struct usb_interface *intf,
127 const struct usb_device_id *id); 135 const struct usb_device_id *id);
128static void usb_dsbr100_disconnect(struct usb_interface *intf); 136static void usb_dsbr100_disconnect(struct usb_interface *intf);
129static int usb_dsbr100_open(struct file *file);
130static int usb_dsbr100_close(struct file *file);
131static int usb_dsbr100_suspend(struct usb_interface *intf, 137static int usb_dsbr100_suspend(struct usb_interface *intf,
132 pm_message_t message); 138 pm_message_t message);
133static int usb_dsbr100_resume(struct usb_interface *intf); 139static int usb_dsbr100_resume(struct usb_interface *intf);
@@ -145,9 +151,8 @@ struct dsbr100_device {
145 struct mutex lock; /* buffer locking */ 151 struct mutex lock; /* buffer locking */
146 int curfreq; 152 int curfreq;
147 int stereo; 153 int stereo;
148 int users;
149 int removed; 154 int removed;
150 int muted; 155 int status;
151}; 156};
152 157
153static struct usb_device_id usb_dsbr100_device_table [] = { 158static struct usb_device_id usb_dsbr100_device_table [] = {
@@ -201,7 +206,7 @@ static int dsbr100_start(struct dsbr100_device *radio)
201 goto usb_control_msg_failed; 206 goto usb_control_msg_failed;
202 } 207 }
203 208
204 radio->muted = 0; 209 radio->status = STARTED;
205 mutex_unlock(&radio->lock); 210 mutex_unlock(&radio->lock);
206 return (radio->transfer_buffer)[0]; 211 return (radio->transfer_buffer)[0];
207 212
@@ -244,7 +249,7 @@ static int dsbr100_stop(struct dsbr100_device *radio)
244 goto usb_control_msg_failed; 249 goto usb_control_msg_failed;
245 } 250 }
246 251
247 radio->muted = 1; 252 radio->status = STOPPED;
248 mutex_unlock(&radio->lock); 253 mutex_unlock(&radio->lock);
249 return (radio->transfer_buffer)[0]; 254 return (radio->transfer_buffer)[0];
250 255
@@ -258,12 +263,12 @@ usb_control_msg_failed:
258} 263}
259 264
260/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */ 265/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
261static int dsbr100_setfreq(struct dsbr100_device *radio, int freq) 266static int dsbr100_setfreq(struct dsbr100_device *radio)
262{ 267{
263 int retval; 268 int retval;
264 int request; 269 int request;
270 int freq = (radio->curfreq / 16 * 80) / 1000 + 856;
265 271
266 freq = (freq / 16 * 80) / 1000 + 856;
267 mutex_lock(&radio->lock); 272 mutex_lock(&radio->lock);
268 273
269 retval = usb_control_msg(radio->usbdev, 274 retval = usb_control_msg(radio->usbdev,
@@ -431,7 +436,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
431 radio->curfreq = f->frequency; 436 radio->curfreq = f->frequency;
432 mutex_unlock(&radio->lock); 437 mutex_unlock(&radio->lock);
433 438
434 retval = dsbr100_setfreq(radio, radio->curfreq); 439 retval = dsbr100_setfreq(radio);
435 if (retval < 0) 440 if (retval < 0)
436 dev_warn(&radio->usbdev->dev, "Set frequency failed\n"); 441 dev_warn(&radio->usbdev->dev, "Set frequency failed\n");
437 return 0; 442 return 0;
@@ -473,7 +478,7 @@ static int vidioc_g_ctrl(struct file *file, void *priv,
473 478
474 switch (ctrl->id) { 479 switch (ctrl->id) {
475 case V4L2_CID_AUDIO_MUTE: 480 case V4L2_CID_AUDIO_MUTE:
476 ctrl->value = radio->muted; 481 ctrl->value = radio->status;
477 return 0; 482 return 0;
478 } 483 }
479 return -EINVAL; 484 return -EINVAL;
@@ -543,65 +548,27 @@ static int vidioc_s_audio(struct file *file, void *priv,
543 return 0; 548 return 0;
544} 549}
545 550
546static int usb_dsbr100_open(struct file *file)
547{
548 struct dsbr100_device *radio = video_drvdata(file);
549 int retval;
550
551 lock_kernel();
552 radio->users = 1;
553 radio->muted = 1;
554
555 retval = dsbr100_start(radio);
556 if (retval < 0) {
557 dev_warn(&radio->usbdev->dev,
558 "Radio did not start up properly\n");
559 radio->users = 0;
560 unlock_kernel();
561 return -EIO;
562 }
563
564 retval = dsbr100_setfreq(radio, radio->curfreq);
565 if (retval < 0)
566 dev_warn(&radio->usbdev->dev,
567 "set frequency failed\n");
568
569 unlock_kernel();
570 return 0;
571}
572
573static int usb_dsbr100_close(struct file *file)
574{
575 struct dsbr100_device *radio = video_drvdata(file);
576 int retval;
577
578 if (!radio)
579 return -ENODEV;
580
581 mutex_lock(&radio->lock);
582 radio->users = 0;
583 mutex_unlock(&radio->lock);
584
585 if (!radio->removed) {
586 retval = dsbr100_stop(radio);
587 if (retval < 0) {
588 dev_warn(&radio->usbdev->dev,
589 "dsbr100_stop failed\n");
590 }
591
592 }
593 return 0;
594}
595
596/* Suspend device - stop device. */ 551/* Suspend device - stop device. */
597static int usb_dsbr100_suspend(struct usb_interface *intf, pm_message_t message) 552static int usb_dsbr100_suspend(struct usb_interface *intf, pm_message_t message)
598{ 553{
599 struct dsbr100_device *radio = usb_get_intfdata(intf); 554 struct dsbr100_device *radio = usb_get_intfdata(intf);
600 int retval; 555 int retval;
601 556
602 retval = dsbr100_stop(radio); 557 if (radio->status == STARTED) {
603 if (retval < 0) 558 retval = dsbr100_stop(radio);
604 dev_warn(&intf->dev, "dsbr100_stop failed\n"); 559 if (retval < 0)
560 dev_warn(&intf->dev, "dsbr100_stop failed\n");
561
562 /* After dsbr100_stop() status set to STOPPED.
563 * If we want driver to start radio on resume
564 * we set status equal to STARTED.
565 * On resume we will check status and run radio if needed.
566 */
567
568 mutex_lock(&radio->lock);
569 radio->status = STARTED;
570 mutex_unlock(&radio->lock);
571 }
605 572
606 dev_info(&intf->dev, "going into suspend..\n"); 573 dev_info(&intf->dev, "going into suspend..\n");
607 574
@@ -614,9 +581,11 @@ static int usb_dsbr100_resume(struct usb_interface *intf)
614 struct dsbr100_device *radio = usb_get_intfdata(intf); 581 struct dsbr100_device *radio = usb_get_intfdata(intf);
615 int retval; 582 int retval;
616 583
617 retval = dsbr100_start(radio); 584 if (radio->status == STARTED) {
618 if (retval < 0) 585 retval = dsbr100_start(radio);
619 dev_warn(&intf->dev, "dsbr100_start failed\n"); 586 if (retval < 0)
587 dev_warn(&intf->dev, "dsbr100_start failed\n");
588 }
620 589
621 dev_info(&intf->dev, "coming out of suspend..\n"); 590 dev_info(&intf->dev, "coming out of suspend..\n");
622 591
@@ -636,8 +605,6 @@ static void usb_dsbr100_video_device_release(struct video_device *videodev)
636/* File system interface */ 605/* File system interface */
637static const struct v4l2_file_operations usb_dsbr100_fops = { 606static const struct v4l2_file_operations usb_dsbr100_fops = {
638 .owner = THIS_MODULE, 607 .owner = THIS_MODULE,
639 .open = usb_dsbr100_open,
640 .release = usb_dsbr100_close,
641 .ioctl = video_ioctl2, 608 .ioctl = video_ioctl2,
642}; 609};
643 610
@@ -695,9 +662,9 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
695 mutex_init(&radio->lock); 662 mutex_init(&radio->lock);
696 663
697 radio->removed = 0; 664 radio->removed = 0;
698 radio->users = 0;
699 radio->usbdev = interface_to_usbdev(intf); 665 radio->usbdev = interface_to_usbdev(intf);
700 radio->curfreq = FREQ_MIN * FREQ_MUL; 666 radio->curfreq = FREQ_MIN * FREQ_MUL;
667 radio->status = STOPPED;
701 668
702 video_set_drvdata(&radio->videodev, radio); 669 video_set_drvdata(&radio->videodev, radio);
703 670
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index cab19d05e02f..837467f93805 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -64,6 +64,7 @@
64#include <media/v4l2-ioctl.h> 64#include <media/v4l2-ioctl.h>
65#include <linux/usb.h> 65#include <linux/usb.h>
66#include <linux/version.h> /* for KERNEL_VERSION MACRO */ 66#include <linux/version.h> /* for KERNEL_VERSION MACRO */
67#include <linux/mutex.h>
67 68
68/* driver and module definitions */ 69/* driver and module definitions */
69#define DRIVER_AUTHOR "Alexey Klimov <klimov.linux@gmail.com>" 70#define DRIVER_AUTHOR "Alexey Klimov <klimov.linux@gmail.com>"
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 5cf6c45b91fe..49c4aab95dab 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -49,7 +49,6 @@ struct fmi
49 int io; 49 int io;
50 int curvol; /* 1 or 0 */ 50 int curvol; /* 1 or 0 */
51 unsigned long curfreq; /* freq in kHz */ 51 unsigned long curfreq; /* freq in kHz */
52 __u32 flags;
53 struct mutex lock; 52 struct mutex lock;
54}; 53};
55 54
@@ -57,7 +56,7 @@ static struct fmi fmi_card;
57static struct pnp_dev *dev; 56static struct pnp_dev *dev;
58 57
59/* freq is in 1/16 kHz to internal number, hw precision is 50 kHz */ 58/* freq is in 1/16 kHz to internal number, hw precision is 50 kHz */
60/* It is only useful to give freq in intervall of 800 (=0.05Mhz), 59/* It is only useful to give freq in interval of 800 (=0.05Mhz),
61 * other bits will be truncated, e.g 92.7400016 -> 92.7, but 60 * other bits will be truncated, e.g 92.7400016 -> 92.7, but
62 * 92.7400017 -> 92.75 61 * 92.7400017 -> 92.75
63 */ 62 */
@@ -142,7 +141,6 @@ static int vidioc_querycap(struct file *file, void *priv,
142static int vidioc_g_tuner(struct file *file, void *priv, 141static int vidioc_g_tuner(struct file *file, void *priv,
143 struct v4l2_tuner *v) 142 struct v4l2_tuner *v)
144{ 143{
145 int mult;
146 struct fmi *fmi = video_drvdata(file); 144 struct fmi *fmi = video_drvdata(file);
147 145
148 if (v->index > 0) 146 if (v->index > 0)
@@ -150,11 +148,10 @@ static int vidioc_g_tuner(struct file *file, void *priv,
150 148
151 strlcpy(v->name, "FM", sizeof(v->name)); 149 strlcpy(v->name, "FM", sizeof(v->name));
152 v->type = V4L2_TUNER_RADIO; 150 v->type = V4L2_TUNER_RADIO;
153 mult = (fmi->flags & V4L2_TUNER_CAP_LOW) ? 1 : 1000; 151 v->rangelow = RSF16_MINFREQ;
154 v->rangelow = RSF16_MINFREQ / mult; 152 v->rangehigh = RSF16_MAXFREQ;
155 v->rangehigh = RSF16_MAXFREQ / mult;
156 v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; 153 v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
157 v->capability = fmi->flags & V4L2_TUNER_CAP_LOW; 154 v->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
158 v->audmode = V4L2_TUNER_MODE_STEREO; 155 v->audmode = V4L2_TUNER_MODE_STEREO;
159 v->signal = fmi_getsigstr(fmi); 156 v->signal = fmi_getsigstr(fmi);
160 return 0; 157 return 0;
@@ -171,8 +168,6 @@ static int vidioc_s_frequency(struct file *file, void *priv,
171{ 168{
172 struct fmi *fmi = video_drvdata(file); 169 struct fmi *fmi = video_drvdata(file);
173 170
174 if (!(fmi->flags & V4L2_TUNER_CAP_LOW))
175 f->frequency *= 1000;
176 if (f->frequency < RSF16_MINFREQ || 171 if (f->frequency < RSF16_MINFREQ ||
177 f->frequency > RSF16_MAXFREQ) 172 f->frequency > RSF16_MAXFREQ)
178 return -EINVAL; 173 return -EINVAL;
@@ -189,8 +184,6 @@ static int vidioc_g_frequency(struct file *file, void *priv,
189 184
190 f->type = V4L2_TUNER_RADIO; 185 f->type = V4L2_TUNER_RADIO;
191 f->frequency = fmi->curfreq; 186 f->frequency = fmi->curfreq;
192 if (!(fmi->flags & V4L2_TUNER_CAP_LOW))
193 f->frequency /= 1000;
194 return 0; 187 return 0;
195} 188}
196 189
@@ -347,7 +340,6 @@ static int __init fmi_init(void)
347 return res; 340 return res;
348 } 341 }
349 342
350 fmi->flags = V4L2_TUNER_CAP_LOW;
351 strlcpy(fmi->vdev.name, v4l2_dev->name, sizeof(fmi->vdev.name)); 343 strlcpy(fmi->vdev.name, v4l2_dev->name, sizeof(fmi->vdev.name));
352 fmi->vdev.v4l2_dev = v4l2_dev; 344 fmi->vdev.v4l2_dev = v4l2_dev;
353 fmi->vdev.fops = &fmi_fops; 345 fmi->vdev.fops = &fmi_fops;
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 935ff9bcdfcc..a11414f648d4 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -61,13 +61,12 @@ struct fmr2
61 int stereo; /* card is producing stereo audio */ 61 int stereo; /* card is producing stereo audio */
62 unsigned long curfreq; /* freq in kHz */ 62 unsigned long curfreq; /* freq in kHz */
63 int card_type; 63 int card_type;
64 u32 flags;
65}; 64};
66 65
67static struct fmr2 fmr2_card; 66static struct fmr2 fmr2_card;
68 67
69/* hw precision is 12.5 kHz 68/* hw precision is 12.5 kHz
70 * It is only useful to give freq in intervall of 200 (=0.0125Mhz), 69 * It is only useful to give freq in interval of 200 (=0.0125Mhz),
71 * other bits will be truncated 70 * other bits will be truncated
72 */ 71 */
73#define RSF16_ENCODE(x) ((x) / 200 + 856) 72#define RSF16_ENCODE(x) ((x) / 200 + 856)
@@ -221,7 +220,6 @@ static int vidioc_querycap(struct file *file, void *priv,
221static int vidioc_g_tuner(struct file *file, void *priv, 220static int vidioc_g_tuner(struct file *file, void *priv,
222 struct v4l2_tuner *v) 221 struct v4l2_tuner *v)
223{ 222{
224 int mult;
225 struct fmr2 *fmr2 = video_drvdata(file); 223 struct fmr2 *fmr2 = video_drvdata(file);
226 224
227 if (v->index > 0) 225 if (v->index > 0)
@@ -230,13 +228,12 @@ static int vidioc_g_tuner(struct file *file, void *priv,
230 strlcpy(v->name, "FM", sizeof(v->name)); 228 strlcpy(v->name, "FM", sizeof(v->name));
231 v->type = V4L2_TUNER_RADIO; 229 v->type = V4L2_TUNER_RADIO;
232 230
233 mult = (fmr2->flags & V4L2_TUNER_CAP_LOW) ? 1 : 1000; 231 v->rangelow = RSF16_MINFREQ;
234 v->rangelow = RSF16_MINFREQ / mult; 232 v->rangehigh = RSF16_MAXFREQ;
235 v->rangehigh = RSF16_MAXFREQ / mult; 233 v->rxsubchans = fmr2->stereo ? V4L2_TUNER_SUB_STEREO :
236 v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; 234 V4L2_TUNER_SUB_MONO;
237 v->capability = fmr2->flags&V4L2_TUNER_CAP_LOW; 235 v->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
238 v->audmode = fmr2->stereo ? V4L2_TUNER_MODE_STEREO: 236 v->audmode = V4L2_TUNER_MODE_STEREO;
239 V4L2_TUNER_MODE_MONO;
240 mutex_lock(&fmr2->lock); 237 mutex_lock(&fmr2->lock);
241 v->signal = fmr2_getsigstr(fmr2); 238 v->signal = fmr2_getsigstr(fmr2);
242 mutex_unlock(&fmr2->lock); 239 mutex_unlock(&fmr2->lock);
@@ -254,8 +251,6 @@ static int vidioc_s_frequency(struct file *file, void *priv,
254{ 251{
255 struct fmr2 *fmr2 = video_drvdata(file); 252 struct fmr2 *fmr2 = video_drvdata(file);
256 253
257 if (!(fmr2->flags & V4L2_TUNER_CAP_LOW))
258 f->frequency *= 1000;
259 if (f->frequency < RSF16_MINFREQ || 254 if (f->frequency < RSF16_MINFREQ ||
260 f->frequency > RSF16_MAXFREQ) 255 f->frequency > RSF16_MAXFREQ)
261 return -EINVAL; 256 return -EINVAL;
@@ -279,8 +274,6 @@ static int vidioc_g_frequency(struct file *file, void *priv,
279 274
280 f->type = V4L2_TUNER_RADIO; 275 f->type = V4L2_TUNER_RADIO;
281 f->frequency = fmr2->curfreq; 276 f->frequency = fmr2->curfreq;
282 if (!(fmr2->flags & V4L2_TUNER_CAP_LOW))
283 f->frequency /= 1000;
284 return 0; 277 return 0;
285} 278}
286 279
@@ -406,7 +399,6 @@ static int __init fmr2_init(void)
406 strlcpy(v4l2_dev->name, "sf16fmr2", sizeof(v4l2_dev->name)); 399 strlcpy(v4l2_dev->name, "sf16fmr2", sizeof(v4l2_dev->name));
407 fmr2->io = io; 400 fmr2->io = io;
408 fmr2->stereo = 1; 401 fmr2->stereo = 1;
409 fmr2->flags = V4L2_TUNER_CAP_LOW;
410 mutex_init(&fmr2->lock); 402 mutex_init(&fmr2->lock);
411 403
412 if (!request_region(fmr2->io, 2, "sf16fmr2")) { 404 if (!request_region(fmr2->io, 2, "sf16fmr2")) {
diff --git a/drivers/media/radio/radio-si470x.c b/drivers/media/radio/radio-si470x.c
index bd945d04dc90..640421ceb24a 100644
--- a/drivers/media/radio/radio-si470x.c
+++ b/drivers/media/radio/radio-si470x.c
@@ -1214,7 +1214,6 @@ static int si470x_fops_release(struct file *file)
1214 usb_autopm_put_interface(radio->intf); 1214 usb_autopm_put_interface(radio->intf);
1215 } 1215 }
1216 1216
1217unlock:
1218 mutex_unlock(&radio->disconnect_lock); 1217 mutex_unlock(&radio->disconnect_lock);
1219 1218
1220done: 1219done:
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 57835f5715fc..94f440535c64 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -440,6 +440,24 @@ config VIDEO_ADV7175
440 To compile this driver as a module, choose M here: the 440 To compile this driver as a module, choose M here: the
441 module will be called adv7175. 441 module will be called adv7175.
442 442
443config VIDEO_THS7303
444 tristate "THS7303 Video Amplifier"
445 depends on I2C
446 help
447 Support for TI THS7303 video amplifier
448
449 To compile this driver as a module, choose M here: the
450 module will be called ths7303.
451
452config VIDEO_ADV7343
453 tristate "ADV7343 video encoder"
454 depends on I2C
455 help
456 Support for Analog Devices I2C bus based ADV7343 encoder.
457
458 To compile this driver as a module, choose M here: the
459 module will be called adv7343.
460
443comment "Video improvement chips" 461comment "Video improvement chips"
444 462
445config VIDEO_UPD64031A 463config VIDEO_UPD64031A
@@ -694,7 +712,7 @@ config VIDEO_CAFE_CCIC
694 712
695config SOC_CAMERA 713config SOC_CAMERA
696 tristate "SoC camera support" 714 tristate "SoC camera support"
697 depends on VIDEO_V4L2 && HAS_DMA 715 depends on VIDEO_V4L2 && HAS_DMA && I2C
698 select VIDEOBUF_GEN 716 select VIDEOBUF_GEN
699 help 717 help
700 SoC Camera is a common API to several cameras, not connecting 718 SoC Camera is a common API to several cameras, not connecting
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 3f1a0350a569..7fb3add1b387 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -12,6 +12,8 @@ omap2cam-objs := omap24xxcam.o omap24xxcam-dma.o
12 12
13videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o 13videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o
14 14
15# V4L2 core modules
16
15obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-int-device.o 17obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-int-device.o
16ifeq ($(CONFIG_COMPAT),y) 18ifeq ($(CONFIG_COMPAT),y)
17 obj-$(CONFIG_VIDEO_DEV) += v4l2-compat-ioctl32.o 19 obj-$(CONFIG_VIDEO_DEV) += v4l2-compat-ioctl32.o
@@ -23,21 +25,15 @@ ifeq ($(CONFIG_VIDEO_V4L1_COMPAT),y)
23 obj-$(CONFIG_VIDEO_DEV) += v4l1-compat.o 25 obj-$(CONFIG_VIDEO_DEV) += v4l1-compat.o
24endif 26endif
25 27
26obj-$(CONFIG_VIDEO_TUNER) += tuner.o 28# All i2c modules must come first:
27 29
28obj-$(CONFIG_VIDEO_BT848) += bt8xx/ 30obj-$(CONFIG_VIDEO_TUNER) += tuner.o
29obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
30obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o 31obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
31obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o 32obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
32obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o 33obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
33
34obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o 34obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
35obj-$(CONFIG_VIDEO_SAA5246A) += saa5246a.o 35obj-$(CONFIG_VIDEO_SAA5246A) += saa5246a.o
36obj-$(CONFIG_VIDEO_SAA5249) += saa5249.o 36obj-$(CONFIG_VIDEO_SAA5249) += saa5249.o
37obj-$(CONFIG_VIDEO_CQCAM) += c-qcam.o
38obj-$(CONFIG_VIDEO_BWQCAM) += bw-qcam.o
39obj-$(CONFIG_VIDEO_W9966) += w9966.o
40
41obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o 37obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o
42obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o 38obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o
43obj-$(CONFIG_VIDEO_TEA6420) += tea6420.o 39obj-$(CONFIG_VIDEO_TEA6420) += tea6420.o
@@ -49,16 +45,47 @@ obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
49obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o 45obj-$(CONFIG_VIDEO_SAA7191) += saa7191.o
50obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o 46obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
51obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o 47obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
48obj-$(CONFIG_VIDEO_ADV7343) += adv7343.o
52obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o 49obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
53obj-$(CONFIG_VIDEO_BT819) += bt819.o 50obj-$(CONFIG_VIDEO_BT819) += bt819.o
54obj-$(CONFIG_VIDEO_BT856) += bt856.o 51obj-$(CONFIG_VIDEO_BT856) += bt856.o
55obj-$(CONFIG_VIDEO_BT866) += bt866.o 52obj-$(CONFIG_VIDEO_BT866) += bt866.o
56obj-$(CONFIG_VIDEO_KS0127) += ks0127.o 53obj-$(CONFIG_VIDEO_KS0127) += ks0127.o
54obj-$(CONFIG_VIDEO_THS7303) += ths7303.o
55obj-$(CONFIG_VIDEO_VINO) += indycam.o
56obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o
57obj-$(CONFIG_VIDEO_TVP514X) += tvp514x.o
58obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
59obj-$(CONFIG_VIDEO_CS5345) += cs5345.o
60obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o
61obj-$(CONFIG_VIDEO_M52790) += m52790.o
62obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o
63obj-$(CONFIG_VIDEO_WM8775) += wm8775.o
64obj-$(CONFIG_VIDEO_WM8739) += wm8739.o
65obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o
66obj-$(CONFIG_VIDEO_CX25840) += cx25840/
67obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
68obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
69obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
70obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
71obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
57 72
58obj-$(CONFIG_VIDEO_ZORAN) += zoran/ 73obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
74obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
75obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
76obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
77obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
78obj-$(CONFIG_SOC_CAMERA_TW9910) += tw9910.o
59 79
80# And now the v4l2 drivers:
81
82obj-$(CONFIG_VIDEO_BT848) += bt8xx/
83obj-$(CONFIG_VIDEO_ZORAN) += zoran/
84obj-$(CONFIG_VIDEO_CQCAM) += c-qcam.o
85obj-$(CONFIG_VIDEO_BWQCAM) += bw-qcam.o
86obj-$(CONFIG_VIDEO_W9966) += w9966.o
60obj-$(CONFIG_VIDEO_PMS) += pms.o 87obj-$(CONFIG_VIDEO_PMS) += pms.o
61obj-$(CONFIG_VIDEO_VINO) += vino.o indycam.o 88obj-$(CONFIG_VIDEO_VINO) += vino.o
62obj-$(CONFIG_VIDEO_STRADIS) += stradis.o 89obj-$(CONFIG_VIDEO_STRADIS) += stradis.o
63obj-$(CONFIG_VIDEO_CPIA) += cpia.o 90obj-$(CONFIG_VIDEO_CPIA) += cpia.o
64obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o 91obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o
@@ -69,17 +96,7 @@ obj-$(CONFIG_VIDEO_CX88) += cx88/
69obj-$(CONFIG_VIDEO_EM28XX) += em28xx/ 96obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
70obj-$(CONFIG_VIDEO_CX231XX) += cx231xx/ 97obj-$(CONFIG_VIDEO_CX231XX) += cx231xx/
71obj-$(CONFIG_VIDEO_USBVISION) += usbvision/ 98obj-$(CONFIG_VIDEO_USBVISION) += usbvision/
72obj-$(CONFIG_VIDEO_TVP5150) += tvp5150.o
73obj-$(CONFIG_VIDEO_TVP514X) += tvp514x.o
74obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/ 99obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
75obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
76obj-$(CONFIG_VIDEO_CS5345) += cs5345.o
77obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o
78obj-$(CONFIG_VIDEO_M52790) += m52790.o
79obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o
80obj-$(CONFIG_VIDEO_WM8775) += wm8775.o
81obj-$(CONFIG_VIDEO_WM8739) += wm8739.o
82obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o
83obj-$(CONFIG_VIDEO_OVCAMCHIP) += ovcamchip/ 100obj-$(CONFIG_VIDEO_OVCAMCHIP) += ovcamchip/
84obj-$(CONFIG_VIDEO_CPIA2) += cpia2/ 101obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
85obj-$(CONFIG_VIDEO_MXB) += mxb.o 102obj-$(CONFIG_VIDEO_MXB) += mxb.o
@@ -92,19 +109,12 @@ obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
92obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o 109obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
93obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o 110obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
94obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o 111obj-$(CONFIG_VIDEO_BTCX) += btcx-risc.o
95obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
96 112
97obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o 113obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
98 114
99obj-$(CONFIG_VIDEO_CX25840) += cx25840/
100obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
101obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
102obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o 115obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
103 116
104obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o 117obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
105obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
106
107obj-$(CONFIG_VIDEO_TCM825X) += tcm825x.o
108 118
109obj-$(CONFIG_USB_DABUSB) += dabusb.o 119obj-$(CONFIG_USB_DABUSB) += dabusb.o
110obj-$(CONFIG_USB_OV511) += ov511.o 120obj-$(CONFIG_USB_OV511) += ov511.o
@@ -134,24 +144,21 @@ obj-$(CONFIG_VIDEO_CX18) += cx18/
134obj-$(CONFIG_VIDEO_VIVI) += vivi.o 144obj-$(CONFIG_VIDEO_VIVI) += vivi.o
135obj-$(CONFIG_VIDEO_CX23885) += cx23885/ 145obj-$(CONFIG_VIDEO_CX23885) += cx23885/
136 146
147obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
148obj-$(CONFIG_SOC_CAMERA) += soc_camera.o
149obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
150# soc-camera host drivers have to be linked after camera drivers
137obj-$(CONFIG_VIDEO_MX1) += mx1_camera.o 151obj-$(CONFIG_VIDEO_MX1) += mx1_camera.o
138obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o 152obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
139obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o 153obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
140obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o 154obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
141obj-$(CONFIG_VIDEO_OMAP2) += omap2cam.o
142obj-$(CONFIG_SOC_CAMERA) += soc_camera.o
143obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
144obj-$(CONFIG_SOC_CAMERA_MT9M111) += mt9m111.o
145obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
146obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
147obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
148obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
149obj-$(CONFIG_SOC_CAMERA_TW9910) += tw9910.o
150 155
151obj-$(CONFIG_VIDEO_AU0828) += au0828/ 156obj-$(CONFIG_VIDEO_AU0828) += au0828/
152 157
153obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/ 158obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
154 159
160obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
161
155EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core 162EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
156EXTRA_CFLAGS += -Idrivers/media/dvb/frontends 163EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
157EXTRA_CFLAGS += -Idrivers/media/common/tuners 164EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/video/adv7343.c b/drivers/media/video/adv7343.c
new file mode 100644
index 000000000000..30f5caf5dda5
--- /dev/null
+++ b/drivers/media/video/adv7343.c
@@ -0,0 +1,534 @@
1/*
2 * adv7343 - ADV7343 Video Encoder Driver
3 *
4 * The encoder hardware does not support SECAM.
5 *
6 * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation version 2.
11 *
12 * This program is distributed .as is. WITHOUT ANY WARRANTY of any
13 * kind, whether express or implied; without even the implied warranty
14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/ctype.h>
21#include <linux/i2c.h>
22#include <linux/device.h>
23#include <linux/delay.h>
24#include <linux/module.h>
25#include <linux/videodev2.h>
26#include <linux/uaccess.h>
27#include <linux/version.h>
28
29#include <media/adv7343.h>
30#include <media/v4l2-device.h>
31#include <media/v4l2-chip-ident.h>
32
33#include "adv7343_regs.h"
34
35MODULE_DESCRIPTION("ADV7343 video encoder driver");
36MODULE_LICENSE("GPL");
37
38static int debug;
39module_param(debug, int, 0644);
40MODULE_PARM_DESC(debug, "Debug level 0-1");
41
42struct adv7343_state {
43 struct v4l2_subdev sd;
44 u8 reg00;
45 u8 reg01;
46 u8 reg02;
47 u8 reg35;
48 u8 reg80;
49 u8 reg82;
50 int bright;
51 int hue;
52 int gain;
53 u32 output;
54 v4l2_std_id std;
55};
56
57static inline struct adv7343_state *to_state(struct v4l2_subdev *sd)
58{
59 return container_of(sd, struct adv7343_state, sd);
60}
61
62static inline int adv7343_write(struct v4l2_subdev *sd, u8 reg, u8 value)
63{
64 struct i2c_client *client = v4l2_get_subdevdata(sd);
65
66 return i2c_smbus_write_byte_data(client, reg, value);
67}
68
69static const u8 adv7343_init_reg_val[] = {
70 ADV7343_SOFT_RESET, ADV7343_SOFT_RESET_DEFAULT,
71 ADV7343_POWER_MODE_REG, ADV7343_POWER_MODE_REG_DEFAULT,
72
73 ADV7343_HD_MODE_REG1, ADV7343_HD_MODE_REG1_DEFAULT,
74 ADV7343_HD_MODE_REG2, ADV7343_HD_MODE_REG2_DEFAULT,
75 ADV7343_HD_MODE_REG3, ADV7343_HD_MODE_REG3_DEFAULT,
76 ADV7343_HD_MODE_REG4, ADV7343_HD_MODE_REG4_DEFAULT,
77 ADV7343_HD_MODE_REG5, ADV7343_HD_MODE_REG5_DEFAULT,
78 ADV7343_HD_MODE_REG6, ADV7343_HD_MODE_REG6_DEFAULT,
79 ADV7343_HD_MODE_REG7, ADV7343_HD_MODE_REG7_DEFAULT,
80
81 ADV7343_SD_MODE_REG1, ADV7343_SD_MODE_REG1_DEFAULT,
82 ADV7343_SD_MODE_REG2, ADV7343_SD_MODE_REG2_DEFAULT,
83 ADV7343_SD_MODE_REG3, ADV7343_SD_MODE_REG3_DEFAULT,
84 ADV7343_SD_MODE_REG4, ADV7343_SD_MODE_REG4_DEFAULT,
85 ADV7343_SD_MODE_REG5, ADV7343_SD_MODE_REG5_DEFAULT,
86 ADV7343_SD_MODE_REG6, ADV7343_SD_MODE_REG6_DEFAULT,
87 ADV7343_SD_MODE_REG7, ADV7343_SD_MODE_REG7_DEFAULT,
88 ADV7343_SD_MODE_REG8, ADV7343_SD_MODE_REG8_DEFAULT,
89
90 ADV7343_SD_HUE_REG, ADV7343_SD_HUE_REG_DEFAULT,
91 ADV7343_SD_CGMS_WSS0, ADV7343_SD_CGMS_WSS0_DEFAULT,
92 ADV7343_SD_BRIGHTNESS_WSS, ADV7343_SD_BRIGHTNESS_WSS_DEFAULT,
93};
94
95/*
96 * 2^32
97 * FSC(reg) = FSC (HZ) * --------
98 * 27000000
99 */
100static const struct adv7343_std_info stdinfo[] = {
101 {
102 /* FSC(Hz) = 3,579,545.45 Hz */
103 SD_STD_NTSC, 569408542, V4L2_STD_NTSC,
104 }, {
105 /* FSC(Hz) = 3,575,611.00 Hz */
106 SD_STD_PAL_M, 568782678, V4L2_STD_PAL_M,
107 }, {
108 /* FSC(Hz) = 3,582,056.00 */
109 SD_STD_PAL_N, 569807903, V4L2_STD_PAL_Nc,
110 }, {
111 /* FSC(Hz) = 4,433,618.75 Hz */
112 SD_STD_PAL_N, 705268427, V4L2_STD_PAL_N,
113 }, {
114 /* FSC(Hz) = 4,433,618.75 Hz */
115 SD_STD_PAL_BDGHI, 705268427, V4L2_STD_PAL,
116 }, {
117 /* FSC(Hz) = 4,433,618.75 Hz */
118 SD_STD_NTSC, 705268427, V4L2_STD_NTSC_443,
119 }, {
120 /* FSC(Hz) = 4,433,618.75 Hz */
121 SD_STD_PAL_M, 705268427, V4L2_STD_PAL_60,
122 },
123};
124
125static int adv7343_setstd(struct v4l2_subdev *sd, v4l2_std_id std)
126{
127 struct adv7343_state *state = to_state(sd);
128 struct adv7343_std_info *std_info;
129 int output_idx, num_std;
130 char *fsc_ptr;
131 u8 reg, val;
132 int err = 0;
133 int i = 0;
134
135 output_idx = state->output;
136
137 std_info = (struct adv7343_std_info *)stdinfo;
138 num_std = ARRAY_SIZE(stdinfo);
139
140 for (i = 0; i < num_std; i++) {
141 if (std_info[i].stdid & std)
142 break;
143 }
144
145 if (i == num_std) {
146 v4l2_dbg(1, debug, sd,
147 "Invalid std or std is not supported: %llx\n",
148 (unsigned long long)std);
149 return -EINVAL;
150 }
151
152 /* Set the standard */
153 val = state->reg80 & (~(SD_STD_MASK));
154 val |= std_info[i].standard_val3;
155 err = adv7343_write(sd, ADV7343_SD_MODE_REG1, val);
156 if (err < 0)
157 goto setstd_exit;
158
159 state->reg80 = val;
160
161 /* Configure the input mode register */
162 val = state->reg01 & (~((u8) INPUT_MODE_MASK));
163 val |= SD_INPUT_MODE;
164 err = adv7343_write(sd, ADV7343_MODE_SELECT_REG, val);
165 if (err < 0)
166 goto setstd_exit;
167
168 state->reg01 = val;
169
170 /* Program the sub carrier frequency registers */
171 fsc_ptr = (unsigned char *)&std_info[i].fsc_val;
172 reg = ADV7343_FSC_REG0;
173 for (i = 0; i < 4; i++, reg++, fsc_ptr++) {
174 err = adv7343_write(sd, reg, *fsc_ptr);
175 if (err < 0)
176 goto setstd_exit;
177 }
178
179 val = state->reg80;
180
181 /* Filter settings */
182 if (std & (V4L2_STD_NTSC | V4L2_STD_NTSC_443))
183 val &= 0x03;
184 else if (std & ~V4L2_STD_SECAM)
185 val |= 0x04;
186
187 err = adv7343_write(sd, ADV7343_SD_MODE_REG1, val);
188 if (err < 0)
189 goto setstd_exit;
190
191 state->reg80 = val;
192
193setstd_exit:
194 if (err != 0)
195 v4l2_err(sd, "Error setting std, write failed\n");
196
197 return err;
198}
199
200static int adv7343_setoutput(struct v4l2_subdev *sd, u32 output_type)
201{
202 struct adv7343_state *state = to_state(sd);
203 unsigned char val;
204 int err = 0;
205
206 if (output_type > ADV7343_SVIDEO_ID) {
207 v4l2_dbg(1, debug, sd,
208 "Invalid output type or output type not supported:%d\n",
209 output_type);
210 return -EINVAL;
211 }
212
213 /* Enable Appropriate DAC */
214 val = state->reg00 & 0x03;
215
216 if (output_type == ADV7343_COMPOSITE_ID)
217 val |= ADV7343_COMPOSITE_POWER_VALUE;
218 else if (output_type == ADV7343_COMPONENT_ID)
219 val |= ADV7343_COMPONENT_POWER_VALUE;
220 else
221 val |= ADV7343_SVIDEO_POWER_VALUE;
222
223 err = adv7343_write(sd, ADV7343_POWER_MODE_REG, val);
224 if (err < 0)
225 goto setoutput_exit;
226
227 state->reg00 = val;
228
229 /* Enable YUV output */
230 val = state->reg02 | YUV_OUTPUT_SELECT;
231 err = adv7343_write(sd, ADV7343_MODE_REG0, val);
232 if (err < 0)
233 goto setoutput_exit;
234
235 state->reg02 = val;
236
237 /* configure SD DAC Output 2 and SD DAC Output 1 bit to zero */
238 val = state->reg82 & (SD_DAC_1_DI & SD_DAC_2_DI);
239 err = adv7343_write(sd, ADV7343_SD_MODE_REG2, val);
240 if (err < 0)
241 goto setoutput_exit;
242
243 state->reg82 = val;
244
245 /* configure ED/HD Color DAC Swap and ED/HD RGB Input Enable bit to
246 * zero */
247 val = state->reg35 & (HD_RGB_INPUT_DI & HD_DAC_SWAP_DI);
248 err = adv7343_write(sd, ADV7343_HD_MODE_REG6, val);
249 if (err < 0)
250 goto setoutput_exit;
251
252 state->reg35 = val;
253
254setoutput_exit:
255 if (err != 0)
256 v4l2_err(sd, "Error setting output, write failed\n");
257
258 return err;
259}
260
261static int adv7343_log_status(struct v4l2_subdev *sd)
262{
263 struct adv7343_state *state = to_state(sd);
264
265 v4l2_info(sd, "Standard: %llx\n", (unsigned long long)state->std);
266 v4l2_info(sd, "Output: %s\n", (state->output == 0) ? "Composite" :
267 ((state->output == 1) ? "Component" : "S-Video"));
268 return 0;
269}
270
271static int adv7343_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
272{
273 switch (qc->id) {
274 case V4L2_CID_BRIGHTNESS:
275 return v4l2_ctrl_query_fill(qc, ADV7343_BRIGHTNESS_MIN,
276 ADV7343_BRIGHTNESS_MAX, 1,
277 ADV7343_BRIGHTNESS_DEF);
278 case V4L2_CID_HUE:
279 return v4l2_ctrl_query_fill(qc, ADV7343_HUE_MIN,
280 ADV7343_HUE_MAX, 1 ,
281 ADV7343_HUE_DEF);
282 case V4L2_CID_GAIN:
283 return v4l2_ctrl_query_fill(qc, ADV7343_GAIN_MIN,
284 ADV7343_GAIN_MAX, 1,
285 ADV7343_GAIN_DEF);
286 default:
287 break;
288 }
289
290 return 0;
291}
292
293static int adv7343_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
294{
295 struct adv7343_state *state = to_state(sd);
296 int err = 0;
297
298 switch (ctrl->id) {
299 case V4L2_CID_BRIGHTNESS:
300 if (ctrl->value < ADV7343_BRIGHTNESS_MIN ||
301 ctrl->value > ADV7343_BRIGHTNESS_MAX) {
302 v4l2_dbg(1, debug, sd,
303 "invalid brightness settings %d\n",
304 ctrl->value);
305 return -ERANGE;
306 }
307
308 state->bright = ctrl->value;
309 err = adv7343_write(sd, ADV7343_SD_BRIGHTNESS_WSS,
310 state->bright);
311 break;
312
313 case V4L2_CID_HUE:
314 if (ctrl->value < ADV7343_HUE_MIN ||
315 ctrl->value > ADV7343_HUE_MAX) {
316 v4l2_dbg(1, debug, sd, "invalid hue settings %d\n",
317 ctrl->value);
318 return -ERANGE;
319 }
320
321 state->hue = ctrl->value;
322 err = adv7343_write(sd, ADV7343_SD_HUE_REG, state->hue);
323 break;
324
325 case V4L2_CID_GAIN:
326 if (ctrl->value < ADV7343_GAIN_MIN ||
327 ctrl->value > ADV7343_GAIN_MAX) {
328 v4l2_dbg(1, debug, sd, "invalid gain settings %d\n",
329 ctrl->value);
330 return -ERANGE;
331 }
332
333 if ((ctrl->value > POSITIVE_GAIN_MAX) &&
334 (ctrl->value < NEGATIVE_GAIN_MIN)) {
335 v4l2_dbg(1, debug, sd,
336 "gain settings not within the specified range\n");
337 return -ERANGE;
338 }
339
340 state->gain = ctrl->value;
341 err = adv7343_write(sd, ADV7343_DAC2_OUTPUT_LEVEL, state->gain);
342 break;
343
344 default:
345 return -EINVAL;
346 }
347
348 if (err < 0)
349 v4l2_err(sd, "Failed to set the encoder controls\n");
350
351 return err;
352}
353
354static int adv7343_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
355{
356 struct adv7343_state *state = to_state(sd);
357
358 switch (ctrl->id) {
359 case V4L2_CID_BRIGHTNESS:
360 ctrl->value = state->bright;
361 break;
362
363 case V4L2_CID_HUE:
364 ctrl->value = state->hue;
365 break;
366
367 case V4L2_CID_GAIN:
368 ctrl->value = state->gain;
369 break;
370
371 default:
372 return -EINVAL;
373 }
374
375 return 0;
376}
377
378static int adv7343_g_chip_ident(struct v4l2_subdev *sd,
379 struct v4l2_dbg_chip_ident *chip)
380{
381 struct i2c_client *client = v4l2_get_subdevdata(sd);
382
383 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7343, 0);
384}
385
386static const struct v4l2_subdev_core_ops adv7343_core_ops = {
387 .log_status = adv7343_log_status,
388 .g_chip_ident = adv7343_g_chip_ident,
389 .g_ctrl = adv7343_g_ctrl,
390 .s_ctrl = adv7343_s_ctrl,
391 .queryctrl = adv7343_queryctrl,
392};
393
394static int adv7343_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
395{
396 struct adv7343_state *state = to_state(sd);
397 int err = 0;
398
399 if (state->std == std)
400 return 0;
401
402 err = adv7343_setstd(sd, std);
403 if (!err)
404 state->std = std;
405
406 return err;
407}
408
409static int adv7343_s_routing(struct v4l2_subdev *sd,
410 u32 input, u32 output, u32 config)
411{
412 struct adv7343_state *state = to_state(sd);
413 int err = 0;
414
415 if (state->output == output)
416 return 0;
417
418 err = adv7343_setoutput(sd, output);
419 if (!err)
420 state->output = output;
421
422 return err;
423}
424
425static const struct v4l2_subdev_video_ops adv7343_video_ops = {
426 .s_std_output = adv7343_s_std_output,
427 .s_routing = adv7343_s_routing,
428};
429
430static const struct v4l2_subdev_ops adv7343_ops = {
431 .core = &adv7343_core_ops,
432 .video = &adv7343_video_ops,
433};
434
435static int adv7343_initialize(struct v4l2_subdev *sd)
436{
437 struct adv7343_state *state = to_state(sd);
438 int err = 0;
439 int i;
440
441 for (i = 0; i < ARRAY_SIZE(adv7343_init_reg_val); i += 2) {
442
443 err = adv7343_write(sd, adv7343_init_reg_val[i],
444 adv7343_init_reg_val[i+1]);
445 if (err) {
446 v4l2_err(sd, "Error initializing\n");
447 return err;
448 }
449 }
450
451 /* Configure for default video standard */
452 err = adv7343_setoutput(sd, state->output);
453 if (err < 0) {
454 v4l2_err(sd, "Error setting output during init\n");
455 return -EINVAL;
456 }
457
458 err = adv7343_setstd(sd, state->std);
459 if (err < 0) {
460 v4l2_err(sd, "Error setting std during init\n");
461 return -EINVAL;
462 }
463
464 return err;
465}
466
467static int adv7343_probe(struct i2c_client *client,
468 const struct i2c_device_id *id)
469{
470 struct adv7343_state *state;
471
472 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
473 return -ENODEV;
474
475 v4l_info(client, "chip found @ 0x%x (%s)\n",
476 client->addr << 1, client->adapter->name);
477
478 state = kzalloc(sizeof(struct adv7343_state), GFP_KERNEL);
479 if (state == NULL)
480 return -ENOMEM;
481
482 state->reg00 = 0x80;
483 state->reg01 = 0x00;
484 state->reg02 = 0x20;
485 state->reg35 = 0x00;
486 state->reg80 = ADV7343_SD_MODE_REG1_DEFAULT;
487 state->reg82 = ADV7343_SD_MODE_REG2_DEFAULT;
488
489 state->output = ADV7343_COMPOSITE_ID;
490 state->std = V4L2_STD_NTSC;
491
492 v4l2_i2c_subdev_init(&state->sd, client, &adv7343_ops);
493 return adv7343_initialize(&state->sd);
494}
495
496static int adv7343_remove(struct i2c_client *client)
497{
498 struct v4l2_subdev *sd = i2c_get_clientdata(client);
499
500 v4l2_device_unregister_subdev(sd);
501 kfree(to_state(sd));
502
503 return 0;
504}
505
506static const struct i2c_device_id adv7343_id[] = {
507 {"adv7343", 0},
508 {},
509};
510
511MODULE_DEVICE_TABLE(i2c, adv7343_id);
512
513static struct i2c_driver adv7343_driver = {
514 .driver = {
515 .owner = THIS_MODULE,
516 .name = "adv7343",
517 },
518 .probe = adv7343_probe,
519 .remove = adv7343_remove,
520 .id_table = adv7343_id,
521};
522
523static __init int init_adv7343(void)
524{
525 return i2c_add_driver(&adv7343_driver);
526}
527
528static __exit void exit_adv7343(void)
529{
530 i2c_del_driver(&adv7343_driver);
531}
532
533module_init(init_adv7343);
534module_exit(exit_adv7343);
diff --git a/drivers/media/video/adv7343_regs.h b/drivers/media/video/adv7343_regs.h
new file mode 100644
index 000000000000..3431045b33da
--- /dev/null
+++ b/drivers/media/video/adv7343_regs.h
@@ -0,0 +1,185 @@
1/*
2 * ADV7343 encoder related structure and register definitions
3 *
4 * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed .as is. WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef ADV7343_REG_H
17#define ADV7343_REGS_H
18
19struct adv7343_std_info {
20 u32 standard_val3;
21 u32 fsc_val;
22 v4l2_std_id stdid;
23};
24
25/* Register offset macros */
26#define ADV7343_POWER_MODE_REG (0x00)
27#define ADV7343_MODE_SELECT_REG (0x01)
28#define ADV7343_MODE_REG0 (0x02)
29
30#define ADV7343_DAC2_OUTPUT_LEVEL (0x0b)
31
32#define ADV7343_SOFT_RESET (0x17)
33
34#define ADV7343_HD_MODE_REG1 (0x30)
35#define ADV7343_HD_MODE_REG2 (0x31)
36#define ADV7343_HD_MODE_REG3 (0x32)
37#define ADV7343_HD_MODE_REG4 (0x33)
38#define ADV7343_HD_MODE_REG5 (0x34)
39#define ADV7343_HD_MODE_REG6 (0x35)
40
41#define ADV7343_HD_MODE_REG7 (0x39)
42
43#define ADV7343_SD_MODE_REG1 (0x80)
44#define ADV7343_SD_MODE_REG2 (0x82)
45#define ADV7343_SD_MODE_REG3 (0x83)
46#define ADV7343_SD_MODE_REG4 (0x84)
47#define ADV7343_SD_MODE_REG5 (0x86)
48#define ADV7343_SD_MODE_REG6 (0x87)
49#define ADV7343_SD_MODE_REG7 (0x88)
50#define ADV7343_SD_MODE_REG8 (0x89)
51
52#define ADV7343_FSC_REG0 (0x8C)
53#define ADV7343_FSC_REG1 (0x8D)
54#define ADV7343_FSC_REG2 (0x8E)
55#define ADV7343_FSC_REG3 (0x8F)
56
57#define ADV7343_SD_CGMS_WSS0 (0x99)
58
59#define ADV7343_SD_HUE_REG (0xA0)
60#define ADV7343_SD_BRIGHTNESS_WSS (0xA1)
61
62/* Default values for the registers */
63#define ADV7343_POWER_MODE_REG_DEFAULT (0x10)
64#define ADV7343_HD_MODE_REG1_DEFAULT (0x3C) /* Changed Default
65 720p EAVSAV code*/
66#define ADV7343_HD_MODE_REG2_DEFAULT (0x01) /* Changed Pixel data
67 valid */
68#define ADV7343_HD_MODE_REG3_DEFAULT (0x00) /* Color delay 0 clks */
69#define ADV7343_HD_MODE_REG4_DEFAULT (0xE8) /* Changed */
70#define ADV7343_HD_MODE_REG5_DEFAULT (0x08)
71#define ADV7343_HD_MODE_REG6_DEFAULT (0x00)
72#define ADV7343_HD_MODE_REG7_DEFAULT (0x00)
73#define ADV7343_SD_MODE_REG8_DEFAULT (0x00)
74#define ADV7343_SOFT_RESET_DEFAULT (0x02)
75#define ADV7343_COMPOSITE_POWER_VALUE (0x80)
76#define ADV7343_COMPONENT_POWER_VALUE (0x1C)
77#define ADV7343_SVIDEO_POWER_VALUE (0x60)
78#define ADV7343_SD_HUE_REG_DEFAULT (127)
79#define ADV7343_SD_BRIGHTNESS_WSS_DEFAULT (0x03)
80
81#define ADV7343_SD_CGMS_WSS0_DEFAULT (0x10)
82
83#define ADV7343_SD_MODE_REG1_DEFAULT (0x00)
84#define ADV7343_SD_MODE_REG2_DEFAULT (0xC9)
85#define ADV7343_SD_MODE_REG3_DEFAULT (0x10)
86#define ADV7343_SD_MODE_REG4_DEFAULT (0x01)
87#define ADV7343_SD_MODE_REG5_DEFAULT (0x02)
88#define ADV7343_SD_MODE_REG6_DEFAULT (0x0C)
89#define ADV7343_SD_MODE_REG7_DEFAULT (0x04)
90#define ADV7343_SD_MODE_REG8_DEFAULT (0x00)
91
92/* Bit masks for Mode Select Register */
93#define INPUT_MODE_MASK (0x70)
94#define SD_INPUT_MODE (0x00)
95#define HD_720P_INPUT_MODE (0x10)
96#define HD_1080I_INPUT_MODE (0x10)
97
98/* Bit masks for Mode Register 0 */
99#define TEST_PATTERN_BLACK_BAR_EN (0x04)
100#define YUV_OUTPUT_SELECT (0x20)
101#define RGB_OUTPUT_SELECT (0xDF)
102
103/* Bit masks for DAC output levels */
104#define DAC_OUTPUT_LEVEL_MASK (0xFF)
105#define POSITIVE_GAIN_MAX (0x40)
106#define POSITIVE_GAIN_MIN (0x00)
107#define NEGATIVE_GAIN_MAX (0xFF)
108#define NEGATIVE_GAIN_MIN (0xC0)
109
110/* Bit masks for soft reset register */
111#define SOFT_RESET (0x02)
112
113/* Bit masks for HD Mode Register 1 */
114#define OUTPUT_STD_MASK (0x03)
115#define OUTPUT_STD_SHIFT (0)
116#define OUTPUT_STD_EIA0_2 (0x00)
117#define OUTPUT_STD_EIA0_1 (0x01)
118#define OUTPUT_STD_FULL (0x02)
119#define EMBEDDED_SYNC (0x04)
120#define EXTERNAL_SYNC (0xFB)
121#define STD_MODE_SHIFT (3)
122#define STD_MODE_MASK (0x1F)
123#define STD_MODE_720P (0x05)
124#define STD_MODE_720P_25 (0x08)
125#define STD_MODE_720P_30 (0x07)
126#define STD_MODE_720P_50 (0x06)
127#define STD_MODE_1080I (0x0D)
128#define STD_MODE_1080I_25fps (0x0E)
129#define STD_MODE_1080P_24 (0x12)
130#define STD_MODE_1080P_25 (0x10)
131#define STD_MODE_1080P_30 (0x0F)
132#define STD_MODE_525P (0x00)
133#define STD_MODE_625P (0x03)
134
135/* Bit masks for SD Mode Register 1 */
136#define SD_STD_MASK (0x03)
137#define SD_STD_NTSC (0x00)
138#define SD_STD_PAL_BDGHI (0x01)
139#define SD_STD_PAL_M (0x02)
140#define SD_STD_PAL_N (0x03)
141#define SD_LUMA_FLTR_MASK (0x7)
142#define SD_LUMA_FLTR_SHIFT (0x2)
143#define SD_CHROMA_FLTR_MASK (0x7)
144#define SD_CHROMA_FLTR_SHIFT (0x5)
145
146/* Bit masks for SD Mode Register 2 */
147#define SD_PBPR_SSAF_EN (0x01)
148#define SD_PBPR_SSAF_DI (0xFE)
149#define SD_DAC_1_DI (0xFD)
150#define SD_DAC_2_DI (0xFB)
151#define SD_PEDESTAL_EN (0x08)
152#define SD_PEDESTAL_DI (0xF7)
153#define SD_SQUARE_PIXEL_EN (0x10)
154#define SD_SQUARE_PIXEL_DI (0xEF)
155#define SD_PIXEL_DATA_VALID (0x40)
156#define SD_ACTIVE_EDGE_EN (0x80)
157#define SD_ACTIVE_EDGE_DI (0x7F)
158
159/* Bit masks for HD Mode Register 6 */
160#define HD_RGB_INPUT_EN (0x02)
161#define HD_RGB_INPUT_DI (0xFD)
162#define HD_PBPR_SYNC_EN (0x04)
163#define HD_PBPR_SYNC_DI (0xFB)
164#define HD_DAC_SWAP_EN (0x08)
165#define HD_DAC_SWAP_DI (0xF7)
166#define HD_GAMMA_CURVE_A (0xEF)
167#define HD_GAMMA_CURVE_B (0x10)
168#define HD_GAMMA_EN (0x20)
169#define HD_GAMMA_DI (0xDF)
170#define HD_ADPT_FLTR_MODEB (0x40)
171#define HD_ADPT_FLTR_MODEA (0xBF)
172#define HD_ADPT_FLTR_EN (0x80)
173#define HD_ADPT_FLTR_DI (0x7F)
174
175#define ADV7343_BRIGHTNESS_MAX (127)
176#define ADV7343_BRIGHTNESS_MIN (0)
177#define ADV7343_BRIGHTNESS_DEF (3)
178#define ADV7343_HUE_MAX (255)
179#define ADV7343_HUE_MIN (0)
180#define ADV7343_HUE_DEF (127)
181#define ADV7343_GAIN_MAX (255)
182#define ADV7343_GAIN_MIN (0)
183#define ADV7343_GAIN_DEF (0)
184
185#endif
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 053bbe8c8e3a..830c4a933f63 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -136,9 +136,9 @@ int au0828_tuner_callback(void *priv, int component, int command, int arg)
136 /* Tuner Reset Command from xc5000 */ 136 /* Tuner Reset Command from xc5000 */
137 /* Drive the tuner into reset and out */ 137 /* Drive the tuner into reset and out */
138 au0828_clear(dev, REG_001, 2); 138 au0828_clear(dev, REG_001, 2);
139 mdelay(200); 139 mdelay(10);
140 au0828_set(dev, REG_001, 2); 140 au0828_set(dev, REG_001, 2);
141 mdelay(50); 141 mdelay(10);
142 return 0; 142 return 0;
143 } else { 143 } else {
144 printk(KERN_ERR 144 printk(KERN_ERR
diff --git a/drivers/media/video/au0828/au0828-core.c b/drivers/media/video/au0828/au0828-core.c
index a1e4c0d769a6..3544a2f12f13 100644
--- a/drivers/media/video/au0828/au0828-core.c
+++ b/drivers/media/video/au0828/au0828-core.c
@@ -36,6 +36,11 @@ int au0828_debug;
36module_param_named(debug, au0828_debug, int, 0644); 36module_param_named(debug, au0828_debug, int, 0644);
37MODULE_PARM_DESC(debug, "enable debug messages"); 37MODULE_PARM_DESC(debug, "enable debug messages");
38 38
39static unsigned int disable_usb_speed_check;
40module_param(disable_usb_speed_check, int, 0444);
41MODULE_PARM_DESC(disable_usb_speed_check,
42 "override min bandwidth requirement of 480M bps");
43
39#define _AU0828_BULKPIPE 0x03 44#define _AU0828_BULKPIPE 0x03
40#define _BULKPIPESIZE 0xffff 45#define _BULKPIPESIZE 0xffff
41 46
@@ -181,6 +186,18 @@ static int au0828_usb_probe(struct usb_interface *interface,
181 le16_to_cpu(usbdev->descriptor.idProduct), 186 le16_to_cpu(usbdev->descriptor.idProduct),
182 ifnum); 187 ifnum);
183 188
189 /*
190 * Make sure we have 480 Mbps of bandwidth, otherwise things like
191 * video stream wouldn't likely work, since 12 Mbps is generally
192 * not enough even for most Digital TV streams.
193 */
194 if (usbdev->speed != USB_SPEED_HIGH && disable_usb_speed_check == 0) {
195 printk(KERN_ERR "au0828: Device initialization failed.\n");
196 printk(KERN_ERR "au0828: Device must be connected to a "
197 "high-speed USB 2.0 port.\n");
198 return -ENODEV;
199 }
200
184 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 201 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
185 if (dev == NULL) { 202 if (dev == NULL) {
186 printk(KERN_ERR "%s() Unable to allocate memory\n", __func__); 203 printk(KERN_ERR "%s() Unable to allocate memory\n", __func__);
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index 27bedc6c7791..51527d7b55a7 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -829,6 +829,9 @@ static int au0828_v4l2_close(struct file *filp)
829 829
830 au0828_uninit_isoc(dev); 830 au0828_uninit_isoc(dev);
831 831
832 /* Save some power by putting tuner to sleep */
833 v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_standby);
834
832 /* When close the device, set the usb intf0 into alt0 to free 835 /* When close the device, set the usb intf0 into alt0 to free
833 USB bandwidth */ 836 USB bandwidth */
834 ret = usb_set_interface(dev->usbdev, 0, 0); 837 ret = usb_set_interface(dev->usbdev, 0, 0);
@@ -910,11 +913,6 @@ static int au0828_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
910 913
911 rc = videobuf_mmap_mapper(&fh->vb_vidq, vma); 914 rc = videobuf_mmap_mapper(&fh->vb_vidq, vma);
912 915
913 dprintk(2, "vma start=0x%08lx, size=%ld, ret=%d\n",
914 (unsigned long)vma->vm_start,
915 (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
916 rc);
917
918 return rc; 916 return rc;
919} 917}
920 918
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 23b7499b3185..5eb1464af670 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -3152,6 +3152,7 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
3152 struct bttv_fh *fh = file->private_data; 3152 struct bttv_fh *fh = file->private_data;
3153 struct bttv_buffer *buf; 3153 struct bttv_buffer *buf;
3154 enum v4l2_field field; 3154 enum v4l2_field field;
3155 unsigned int rc = POLLERR;
3155 3156
3156 if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) { 3157 if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
3157 if (!check_alloc_btres(fh->btv,fh,RESOURCE_VBI)) 3158 if (!check_alloc_btres(fh->btv,fh,RESOURCE_VBI))
@@ -3160,9 +3161,10 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
3160 } 3161 }
3161 3162
3162 if (check_btres(fh,RESOURCE_VIDEO_STREAM)) { 3163 if (check_btres(fh,RESOURCE_VIDEO_STREAM)) {
3164 mutex_lock(&fh->cap.vb_lock);
3163 /* streaming capture */ 3165 /* streaming capture */
3164 if (list_empty(&fh->cap.stream)) 3166 if (list_empty(&fh->cap.stream))
3165 return POLLERR; 3167 goto err;
3166 buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream); 3168 buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream);
3167 } else { 3169 } else {
3168 /* read() capture */ 3170 /* read() capture */
@@ -3191,11 +3193,12 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
3191 poll_wait(file, &buf->vb.done, wait); 3193 poll_wait(file, &buf->vb.done, wait);
3192 if (buf->vb.state == VIDEOBUF_DONE || 3194 if (buf->vb.state == VIDEOBUF_DONE ||
3193 buf->vb.state == VIDEOBUF_ERROR) 3195 buf->vb.state == VIDEOBUF_ERROR)
3194 return POLLIN|POLLRDNORM; 3196 rc = POLLIN|POLLRDNORM;
3195 return 0; 3197 else
3198 rc = 0;
3196err: 3199err:
3197 mutex_unlock(&fh->cap.vb_lock); 3200 mutex_unlock(&fh->cap.vb_lock);
3198 return POLLERR; 3201 return rc;
3199} 3202}
3200 3203
3201static int bttv_open(struct file *file) 3204static int bttv_open(struct file *file)
@@ -4166,7 +4169,6 @@ static struct video_device *vdev_init(struct bttv *btv,
4166 if (NULL == vfd) 4169 if (NULL == vfd)
4167 return NULL; 4170 return NULL;
4168 *vfd = *template; 4171 *vfd = *template;
4169 vfd->minor = -1;
4170 vfd->v4l2_dev = &btv->c.v4l2_dev; 4172 vfd->v4l2_dev = &btv->c.v4l2_dev;
4171 vfd->release = video_device_release; 4173 vfd->release = video_device_release;
4172 vfd->debug = bttv_debug; 4174 vfd->debug = bttv_debug;
@@ -4629,7 +4631,7 @@ static int __init bttv_init_module(void)
4629#endif 4631#endif
4630 if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME) 4632 if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
4631 gbuffers = 2; 4633 gbuffers = 2;
4632 if (gbufsize < 0 || gbufsize > BTTV_MAX_FBUF) 4634 if (gbufsize > BTTV_MAX_FBUF)
4633 gbufsize = BTTV_MAX_FBUF; 4635 gbufsize = BTTV_MAX_FBUF;
4634 gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK; 4636 gbufsize = (gbufsize + PAGE_SIZE - 1) & PAGE_MASK;
4635 if (bttv_verbose) 4637 if (bttv_verbose)
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index a99d92fac3dc..ebd1ee9dc871 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -389,6 +389,27 @@ int __devinit init_bttv_i2c(struct bttv *btv)
389 } 389 }
390 if (0 == btv->i2c_rc && i2c_scan) 390 if (0 == btv->i2c_rc && i2c_scan)
391 do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client); 391 do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client);
392
393 /* Instantiate the IR receiver device, if present */
394 if (0 == btv->i2c_rc) {
395 struct i2c_board_info info;
396 /* The external IR receiver is at i2c address 0x34 (0x35 for
397 reads). Future Hauppauge cards will have an internal
398 receiver at 0x30 (0x31 for reads). In theory, both can be
399 fitted, and Hauppauge suggest an external overrides an
400 internal.
401
402 That's why we probe 0x1a (~0x34) first. CB
403 */
404 const unsigned short addr_list[] = {
405 0x1a, 0x18, 0x4b, 0x64, 0x30,
406 I2C_CLIENT_END
407 };
408
409 memset(&info, 0, sizeof(struct i2c_board_info));
410 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
411 i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list);
412 }
392 return btv->i2c_rc; 413 return btv->i2c_rc;
393} 414}
394 415
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index d4099f5312ac..0b4a8f309cfa 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -1064,7 +1064,7 @@ static int ioctl_querymenu(void *arg,struct camera_data *cam)
1064 1064
1065 switch(m->id) { 1065 switch(m->id) {
1066 case CPIA2_CID_FLICKER_MODE: 1066 case CPIA2_CID_FLICKER_MODE:
1067 if(m->index < 0 || m->index >= NUM_FLICKER_CONTROLS) 1067 if (m->index >= NUM_FLICKER_CONTROLS)
1068 return -EINVAL; 1068 return -EINVAL;
1069 1069
1070 strcpy(m->name, flicker_controls[m->index].name); 1070 strcpy(m->name, flicker_controls[m->index].name);
@@ -1082,14 +1082,14 @@ static int ioctl_querymenu(void *arg,struct camera_data *cam)
1082 maximum = i; 1082 maximum = i;
1083 } 1083 }
1084 } 1084 }
1085 if(m->index < 0 || m->index > maximum) 1085 if (m->index > maximum)
1086 return -EINVAL; 1086 return -EINVAL;
1087 1087
1088 strcpy(m->name, framerate_controls[m->index].name); 1088 strcpy(m->name, framerate_controls[m->index].name);
1089 break; 1089 break;
1090 } 1090 }
1091 case CPIA2_CID_LIGHTS: 1091 case CPIA2_CID_LIGHTS:
1092 if(m->index < 0 || m->index >= NUM_LIGHTS_CONTROLS) 1092 if (m->index >= NUM_LIGHTS_CONTROLS)
1093 return -EINVAL; 1093 return -EINVAL;
1094 1094
1095 strcpy(m->name, lights_controls[m->index].name); 1095 strcpy(m->name, lights_controls[m->index].name);
diff --git a/drivers/media/video/cx18/cx18-audio.c b/drivers/media/video/cx18/cx18-audio.c
index 7a8ad5963de8..35268923911c 100644
--- a/drivers/media/video/cx18/cx18-audio.c
+++ b/drivers/media/video/cx18/cx18-audio.c
@@ -26,14 +26,18 @@
26#include "cx18-cards.h" 26#include "cx18-cards.h"
27#include "cx18-audio.h" 27#include "cx18-audio.h"
28 28
29#define CX18_AUDIO_ENABLE 0xc72014 29#define CX18_AUDIO_ENABLE 0xc72014
30#define CX18_AI1_MUX_MASK 0x30
31#define CX18_AI1_MUX_I2S1 0x00
32#define CX18_AI1_MUX_I2S2 0x10
33#define CX18_AI1_MUX_843_I2S 0x20
30 34
31/* Selects the audio input and output according to the current 35/* Selects the audio input and output according to the current
32 settings. */ 36 settings. */
33int cx18_audio_set_io(struct cx18 *cx) 37int cx18_audio_set_io(struct cx18 *cx)
34{ 38{
35 const struct cx18_card_audio_input *in; 39 const struct cx18_card_audio_input *in;
36 u32 val; 40 u32 u, v;
37 int err; 41 int err;
38 42
39 /* Determine which input to use */ 43 /* Determine which input to use */
@@ -52,9 +56,37 @@ int cx18_audio_set_io(struct cx18 *cx)
52 return err; 56 return err;
53 57
54 /* FIXME - this internal mux should be abstracted to a subdev */ 58 /* FIXME - this internal mux should be abstracted to a subdev */
55 val = cx18_read_reg(cx, CX18_AUDIO_ENABLE) & ~0x30; 59 u = cx18_read_reg(cx, CX18_AUDIO_ENABLE);
56 val |= (in->audio_input > CX18_AV_AUDIO_SERIAL2) ? 0x20 : 60 v = u & ~CX18_AI1_MUX_MASK;
57 (in->audio_input << 4); 61 switch (in->audio_input) {
58 cx18_write_reg_expect(cx, val | 0xb00, CX18_AUDIO_ENABLE, val, 0x30); 62 case CX18_AV_AUDIO_SERIAL1:
63 v |= CX18_AI1_MUX_I2S1;
64 break;
65 case CX18_AV_AUDIO_SERIAL2:
66 v |= CX18_AI1_MUX_I2S2;
67 break;
68 default:
69 v |= CX18_AI1_MUX_843_I2S;
70 break;
71 }
72 if (v == u) {
73 /* force a toggle of some AI1 MUX control bits */
74 u &= ~CX18_AI1_MUX_MASK;
75 switch (in->audio_input) {
76 case CX18_AV_AUDIO_SERIAL1:
77 u |= CX18_AI1_MUX_843_I2S;
78 break;
79 case CX18_AV_AUDIO_SERIAL2:
80 u |= CX18_AI1_MUX_843_I2S;
81 break;
82 default:
83 u |= CX18_AI1_MUX_I2S1;
84 break;
85 }
86 cx18_write_reg_expect(cx, u | 0xb00, CX18_AUDIO_ENABLE,
87 u, CX18_AI1_MUX_MASK);
88 }
89 cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE,
90 v, CX18_AI1_MUX_MASK);
59 return 0; 91 return 0;
60} 92}
diff --git a/drivers/media/video/cx18/cx18-av-core.c b/drivers/media/video/cx18/cx18-av-core.c
index cf2bd888a429..536dedb23ba3 100644
--- a/drivers/media/video/cx18/cx18-av-core.c
+++ b/drivers/media/video/cx18/cx18-av-core.c
@@ -99,9 +99,39 @@ int cx18_av_and_or4(struct cx18 *cx, u16 addr, u32 and_mask,
99 or_value); 99 or_value);
100} 100}
101 101
102static void cx18_av_initialize(struct cx18 *cx) 102static int cx18_av_init(struct v4l2_subdev *sd, u32 val)
103{ 103{
104 struct cx18_av_state *state = &cx->av_state; 104 struct cx18 *cx = v4l2_get_subdevdata(sd);
105
106 /*
107 * The crystal freq used in calculations in this driver will be
108 * 28.636360 MHz.
109 * Aim to run the PLLs' VCOs near 400 MHz to minimze errors.
110 */
111
112 /*
113 * VDCLK Integer = 0x0f, Post Divider = 0x04
114 * AIMCLK Integer = 0x0e, Post Divider = 0x16
115 */
116 cx18_av_write4(cx, CXADEC_PLL_CTRL1, 0x160e040f);
117
118 /* VDCLK Fraction = 0x2be2fe */
119 /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz before post divide */
120 cx18_av_write4(cx, CXADEC_VID_PLL_FRAC, 0x002be2fe);
121
122 /* AIMCLK Fraction = 0x05227ad */
123 /* xtal * 0xe.2913d68/0x16 = 48000 * 384: 406 MHz pre post-div*/
124 cx18_av_write4(cx, CXADEC_AUX_PLL_FRAC, 0x005227ad);
125
126 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x16 */
127 cx18_av_write(cx, CXADEC_I2S_MCLK, 0x56);
128 return 0;
129}
130
131static void cx18_av_initialize(struct v4l2_subdev *sd)
132{
133 struct cx18_av_state *state = to_cx18_av_state(sd);
134 struct cx18 *cx = v4l2_get_subdevdata(sd);
105 u32 v; 135 u32 v;
106 136
107 cx18_av_loadfw(cx); 137 cx18_av_loadfw(cx);
@@ -150,6 +180,26 @@ static void cx18_av_initialize(struct cx18 *cx)
150 cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0x8000); 180 cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0x8000);
151 cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0); 181 cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0);
152 182
183 /*
184 * Disable Video Auto-config of the Analog Front End and Video PLL.
185 *
186 * Since we only use BT.656 pixel mode, which works for both 525 and 625
187 * line systems, it's just easier for us to set registers
188 * 0x102 (CXADEC_CHIP_CTRL), 0x104-0x106 (CXADEC_AFE_CTRL),
189 * 0x108-0x109 (CXADEC_PLL_CTRL1), and 0x10c-0x10f (CXADEC_VID_PLL_FRAC)
190 * ourselves, than to run around cleaning up after the auto-config.
191 *
192 * (Note: my CX23418 chip doesn't seem to let the ACFG_DIS bit
193 * get set to 1, but OTOH, it doesn't seem to do AFE and VID PLL
194 * autoconfig either.)
195 *
196 * As a default, also turn off Dual mode for ADC2 and set ADC2 to CH3.
197 */
198 cx18_av_and_or4(cx, CXADEC_CHIP_CTRL, 0xFFFBFFFF, 0x00120000);
199
200 /* Setup the Video and and Aux/Audio PLLs */
201 cx18_av_init(sd, 0);
202
153 /* set video to auto-detect */ 203 /* set video to auto-detect */
154 /* Clear bits 11-12 to enable slow locking mode. Set autodetect mode */ 204 /* Clear bits 11-12 to enable slow locking mode. Set autodetect mode */
155 /* set the comb notch = 1 */ 205 /* set the comb notch = 1 */
@@ -176,12 +226,23 @@ static void cx18_av_initialize(struct cx18 *cx)
176 /* EncSetSignalStd(dwDevNum, pEnc->dwSigStd); */ 226 /* EncSetSignalStd(dwDevNum, pEnc->dwSigStd); */
177 /* EncSetVideoInput(dwDevNum, pEnc->VidIndSelection); */ 227 /* EncSetVideoInput(dwDevNum, pEnc->VidIndSelection); */
178 228
179 v = cx18_av_read4(cx, CXADEC_AFE_CTRL); 229 /*
180 v &= 0xFFFBFFFF; /* turn OFF bit 18 for droop_comp_ch1 */ 230 * Analog Front End (AFE)
181 v &= 0xFFFF7FFF; /* turn OFF bit 9 for clamp_sel_ch1 */ 231 * Default to luma on ch1/ADC1, chroma on ch2/ADC2, SIF on ch3/ADC2
182 v &= 0xFFFFFFFE; /* turn OFF bit 0 for 12db_ch1 */ 232 * bypass_ch[1-3] use filter
183 /* v |= 0x00000001;*/ /* turn ON bit 0 for 12db_ch1 */ 233 * droop_comp_ch[1-3] disable
184 cx18_av_write4(cx, CXADEC_AFE_CTRL, v); 234 * clamp_en_ch[1-3] disable
235 * aud_in_sel ADC2
236 * luma_in_sel ADC1
237 * chroma_in_sel ADC2
238 * clamp_sel_ch[2-3] midcode
239 * clamp_sel_ch1 video decoder
240 * vga_sel_ch3 audio decoder
241 * vga_sel_ch[1-2] video decoder
242 * half_bw_ch[1-3] disable
243 * +12db_ch[1-3] disable
244 */
245 cx18_av_and_or4(cx, CXADEC_AFE_CTRL, 0xFF000000, 0x00005D00);
185 246
186/* if(dwEnable && dw3DCombAvailable) { */ 247/* if(dwEnable && dw3DCombAvailable) { */
187/* CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x7728021F); */ 248/* CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x7728021F); */
@@ -195,50 +256,18 @@ static void cx18_av_initialize(struct cx18 *cx)
195 256
196static int cx18_av_reset(struct v4l2_subdev *sd, u32 val) 257static int cx18_av_reset(struct v4l2_subdev *sd, u32 val)
197{ 258{
198 struct cx18 *cx = v4l2_get_subdevdata(sd); 259 cx18_av_initialize(sd);
199
200 cx18_av_initialize(cx);
201 return 0;
202}
203
204static int cx18_av_init(struct v4l2_subdev *sd, u32 val)
205{
206 struct cx18 *cx = v4l2_get_subdevdata(sd);
207
208 /*
209 * The crystal freq used in calculations in this driver will be
210 * 28.636360 MHz.
211 * Aim to run the PLLs' VCOs near 400 MHz to minimze errors.
212 */
213
214 /*
215 * VDCLK Integer = 0x0f, Post Divider = 0x04
216 * AIMCLK Integer = 0x0e, Post Divider = 0x16
217 */
218 cx18_av_write4(cx, CXADEC_PLL_CTRL1, 0x160e040f);
219
220 /* VDCLK Fraction = 0x2be2fe */
221 /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz before post divide */
222 cx18_av_write4(cx, CXADEC_VID_PLL_FRAC, 0x002be2fe);
223
224 /* AIMCLK Fraction = 0x05227ad */
225 /* xtal * 0xe.2913d68/0x16 = 48000 * 384: 406 MHz pre post-div*/
226 cx18_av_write4(cx, CXADEC_AUX_PLL_FRAC, 0x005227ad);
227
228 /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x16 */
229 cx18_av_write(cx, CXADEC_I2S_MCLK, 0x56);
230 return 0; 260 return 0;
231} 261}
232 262
233static int cx18_av_load_fw(struct v4l2_subdev *sd) 263static int cx18_av_load_fw(struct v4l2_subdev *sd)
234{ 264{
235 struct cx18_av_state *state = to_cx18_av_state(sd); 265 struct cx18_av_state *state = to_cx18_av_state(sd);
236 struct cx18 *cx = v4l2_get_subdevdata(sd);
237 266
238 if (!state->is_initialized) { 267 if (!state->is_initialized) {
239 /* initialize on first use */ 268 /* initialize on first use */
240 state->is_initialized = 1; 269 state->is_initialized = 1;
241 cx18_av_initialize(cx); 270 cx18_av_initialize(sd);
242 } 271 }
243 return 0; 272 return 0;
244} 273}
@@ -248,8 +277,15 @@ void cx18_av_std_setup(struct cx18 *cx)
248 struct cx18_av_state *state = &cx->av_state; 277 struct cx18_av_state *state = &cx->av_state;
249 struct v4l2_subdev *sd = &state->sd; 278 struct v4l2_subdev *sd = &state->sd;
250 v4l2_std_id std = state->std; 279 v4l2_std_id std = state->std;
280
281 /*
282 * Video ADC crystal clock to pixel clock SRC decimation ratio
283 * 28.636360 MHz/13.5 Mpps * 256 = 0x21f.07b
284 */
285 const int src_decimation = 0x21f;
286
251 int hblank, hactive, burst, vblank, vactive, sc; 287 int hblank, hactive, burst, vblank, vactive, sc;
252 int vblank656, src_decimation; 288 int vblank656;
253 int luma_lpf, uv_lpf, comb; 289 int luma_lpf, uv_lpf, comb;
254 u32 pll_int, pll_frac, pll_post; 290 u32 pll_int, pll_frac, pll_post;
255 291
@@ -259,40 +295,96 @@ void cx18_av_std_setup(struct cx18 *cx)
259 else 295 else
260 cx18_av_write(cx, 0x49f, 0x14); 296 cx18_av_write(cx, 0x49f, 0x14);
261 297
298 /*
299 * Note: At the end of a field, there are 3 sets of half line duration
300 * (double horizontal rate) pulses:
301 *
302 * 5 (625) or 6 (525) half-lines to blank for the vertical retrace
303 * 5 (625) or 6 (525) vertical sync pulses of half line duration
304 * 5 (625) or 6 (525) half-lines of equalization pulses
305 */
262 if (std & V4L2_STD_625_50) { 306 if (std & V4L2_STD_625_50) {
263 /* FIXME - revisit these for Sliced VBI */ 307 /*
308 * The following relationships of half line counts should hold:
309 * 625 = vblank656 + vactive
310 * 10 = vblank656 - vblank = vsync pulses + equalization pulses
311 *
312 * vblank656: half lines after line 625/mid-313 of blanked video
313 * vblank: half lines, after line 5/317, of blanked video
314 * vactive: half lines of active video +
315 * 5 half lines after the end of active video
316 *
317 * As far as I can tell:
318 * vblank656 starts counting from the falling edge of the first
319 * vsync pulse (start of line 1 or mid-313)
320 * vblank starts counting from the after the 5 vsync pulses and
321 * 5 or 4 equalization pulses (start of line 6 or 318)
322 *
323 * For 625 line systems the driver will extract VBI information
324 * from lines 6-23 and lines 318-335 (but the slicer can only
325 * handle 17 lines, not the 18 in the vblank region).
326 * In addition, we need vblank656 and vblank to be one whole
327 * line longer, to cover line 24 and 336, so the SAV/EAV RP
328 * codes get generated such that the encoder can actually
329 * extract line 23 & 335 (WSS). We'll lose 1 line in each field
330 * at the top of the screen.
331 *
332 * It appears the 5 half lines that happen after active
333 * video must be included in vactive (579 instead of 574),
334 * otherwise the colors get badly displayed in various regions
335 * of the screen. I guess the chroma comb filter gets confused
336 * without them (at least when a PVR-350 is the PAL source).
337 */
338 vblank656 = 48; /* lines 1 - 24 & 313 - 336 */
339 vblank = 38; /* lines 6 - 24 & 318 - 336 */
340 vactive = 579; /* lines 24 - 313 & 337 - 626 */
341
342 /*
343 * For a 13.5 Mpps clock and 15,625 Hz line rate, a line is
344 * is 864 pixels = 720 active + 144 blanking. ITU-R BT.601
345 * specifies 12 luma clock periods or ~ 0.9 * 13.5 Mpps after
346 * the end of active video to start a horizontal line, so that
347 * leaves 132 pixels of hblank to ignore.
348 */
264 hblank = 132; 349 hblank = 132;
265 hactive = 720; 350 hactive = 720;
266 burst = 93;
267 vblank = 36;
268 vactive = 580;
269 vblank656 = 40;
270 src_decimation = 0x21f;
271 351
352 /*
353 * Burst gate delay (for 625 line systems)
354 * Hsync leading edge to color burst rise = 5.6 us
355 * Color burst width = 2.25 us
356 * Gate width = 4 pixel clocks
357 * (5.6 us + 2.25/2 us) * 13.5 Mpps + 4/2 clocks = 92.79 clocks
358 */
359 burst = 93;
272 luma_lpf = 2; 360 luma_lpf = 2;
273 if (std & V4L2_STD_PAL) { 361 if (std & V4L2_STD_PAL) {
274 uv_lpf = 1; 362 uv_lpf = 1;
275 comb = 0x20; 363 comb = 0x20;
276 sc = 688739; 364 /* sc = 4433618.75 * src_decimation/28636360 * 2^13 */
365 sc = 688700;
277 } else if (std == V4L2_STD_PAL_Nc) { 366 } else if (std == V4L2_STD_PAL_Nc) {
278 uv_lpf = 1; 367 uv_lpf = 1;
279 comb = 0x20; 368 comb = 0x20;
280 sc = 556453; 369 /* sc = 3582056.25 * src_decimation/28636360 * 2^13 */
370 sc = 556422;
281 } else { /* SECAM */ 371 } else { /* SECAM */
282 uv_lpf = 0; 372 uv_lpf = 0;
283 comb = 0; 373 comb = 0;
284 sc = 672351; 374 /* (fr + fb)/2 = (4406260 + 4250000)/2 = 4328130 */
375 /* sc = 4328130 * src_decimation/28636360 * 2^13 */
376 sc = 672314;
285 } 377 }
286 } else { 378 } else {
287 /* 379 /*
288 * The following relationships of half line counts should hold: 380 * The following relationships of half line counts should hold:
289 * 525 = vsync + vactive + vblank656 381 * 525 = prevsync + vblank656 + vactive
290 * 12 = vblank656 - vblank 382 * 12 = vblank656 - vblank = vsync pulses + equalization pulses
291 * 383 *
292 * vsync: always 6 half-lines of vsync pulses 384 * prevsync: 6 half-lines before the vsync pulses
293 * vactive: half lines of active video
294 * vblank656: half lines, after line 3/mid-266, of blanked video 385 * vblank656: half lines, after line 3/mid-266, of blanked video
295 * vblank: half lines, after line 9/272, of blanked video 386 * vblank: half lines, after line 9/272, of blanked video
387 * vactive: half lines of active video
296 * 388 *
297 * As far as I can tell: 389 * As far as I can tell:
298 * vblank656 starts counting from the falling edge of the first 390 * vblank656 starts counting from the falling edge of the first
@@ -319,20 +411,30 @@ void cx18_av_std_setup(struct cx18 *cx)
319 luma_lpf = 1; 411 luma_lpf = 1;
320 uv_lpf = 1; 412 uv_lpf = 1;
321 413
322 src_decimation = 0x21f; 414 /*
415 * Burst gate delay (for 525 line systems)
416 * Hsync leading edge to color burst rise = 5.3 us
417 * Color burst width = 2.5 us
418 * Gate width = 4 pixel clocks
419 * (5.3 us + 2.5/2 us) * 13.5 Mpps + 4/2 clocks = 90.425 clocks
420 */
323 if (std == V4L2_STD_PAL_60) { 421 if (std == V4L2_STD_PAL_60) {
324 burst = 0x5b; 422 burst = 90;
325 luma_lpf = 2; 423 luma_lpf = 2;
326 comb = 0x20; 424 comb = 0x20;
327 sc = 688739; 425 /* sc = 4433618.75 * src_decimation/28636360 * 2^13 */
426 sc = 688700;
328 } else if (std == V4L2_STD_PAL_M) { 427 } else if (std == V4L2_STD_PAL_M) {
329 burst = 0x61; 428 /* The 97 needs to be verified against PAL-M timings */
429 burst = 97;
330 comb = 0x20; 430 comb = 0x20;
331 sc = 555452; 431 /* sc = 3575611.49 * src_decimation/28636360 * 2^13 */
432 sc = 555421;
332 } else { 433 } else {
333 burst = 0x5b; 434 burst = 90;
334 comb = 0x66; 435 comb = 0x66;
335 sc = 556063; 436 /* sc = 3579545.45.. * src_decimation/28636360 * 2^13 */
437 sc = 556032;
336 } 438 }
337 } 439 }
338 440
@@ -344,23 +446,26 @@ void cx18_av_std_setup(struct cx18 *cx)
344 pll_int, pll_frac, pll_post); 446 pll_int, pll_frac, pll_post);
345 447
346 if (pll_post) { 448 if (pll_post) {
347 int fin, fsc, pll; 449 int fsc, pll;
450 u64 tmp;
348 451
349 pll = (28636360L * ((((u64)pll_int) << 25) + pll_frac)) >> 25; 452 pll = (28636360L * ((((u64)pll_int) << 25) + pll_frac)) >> 25;
350 pll /= pll_post; 453 pll /= pll_post;
351 CX18_DEBUG_INFO_DEV(sd, "PLL = %d.%06d MHz\n", 454 CX18_DEBUG_INFO_DEV(sd, "Video PLL = %d.%06d MHz\n",
352 pll / 1000000, pll % 1000000); 455 pll / 1000000, pll % 1000000);
353 CX18_DEBUG_INFO_DEV(sd, "PLL/8 = %d.%06d MHz\n", 456 CX18_DEBUG_INFO_DEV(sd, "Pixel rate = %d.%06d Mpixel/sec\n",
354 pll / 8000000, (pll / 8) % 1000000); 457 pll / 8000000, (pll / 8) % 1000000);
355 458
356 fin = ((u64)src_decimation * pll) >> 12; 459 CX18_DEBUG_INFO_DEV(sd, "ADC XTAL/pixel clock decimation ratio "
357 CX18_DEBUG_INFO_DEV(sd, "ADC Sampling freq = %d.%06d MHz\n", 460 "= %d.%03d\n", src_decimation / 256,
358 fin / 1000000, fin % 1000000); 461 ((src_decimation % 256) * 1000) / 256);
359 462
360 fsc = (((u64)sc) * pll) >> 24L; 463 tmp = 28636360 * (u64) sc;
464 do_div(tmp, src_decimation);
465 fsc = tmp >> 13;
361 CX18_DEBUG_INFO_DEV(sd, 466 CX18_DEBUG_INFO_DEV(sd,
362 "Chroma sub-carrier freq = %d.%06d MHz\n", 467 "Chroma sub-carrier initial freq = %d.%06d "
363 fsc / 1000000, fsc % 1000000); 468 "MHz\n", fsc / 1000000, fsc % 1000000);
364 469
365 CX18_DEBUG_INFO_DEV(sd, "hblank %i, hactive %i, vblank %i, " 470 CX18_DEBUG_INFO_DEV(sd, "hblank %i, hactive %i, vblank %i, "
366 "vactive %i, vblank656 %i, src_dec %i, " 471 "vactive %i, vblank656 %i, src_dec %i, "
@@ -470,16 +575,23 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
470{ 575{
471 struct cx18_av_state *state = &cx->av_state; 576 struct cx18_av_state *state = &cx->av_state;
472 struct v4l2_subdev *sd = &state->sd; 577 struct v4l2_subdev *sd = &state->sd;
473 u8 is_composite = (vid_input >= CX18_AV_COMPOSITE1 && 578
474 vid_input <= CX18_AV_COMPOSITE8); 579 enum analog_signal_type {
475 u8 reg; 580 NONE, CVBS, Y, C, SIF, Pb, Pr
476 u8 v; 581 } ch[3] = {NONE, NONE, NONE};
582
583 u8 afe_mux_cfg;
584 u8 adc2_cfg;
585 u32 afe_cfg;
586 int i;
477 587
478 CX18_DEBUG_INFO_DEV(sd, "decoder set video input %d, audio input %d\n", 588 CX18_DEBUG_INFO_DEV(sd, "decoder set video input %d, audio input %d\n",
479 vid_input, aud_input); 589 vid_input, aud_input);
480 590
481 if (is_composite) { 591 if (vid_input >= CX18_AV_COMPOSITE1 &&
482 reg = 0xf0 + (vid_input - CX18_AV_COMPOSITE1); 592 vid_input <= CX18_AV_COMPOSITE8) {
593 afe_mux_cfg = 0xf0 + (vid_input - CX18_AV_COMPOSITE1);
594 ch[0] = CVBS;
483 } else { 595 } else {
484 int luma = vid_input & 0xf0; 596 int luma = vid_input & 0xf0;
485 int chroma = vid_input & 0xf00; 597 int chroma = vid_input & 0xf00;
@@ -493,26 +605,45 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
493 vid_input); 605 vid_input);
494 return -EINVAL; 606 return -EINVAL;
495 } 607 }
496 reg = 0xf0 + ((luma - CX18_AV_SVIDEO_LUMA1) >> 4); 608 afe_mux_cfg = 0xf0 + ((luma - CX18_AV_SVIDEO_LUMA1) >> 4);
609 ch[0] = Y;
497 if (chroma >= CX18_AV_SVIDEO_CHROMA7) { 610 if (chroma >= CX18_AV_SVIDEO_CHROMA7) {
498 reg &= 0x3f; 611 afe_mux_cfg &= 0x3f;
499 reg |= (chroma - CX18_AV_SVIDEO_CHROMA7) >> 2; 612 afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA7) >> 2;
613 ch[2] = C;
500 } else { 614 } else {
501 reg &= 0xcf; 615 afe_mux_cfg &= 0xcf;
502 reg |= (chroma - CX18_AV_SVIDEO_CHROMA4) >> 4; 616 afe_mux_cfg |= (chroma - CX18_AV_SVIDEO_CHROMA4) >> 4;
617 ch[1] = C;
503 } 618 }
504 } 619 }
620 /* TODO: LeadTek WinFast DVR3100 H & WinFast PVR2100 can do Y/Pb/Pr */
505 621
506 switch (aud_input) { 622 switch (aud_input) {
507 case CX18_AV_AUDIO_SERIAL1: 623 case CX18_AV_AUDIO_SERIAL1:
508 case CX18_AV_AUDIO_SERIAL2: 624 case CX18_AV_AUDIO_SERIAL2:
509 /* do nothing, use serial audio input */ 625 /* do nothing, use serial audio input */
510 break; 626 break;
511 case CX18_AV_AUDIO4: reg &= ~0x30; break; 627 case CX18_AV_AUDIO4:
512 case CX18_AV_AUDIO5: reg &= ~0x30; reg |= 0x10; break; 628 afe_mux_cfg &= ~0x30;
513 case CX18_AV_AUDIO6: reg &= ~0x30; reg |= 0x20; break; 629 ch[1] = SIF;
514 case CX18_AV_AUDIO7: reg &= ~0xc0; break; 630 break;
515 case CX18_AV_AUDIO8: reg &= ~0xc0; reg |= 0x40; break; 631 case CX18_AV_AUDIO5:
632 afe_mux_cfg = (afe_mux_cfg & ~0x30) | 0x10;
633 ch[1] = SIF;
634 break;
635 case CX18_AV_AUDIO6:
636 afe_mux_cfg = (afe_mux_cfg & ~0x30) | 0x20;
637 ch[1] = SIF;
638 break;
639 case CX18_AV_AUDIO7:
640 afe_mux_cfg &= ~0xc0;
641 ch[2] = SIF;
642 break;
643 case CX18_AV_AUDIO8:
644 afe_mux_cfg = (afe_mux_cfg & ~0xc0) | 0x40;
645 ch[2] = SIF;
646 break;
516 647
517 default: 648 default:
518 CX18_ERR_DEV(sd, "0x%04x is not a valid audio input!\n", 649 CX18_ERR_DEV(sd, "0x%04x is not a valid audio input!\n",
@@ -520,24 +651,65 @@ static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
520 return -EINVAL; 651 return -EINVAL;
521 } 652 }
522 653
523 cx18_av_write_expect(cx, 0x103, reg, reg, 0xf7); 654 /* Set up analog front end multiplexers */
655 cx18_av_write_expect(cx, 0x103, afe_mux_cfg, afe_mux_cfg, 0xf7);
524 /* Set INPUT_MODE to Composite (0) or S-Video (1) */ 656 /* Set INPUT_MODE to Composite (0) or S-Video (1) */
525 cx18_av_and_or(cx, 0x401, ~0x6, is_composite ? 0 : 0x02); 657 cx18_av_and_or(cx, 0x401, ~0x6, ch[0] == CVBS ? 0 : 0x02);
526 658
527 /* Set CH_SEL_ADC2 to 1 if input comes from CH3 */ 659 /* Set CH_SEL_ADC2 to 1 if input comes from CH3 */
528 v = cx18_av_read(cx, 0x102); 660 adc2_cfg = cx18_av_read(cx, 0x102);
529 if (reg & 0x80) 661 if (ch[2] == NONE)
530 v &= ~0x2; 662 adc2_cfg &= ~0x2; /* No sig on CH3, set ADC2 to CH2 for input */
531 else 663 else
532 v |= 0x2; 664 adc2_cfg |= 0x2; /* Signal on CH3, set ADC2 to CH3 for input */
665
533 /* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2 and CH3 */ 666 /* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2 and CH3 */
534 if ((reg & 0xc0) != 0xc0 && (reg & 0x30) != 0x30) 667 if (ch[1] != NONE && ch[2] != NONE)
535 v |= 0x4; 668 adc2_cfg |= 0x4; /* Set dual mode */
536 else 669 else
537 v &= ~0x4; 670 adc2_cfg &= ~0x4; /* Clear dual mode */
538 cx18_av_write_expect(cx, 0x102, v, v, 0x17); 671 cx18_av_write_expect(cx, 0x102, adc2_cfg, adc2_cfg, 0x17);
672
673 /* Configure the analog front end */
674 afe_cfg = cx18_av_read4(cx, CXADEC_AFE_CTRL);
675 afe_cfg &= 0xff000000;
676 afe_cfg |= 0x00005000; /* CHROMA_IN, AUD_IN: ADC2; LUMA_IN: ADC1 */
677 if (ch[1] != NONE && ch[2] != NONE)
678 afe_cfg |= 0x00000030; /* half_bw_ch[2-3] since in dual mode */
679
680 for (i = 0; i < 3; i++) {
681 switch (ch[i]) {
682 default:
683 case NONE:
684 /* CLAMP_SEL = Fixed to midcode clamp level */
685 afe_cfg |= (0x00000200 << i);
686 break;
687 case CVBS:
688 case Y:
689 if (i > 0)
690 afe_cfg |= 0x00002000; /* LUMA_IN_SEL: ADC2 */
691 break;
692 case C:
693 case Pb:
694 case Pr:
695 /* CLAMP_SEL = Fixed to midcode clamp level */
696 afe_cfg |= (0x00000200 << i);
697 if (i == 0 && ch[i] == C)
698 afe_cfg &= ~0x00001000; /* CHROMA_IN_SEL ADC1 */
699 break;
700 case SIF:
701 /*
702 * VGA_GAIN_SEL = Audio Decoder
703 * CLAMP_SEL = Fixed to midcode clamp level
704 */
705 afe_cfg |= (0x00000240 << i);
706 if (i == 0)
707 afe_cfg &= ~0x00004000; /* AUD_IN_SEL ADC1 */
708 break;
709 }
710 }
539 711
540 /*cx18_av_and_or4(cx, 0x104, ~0x001b4180, 0x00004180);*/ 712 cx18_av_write4(cx, CXADEC_AFE_CTRL, afe_cfg);
541 713
542 state->vid_input = vid_input; 714 state->vid_input = vid_input;
543 state->aud_input = aud_input; 715 state->aud_input = aud_input;
@@ -858,9 +1030,9 @@ static int cx18_av_s_fmt(struct v4l2_subdev *sd, struct v4l2_format *fmt)
858 * cx18_av_std_setup(), above standard values: 1030 * cx18_av_std_setup(), above standard values:
859 * 1031 *
860 * 480 + 1 for 60 Hz systems 1032 * 480 + 1 for 60 Hz systems
861 * 576 + 4 for 50 Hz systems 1033 * 576 + 3 for 50 Hz systems
862 */ 1034 */
863 Vlines = pix->height + (is_50Hz ? 4 : 1); 1035 Vlines = pix->height + (is_50Hz ? 3 : 1);
864 1036
865 /* 1037 /*
866 * Invalid height and width scaling requests are: 1038 * Invalid height and width scaling requests are:
diff --git a/drivers/media/video/cx18/cx18-av-firmware.c b/drivers/media/video/cx18/cx18-av-firmware.c
index 49a55cc8d839..b9e8cc5d264a 100644
--- a/drivers/media/video/cx18/cx18-av-firmware.c
+++ b/drivers/media/video/cx18/cx18-av-firmware.c
@@ -24,15 +24,63 @@
24#include "cx18-io.h" 24#include "cx18-io.h"
25#include <linux/firmware.h> 25#include <linux/firmware.h>
26 26
27#define CX18_AUDIO_ENABLE 0xc72014 27#define CX18_AUDIO_ENABLE 0xc72014
28#define CX18_AI1_MUX_MASK 0x30
29#define CX18_AI1_MUX_I2S1 0x00
30#define CX18_AI1_MUX_I2S2 0x10
31#define CX18_AI1_MUX_843_I2S 0x20
32#define CX18_AI1_MUX_INVALID 0x30
33
28#define FWFILE "v4l-cx23418-dig.fw" 34#define FWFILE "v4l-cx23418-dig.fw"
29 35
36static int cx18_av_verifyfw(struct cx18 *cx, const struct firmware *fw)
37{
38 struct v4l2_subdev *sd = &cx->av_state.sd;
39 int ret = 0;
40 const u8 *data;
41 u32 size;
42 int addr;
43 u32 expected, dl_control;
44
45 /* Ensure we put the 8051 in reset and enable firmware upload mode */
46 dl_control = cx18_av_read4(cx, CXADEC_DL_CTL);
47 do {
48 dl_control &= 0x00ffffff;
49 dl_control |= 0x0f000000;
50 cx18_av_write4_noretry(cx, CXADEC_DL_CTL, dl_control);
51 dl_control = cx18_av_read4(cx, CXADEC_DL_CTL);
52 } while ((dl_control & 0xff000000) != 0x0f000000);
53
54 /* Read and auto increment until at address 0x0000 */
55 while (dl_control & 0x3fff)
56 dl_control = cx18_av_read4(cx, CXADEC_DL_CTL);
57
58 data = fw->data;
59 size = fw->size;
60 for (addr = 0; addr < size; addr++) {
61 dl_control &= 0xffff3fff; /* ignore top 2 bits of address */
62 expected = 0x0f000000 | ((u32)data[addr] << 16) | addr;
63 if (expected != dl_control) {
64 CX18_ERR_DEV(sd, "verification of %s firmware load "
65 "failed: expected %#010x got %#010x\n",
66 FWFILE, expected, dl_control);
67 ret = -EIO;
68 break;
69 }
70 dl_control = cx18_av_read4(cx, CXADEC_DL_CTL);
71 }
72 if (ret == 0)
73 CX18_INFO_DEV(sd, "verified load of %s firmware (%d bytes)\n",
74 FWFILE, size);
75 return ret;
76}
77
30int cx18_av_loadfw(struct cx18 *cx) 78int cx18_av_loadfw(struct cx18 *cx)
31{ 79{
32 struct v4l2_subdev *sd = &cx->av_state.sd; 80 struct v4l2_subdev *sd = &cx->av_state.sd;
33 const struct firmware *fw = NULL; 81 const struct firmware *fw = NULL;
34 u32 size; 82 u32 size;
35 u32 v; 83 u32 u, v;
36 const u8 *ptr; 84 const u8 *ptr;
37 int i; 85 int i;
38 int retries1 = 0; 86 int retries1 = 0;
@@ -95,6 +143,12 @@ int cx18_av_loadfw(struct cx18 *cx)
95 } 143 }
96 144
97 cx18_av_write4_expect(cx, CXADEC_DL_CTL, 145 cx18_av_write4_expect(cx, CXADEC_DL_CTL,
146 0x03000000 | fw->size, 0x03000000, 0x13000000);
147
148 CX18_INFO_DEV(sd, "loaded %s firmware (%d bytes)\n", FWFILE, size);
149
150 if (cx18_av_verifyfw(cx, fw) == 0)
151 cx18_av_write4_expect(cx, CXADEC_DL_CTL,
98 0x13000000 | fw->size, 0x13000000, 0x13000000); 152 0x13000000 | fw->size, 0x13000000, 0x13000000);
99 153
100 /* Output to the 416 */ 154 /* Output to the 416 */
@@ -135,6 +189,28 @@ int cx18_av_loadfw(struct cx18 *cx)
135 cx18_write_reg_expect(cx, v & 0xFFFFFBFF, CX18_AUDIO_ENABLE, 189 cx18_write_reg_expect(cx, v & 0xFFFFFBFF, CX18_AUDIO_ENABLE,
136 0, 0x400); 190 0, 0x400);
137 191
192 /* Toggle the AI1 MUX */
193 v = cx18_read_reg(cx, CX18_AUDIO_ENABLE);
194 u = v & CX18_AI1_MUX_MASK;
195 v &= ~CX18_AI1_MUX_MASK;
196 if (u == CX18_AI1_MUX_843_I2S || u == CX18_AI1_MUX_INVALID) {
197 /* Switch to I2S1 */
198 v |= CX18_AI1_MUX_I2S1;
199 cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE,
200 v, CX18_AI1_MUX_MASK);
201 /* Switch back to the A/V decoder core I2S output */
202 v = (v & ~CX18_AI1_MUX_MASK) | CX18_AI1_MUX_843_I2S;
203 } else {
204 /* Switch to the A/V decoder core I2S output */
205 v |= CX18_AI1_MUX_843_I2S;
206 cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE,
207 v, CX18_AI1_MUX_MASK);
208 /* Switch back to I2S1 or I2S2 */
209 v = (v & ~CX18_AI1_MUX_MASK) | u;
210 }
211 cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE,
212 v, CX18_AI1_MUX_MASK);
213
138 /* Enable WW auto audio standard detection */ 214 /* Enable WW auto audio standard detection */
139 v = cx18_av_read4(cx, CXADEC_STD_DET_CTL); 215 v = cx18_av_read4(cx, CXADEC_STD_DET_CTL);
140 v |= 0xFF; /* Auto by default */ 216 v |= 0xFF; /* Auto by default */
@@ -143,7 +219,5 @@ int cx18_av_loadfw(struct cx18 *cx)
143 cx18_av_write4_expect(cx, CXADEC_STD_DET_CTL, v, v, 0x3F00FFFF); 219 cx18_av_write4_expect(cx, CXADEC_STD_DET_CTL, v, v, 0x3F00FFFF);
144 220
145 release_firmware(fw); 221 release_firmware(fw);
146
147 CX18_INFO_DEV(sd, "loaded %s firmware (%d bytes)\n", FWFILE, size);
148 return 0; 222 return 0;
149} 223}
diff --git a/drivers/media/video/cx18/cx18-av-vbi.c b/drivers/media/video/cx18/cx18-av-vbi.c
index 23b31670bf1d..a51732bcca4b 100644
--- a/drivers/media/video/cx18/cx18-av-vbi.c
+++ b/drivers/media/video/cx18/cx18-av-vbi.c
@@ -255,8 +255,8 @@ int cx18_av_vbi_s_fmt(struct cx18 *cx, struct v4l2_format *fmt)
255 } 255 }
256 256
257 cx18_av_write(cx, 0x43c, 0x16); 257 cx18_av_write(cx, 0x43c, 0x16);
258 /* FIXME - should match vblank set in cx18_av_std_setup() */ 258 /* Should match vblank set in cx18_av_std_setup() */
259 cx18_av_write(cx, 0x474, is_pal ? 0x2a : 26); 259 cx18_av_write(cx, 0x474, is_pal ? 38 : 26);
260 return 0; 260 return 0;
261} 261}
262 262
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index 9bc221837847..c92a25036f0e 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -340,13 +340,12 @@ static const struct cx18_card cx18_card_toshiba_qosmio_dvbt = {
340 340
341static const struct cx18_card_pci_info cx18_pci_leadtek_pvr2100[] = { 341static const struct cx18_card_pci_info cx18_pci_leadtek_pvr2100[] = {
342 { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6f27 }, /* PVR2100 */ 342 { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6f27 }, /* PVR2100 */
343 { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6690 }, /* DVR3100 H */
344 { 0, 0, 0 } 343 { 0, 0, 0 }
345}; 344};
346 345
347static const struct cx18_card cx18_card_leadtek_pvr2100 = { 346static const struct cx18_card cx18_card_leadtek_pvr2100 = {
348 .type = CX18_CARD_LEADTEK_PVR2100, 347 .type = CX18_CARD_LEADTEK_PVR2100,
349 .name = "Leadtek WinFast PVR2100/DVR3100 H", 348 .name = "Leadtek WinFast PVR2100",
350 .comment = "Experimenters and photos needed for device to work well.\n" 349 .comment = "Experimenters and photos needed for device to work well.\n"
351 "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n", 350 "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n",
352 .v4l2_capabilities = CX18_CAP_ENCODER, 351 .v4l2_capabilities = CX18_CAP_ENCODER,
@@ -365,15 +364,12 @@ static const struct cx18_card cx18_card_leadtek_pvr2100 = {
365 { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, 364 { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
366 }, 365 },
367 .tuners = { 366 .tuners = {
368 /* XC3028 tuner */ 367 /* XC2028 tuner */
369 { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, 368 { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
370 }, 369 },
371 .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 }, 370 .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 },
372 .ddr = { 371 .ddr = {
373 /* 372 /* Pointer to proper DDR config values provided by Terry Wu */
374 * Pointer to proper DDR config values provided by
375 * Terry Wu <terrywu at leadtek.com.tw>
376 */
377 .chip_config = 0x303, 373 .chip_config = 0x303,
378 .refresh = 0x3bb, 374 .refresh = 0x3bb,
379 .timing1 = 0x24220e83, 375 .timing1 = 0x24220e83,
@@ -392,6 +388,58 @@ static const struct cx18_card cx18_card_leadtek_pvr2100 = {
392 388
393/* ------------------------------------------------------------------------- */ 389/* ------------------------------------------------------------------------- */
394 390
391/* Leadtek WinFast DVR3100 H */
392
393static const struct cx18_card_pci_info cx18_pci_leadtek_dvr3100h[] = {
394 { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6690 }, /* DVR3100 H */
395 { 0, 0, 0 }
396};
397
398static const struct cx18_card cx18_card_leadtek_dvr3100h = {
399 .type = CX18_CARD_LEADTEK_DVR3100H,
400 .name = "Leadtek WinFast DVR3100 H",
401 .comment = "Simultaneous DVB-T and Analog capture supported,\n"
402 "\texcept when capturing Analog from the antenna input.\n",
403 .v4l2_capabilities = CX18_CAP_ENCODER,
404 .hw_audio_ctrl = CX18_HW_418_AV,
405 .hw_muxer = CX18_HW_GPIO_MUX,
406 .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX |
407 CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL,
408 .video_inputs = {
409 { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 },
410 { CX18_CARD_INPUT_SVIDEO1, 1,
411 CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 },
412 { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 },
413 },
414 .audio_inputs = {
415 { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 },
416 { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 },
417 },
418 .tuners = {
419 /* XC3028 tuner */
420 { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
421 },
422 .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 },
423 .ddr = {
424 /* Pointer to proper DDR config values provided by Terry Wu */
425 .chip_config = 0x303,
426 .refresh = 0x3bb,
427 .timing1 = 0x24220e83,
428 .timing2 = 0x1f,
429 .tune_lane = 0,
430 .initial_emrs = 0x2,
431 },
432 .gpio_init.initial_value = 0x6,
433 .gpio_init.direction = 0x7,
434 .gpio_audio_input = { .mask = 0x7,
435 .tuner = 0x6, .linein = 0x2, .radio = 0x2 },
436 .xceive_pin = 1,
437 .pci_list = cx18_pci_leadtek_dvr3100h,
438 .i2c = &cx18_i2c_std,
439};
440
441/* ------------------------------------------------------------------------- */
442
395static const struct cx18_card *cx18_card_list[] = { 443static const struct cx18_card *cx18_card_list[] = {
396 &cx18_card_hvr1600_esmt, 444 &cx18_card_hvr1600_esmt,
397 &cx18_card_hvr1600_samsung, 445 &cx18_card_hvr1600_samsung,
@@ -400,6 +448,7 @@ static const struct cx18_card *cx18_card_list[] = {
400 &cx18_card_cnxt_raptor_pal, 448 &cx18_card_cnxt_raptor_pal,
401 &cx18_card_toshiba_qosmio_dvbt, 449 &cx18_card_toshiba_qosmio_dvbt,
402 &cx18_card_leadtek_pvr2100, 450 &cx18_card_leadtek_pvr2100,
451 &cx18_card_leadtek_dvr3100h,
403}; 452};
404 453
405const struct cx18_card *cx18_get_card(u16 index) 454const struct cx18_card *cx18_get_card(u16 index)
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
index 82fc2f9d4021..8e35c3aed544 100644
--- a/drivers/media/video/cx18/cx18-controls.c
+++ b/drivers/media/video/cx18/cx18-controls.c
@@ -176,8 +176,10 @@ static int cx18_setup_vbi_fmt(struct cx18 *cx,
176 return -EBUSY; 176 return -EBUSY;
177 177
178 if (fmt != V4L2_MPEG_STREAM_VBI_FMT_IVTV || 178 if (fmt != V4L2_MPEG_STREAM_VBI_FMT_IVTV ||
179 type != V4L2_MPEG_STREAM_TYPE_MPEG2_PS) { 179 !(type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS ||
180 /* We don't do VBI insertion aside from IVTV format in a PS */ 180 type == V4L2_MPEG_STREAM_TYPE_MPEG2_DVD ||
181 type == V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD)) {
182 /* Only IVTV fmt VBI insertion & only MPEG-2 PS type streams */
181 cx->vbi.insert_mpeg = V4L2_MPEG_STREAM_VBI_FMT_NONE; 183 cx->vbi.insert_mpeg = V4L2_MPEG_STREAM_VBI_FMT_NONE;
182 CX18_DEBUG_INFO("disabled insertion of sliced VBI data into " 184 CX18_DEBUG_INFO("disabled insertion of sliced VBI data into "
183 "the MPEG stream\n"); 185 "the MPEG stream\n");
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 49b1c3d7b1a8..92026e82e10e 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -30,6 +30,7 @@
30#include "cx18-irq.h" 30#include "cx18-irq.h"
31#include "cx18-gpio.h" 31#include "cx18-gpio.h"
32#include "cx18-firmware.h" 32#include "cx18-firmware.h"
33#include "cx18-queue.h"
33#include "cx18-streams.h" 34#include "cx18-streams.h"
34#include "cx18-av-core.h" 35#include "cx18-av-core.h"
35#include "cx18-scb.h" 36#include "cx18-scb.h"
@@ -151,7 +152,8 @@ MODULE_PARM_DESC(cardtype,
151 "\t\t\t 4 = Yuan MPC718\n" 152 "\t\t\t 4 = Yuan MPC718\n"
152 "\t\t\t 5 = Conexant Raptor PAL/SECAM\n" 153 "\t\t\t 5 = Conexant Raptor PAL/SECAM\n"
153 "\t\t\t 6 = Toshiba Qosmio DVB-T/Analog\n" 154 "\t\t\t 6 = Toshiba Qosmio DVB-T/Analog\n"
154 "\t\t\t 7 = Leadtek WinFast PVR2100/DVR3100 H\n" 155 "\t\t\t 7 = Leadtek WinFast PVR2100\n"
156 "\t\t\t 8 = Leadtek WinFast DVR3100 H\n"
155 "\t\t\t 0 = Autodetect (default)\n" 157 "\t\t\t 0 = Autodetect (default)\n"
156 "\t\t\t-1 = Ignore this card\n\t\t"); 158 "\t\t\t-1 = Ignore this card\n\t\t");
157MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60"); 159MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60");
@@ -312,7 +314,7 @@ static void cx18_process_eeprom(struct cx18 *cx)
312 CX18_INFO("Autodetected %s\n", cx->card_name); 314 CX18_INFO("Autodetected %s\n", cx->card_name);
313 315
314 if (tv.tuner_type == TUNER_ABSENT) 316 if (tv.tuner_type == TUNER_ABSENT)
315 CX18_ERR("tveeprom cannot autodetect tuner!"); 317 CX18_ERR("tveeprom cannot autodetect tuner!\n");
316 318
317 if (cx->options.tuner == -1) 319 if (cx->options.tuner == -1)
318 cx->options.tuner = tv.tuner_type; 320 cx->options.tuner = tv.tuner_type;
@@ -546,6 +548,40 @@ done:
546 cx->card_i2c = cx->card->i2c; 548 cx->card_i2c = cx->card->i2c;
547} 549}
548 550
551static int __devinit cx18_create_in_workq(struct cx18 *cx)
552{
553 snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
554 cx->v4l2_dev.name);
555 cx->in_work_queue = create_singlethread_workqueue(cx->in_workq_name);
556 if (cx->in_work_queue == NULL) {
557 CX18_ERR("Unable to create incoming mailbox handler thread\n");
558 return -ENOMEM;
559 }
560 return 0;
561}
562
563static int __devinit cx18_create_out_workq(struct cx18 *cx)
564{
565 snprintf(cx->out_workq_name, sizeof(cx->out_workq_name), "%s-out",
566 cx->v4l2_dev.name);
567 cx->out_work_queue = create_workqueue(cx->out_workq_name);
568 if (cx->out_work_queue == NULL) {
569 CX18_ERR("Unable to create outgoing mailbox handler threads\n");
570 return -ENOMEM;
571 }
572 return 0;
573}
574
575static void __devinit cx18_init_in_work_orders(struct cx18 *cx)
576{
577 int i;
578 for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
579 cx->in_work_order[i].cx = cx;
580 cx->in_work_order[i].str = cx->epu_debug_str;
581 INIT_WORK(&cx->in_work_order[i].work, cx18_in_work_handler);
582 }
583}
584
549/* Precondition: the cx18 structure has been memset to 0. Only 585/* Precondition: the cx18 structure has been memset to 0. Only
550 the dev and instance fields have been filled in. 586 the dev and instance fields have been filled in.
551 No assumptions on the card type may be made here (see cx18_init_struct2 587 No assumptions on the card type may be made here (see cx18_init_struct2
@@ -553,7 +589,7 @@ done:
553 */ 589 */
554static int __devinit cx18_init_struct1(struct cx18 *cx) 590static int __devinit cx18_init_struct1(struct cx18 *cx)
555{ 591{
556 int i; 592 int ret;
557 593
558 cx->base_addr = pci_resource_start(cx->pci_dev, 0); 594 cx->base_addr = pci_resource_start(cx->pci_dev, 0);
559 595
@@ -562,18 +598,18 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
562 mutex_init(&cx->epu2apu_mb_lock); 598 mutex_init(&cx->epu2apu_mb_lock);
563 mutex_init(&cx->epu2cpu_mb_lock); 599 mutex_init(&cx->epu2cpu_mb_lock);
564 600
565 cx->work_queue = create_singlethread_workqueue(cx->v4l2_dev.name); 601 ret = cx18_create_out_workq(cx);
566 if (cx->work_queue == NULL) { 602 if (ret)
567 CX18_ERR("Unable to create work hander thread\n"); 603 return ret;
568 return -ENOMEM;
569 }
570 604
571 for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) { 605 ret = cx18_create_in_workq(cx);
572 cx->epu_work_order[i].cx = cx; 606 if (ret) {
573 cx->epu_work_order[i].str = cx->epu_debug_str; 607 destroy_workqueue(cx->out_work_queue);
574 INIT_WORK(&cx->epu_work_order[i].work, cx18_epu_work_handler); 608 return ret;
575 } 609 }
576 610
611 cx18_init_in_work_orders(cx);
612
577 /* start counting open_id at 1 */ 613 /* start counting open_id at 1 */
578 cx->open_id = 1; 614 cx->open_id = 1;
579 615
@@ -759,17 +795,17 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
759 retval = -ENODEV; 795 retval = -ENODEV;
760 goto err; 796 goto err;
761 } 797 }
762 if (cx18_init_struct1(cx)) { 798
763 retval = -ENOMEM; 799 retval = cx18_init_struct1(cx);
800 if (retval)
764 goto err; 801 goto err;
765 }
766 802
767 CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr); 803 CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);
768 804
769 /* PCI Device Setup */ 805 /* PCI Device Setup */
770 retval = cx18_setup_pci(cx, pci_dev, pci_id); 806 retval = cx18_setup_pci(cx, pci_dev, pci_id);
771 if (retval != 0) 807 if (retval != 0)
772 goto free_workqueue; 808 goto free_workqueues;
773 809
774 /* map io memory */ 810 /* map io memory */
775 CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n", 811 CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
@@ -943,8 +979,9 @@ free_map:
943 cx18_iounmap(cx); 979 cx18_iounmap(cx);
944free_mem: 980free_mem:
945 release_mem_region(cx->base_addr, CX18_MEM_SIZE); 981 release_mem_region(cx->base_addr, CX18_MEM_SIZE);
946free_workqueue: 982free_workqueues:
947 destroy_workqueue(cx->work_queue); 983 destroy_workqueue(cx->in_work_queue);
984 destroy_workqueue(cx->out_work_queue);
948err: 985err:
949 if (retval == 0) 986 if (retval == 0)
950 retval = -ENODEV; 987 retval = -ENODEV;
@@ -1053,11 +1090,19 @@ int cx18_init_on_first_open(struct cx18 *cx)
1053 return 0; 1090 return 0;
1054} 1091}
1055 1092
1056static void cx18_cancel_epu_work_orders(struct cx18 *cx) 1093static void cx18_cancel_in_work_orders(struct cx18 *cx)
1057{ 1094{
1058 int i; 1095 int i;
1059 for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) 1096 for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++)
1060 cancel_work_sync(&cx->epu_work_order[i].work); 1097 cancel_work_sync(&cx->in_work_order[i].work);
1098}
1099
1100static void cx18_cancel_out_work_orders(struct cx18 *cx)
1101{
1102 int i;
1103 for (i = 0; i < CX18_MAX_STREAMS; i++)
1104 if (&cx->streams[i].video_dev != NULL)
1105 cancel_work_sync(&cx->streams[i].out_work_order);
1061} 1106}
1062 1107
1063static void cx18_remove(struct pci_dev *pci_dev) 1108static void cx18_remove(struct pci_dev *pci_dev)
@@ -1073,15 +1118,20 @@ static void cx18_remove(struct pci_dev *pci_dev)
1073 if (atomic_read(&cx->tot_capturing) > 0) 1118 if (atomic_read(&cx->tot_capturing) > 0)
1074 cx18_stop_all_captures(cx); 1119 cx18_stop_all_captures(cx);
1075 1120
1076 /* Interrupts */ 1121 /* Stop interrupts that cause incoming work to be queued */
1077 cx18_sw1_irq_disable(cx, IRQ_CPU_TO_EPU | IRQ_APU_TO_EPU); 1122 cx18_sw1_irq_disable(cx, IRQ_CPU_TO_EPU | IRQ_APU_TO_EPU);
1123
1124 /* Incoming work can cause outgoing work, so clean up incoming first */
1125 cx18_cancel_in_work_orders(cx);
1126 cx18_cancel_out_work_orders(cx);
1127
1128 /* Stop ack interrupts that may have been needed for work to finish */
1078 cx18_sw2_irq_disable(cx, IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK); 1129 cx18_sw2_irq_disable(cx, IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK);
1079 1130
1080 cx18_halt_firmware(cx); 1131 cx18_halt_firmware(cx);
1081 1132
1082 cx18_cancel_epu_work_orders(cx); 1133 destroy_workqueue(cx->in_work_queue);
1083 1134 destroy_workqueue(cx->out_work_queue);
1084 destroy_workqueue(cx->work_queue);
1085 1135
1086 cx18_streams_cleanup(cx, 1); 1136 cx18_streams_cleanup(cx, 1);
1087 1137
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index ece4f281ef42..c6a1e907f63a 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -80,8 +80,9 @@
80#define CX18_CARD_YUAN_MPC718 3 /* Yuan MPC718 */ 80#define CX18_CARD_YUAN_MPC718 3 /* Yuan MPC718 */
81#define CX18_CARD_CNXT_RAPTOR_PAL 4 /* Conexant Raptor PAL */ 81#define CX18_CARD_CNXT_RAPTOR_PAL 4 /* Conexant Raptor PAL */
82#define CX18_CARD_TOSHIBA_QOSMIO_DVBT 5 /* Toshiba Qosmio Interal DVB-T/Analog*/ 82#define CX18_CARD_TOSHIBA_QOSMIO_DVBT 5 /* Toshiba Qosmio Interal DVB-T/Analog*/
83#define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100/DVR3100 H */ 83#define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */
84#define CX18_CARD_LAST 6 84#define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */
85#define CX18_CARD_LAST 7
85 86
86#define CX18_ENC_STREAM_TYPE_MPG 0 87#define CX18_ENC_STREAM_TYPE_MPG 0
87#define CX18_ENC_STREAM_TYPE_TS 1 88#define CX18_ENC_STREAM_TYPE_TS 1
@@ -254,6 +255,7 @@ struct cx18_options {
254#define CX18_F_S_INTERNAL_USE 5 /* this stream is used internally (sliced VBI processing) */ 255#define CX18_F_S_INTERNAL_USE 5 /* this stream is used internally (sliced VBI processing) */
255#define CX18_F_S_STREAMOFF 7 /* signal end of stream EOS */ 256#define CX18_F_S_STREAMOFF 7 /* signal end of stream EOS */
256#define CX18_F_S_APPL_IO 8 /* this stream is used read/written by an application */ 257#define CX18_F_S_APPL_IO 8 /* this stream is used read/written by an application */
258#define CX18_F_S_STOPPING 9 /* telling the fw to stop capturing */
257 259
258/* per-cx18, i_flags */ 260/* per-cx18, i_flags */
259#define CX18_F_I_LOADED_FW 0 /* Loaded firmware 1st time */ 261#define CX18_F_I_LOADED_FW 0 /* Loaded firmware 1st time */
@@ -285,6 +287,7 @@ struct cx18_queue {
285 struct list_head list; 287 struct list_head list;
286 atomic_t buffers; 288 atomic_t buffers;
287 u32 bytesused; 289 u32 bytesused;
290 spinlock_t lock;
288}; 291};
289 292
290struct cx18_dvb { 293struct cx18_dvb {
@@ -305,7 +308,7 @@ struct cx18_scb; /* forward reference */
305 308
306 309
307#define CX18_MAX_MDL_ACKS 2 310#define CX18_MAX_MDL_ACKS 2
308#define CX18_MAX_EPU_WORK_ORDERS (CX18_MAX_FW_MDLS_PER_STREAM + 7) 311#define CX18_MAX_IN_WORK_ORDERS (CX18_MAX_FW_MDLS_PER_STREAM + 7)
309/* CPU_DE_RELEASE_MDL can burst CX18_MAX_FW_MDLS_PER_STREAM orders in a group */ 312/* CPU_DE_RELEASE_MDL can burst CX18_MAX_FW_MDLS_PER_STREAM orders in a group */
310 313
311#define CX18_F_EWO_MB_STALE_UPON_RECEIPT 0x1 314#define CX18_F_EWO_MB_STALE_UPON_RECEIPT 0x1
@@ -313,7 +316,7 @@ struct cx18_scb; /* forward reference */
313#define CX18_F_EWO_MB_STALE \ 316#define CX18_F_EWO_MB_STALE \
314 (CX18_F_EWO_MB_STALE_UPON_RECEIPT | CX18_F_EWO_MB_STALE_WHILE_PROC) 317 (CX18_F_EWO_MB_STALE_UPON_RECEIPT | CX18_F_EWO_MB_STALE_WHILE_PROC)
315 318
316struct cx18_epu_work_order { 319struct cx18_in_work_order {
317 struct work_struct work; 320 struct work_struct work;
318 atomic_t pending; 321 atomic_t pending;
319 struct cx18 *cx; 322 struct cx18 *cx;
@@ -337,7 +340,6 @@ struct cx18_stream {
337 unsigned mdl_offset; 340 unsigned mdl_offset;
338 341
339 u32 id; 342 u32 id;
340 struct mutex qlock; /* locks access to the queues */
341 unsigned long s_flags; /* status flags, see above */ 343 unsigned long s_flags; /* status flags, see above */
342 int dma; /* can be PCI_DMA_TODEVICE, 344 int dma; /* can be PCI_DMA_TODEVICE,
343 PCI_DMA_FROMDEVICE or 345 PCI_DMA_FROMDEVICE or
@@ -353,6 +355,8 @@ struct cx18_stream {
353 struct cx18_queue q_busy; /* busy buffers - in use by firmware */ 355 struct cx18_queue q_busy; /* busy buffers - in use by firmware */
354 struct cx18_queue q_full; /* full buffers - data for user apps */ 356 struct cx18_queue q_full; /* full buffers - data for user apps */
355 357
358 struct work_struct out_work_order;
359
356 /* DVB / Digital Transport */ 360 /* DVB / Digital Transport */
357 struct cx18_dvb dvb; 361 struct cx18_dvb dvb;
358}; 362};
@@ -568,10 +572,14 @@ struct cx18 {
568 u32 sw2_irq_mask; 572 u32 sw2_irq_mask;
569 u32 hw2_irq_mask; 573 u32 hw2_irq_mask;
570 574
571 struct workqueue_struct *work_queue; 575 struct workqueue_struct *in_work_queue;
572 struct cx18_epu_work_order epu_work_order[CX18_MAX_EPU_WORK_ORDERS]; 576 char in_workq_name[11]; /* "cx18-NN-in" */
577 struct cx18_in_work_order in_work_order[CX18_MAX_IN_WORK_ORDERS];
573 char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */ 578 char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */
574 579
580 struct workqueue_struct *out_work_queue;
581 char out_workq_name[12]; /* "cx18-NN-out" */
582
575 /* i2c */ 583 /* i2c */
576 struct i2c_adapter i2c_adap[2]; 584 struct i2c_adapter i2c_adap[2];
577 struct i2c_algo_bit_data i2c_algo[2]; 585 struct i2c_algo_bit_data i2c_algo[2];
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index 3b86f57cd15a..6ea3fe623ef4 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -23,14 +23,20 @@
23#include "cx18-version.h" 23#include "cx18-version.h"
24#include "cx18-dvb.h" 24#include "cx18-dvb.h"
25#include "cx18-io.h" 25#include "cx18-io.h"
26#include "cx18-queue.h"
26#include "cx18-streams.h" 27#include "cx18-streams.h"
27#include "cx18-cards.h" 28#include "cx18-cards.h"
29#include "cx18-gpio.h"
28#include "s5h1409.h" 30#include "s5h1409.h"
29#include "mxl5005s.h" 31#include "mxl5005s.h"
32#include "zl10353.h"
33#include "tuner-xc2028.h"
30 34
31DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); 35DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
32 36
33#define CX18_REG_DMUX_NUM_PORT_0_CONTROL 0xd5a000 37#define CX18_REG_DMUX_NUM_PORT_0_CONTROL 0xd5a000
38#define CX18_CLOCK_ENABLE2 0xc71024
39#define CX18_DMUX_CLK_MASK 0x0080
34 40
35static struct mxl5005s_config hauppauge_hvr1600_tuner = { 41static struct mxl5005s_config hauppauge_hvr1600_tuner = {
36 .i2c_address = 0xC6 >> 1, 42 .i2c_address = 0xC6 >> 1,
@@ -57,7 +63,15 @@ static struct s5h1409_config hauppauge_hvr1600_config = {
57 .inversion = S5H1409_INVERSION_OFF, 63 .inversion = S5H1409_INVERSION_OFF,
58 .status_mode = S5H1409_DEMODLOCKING, 64 .status_mode = S5H1409_DEMODLOCKING,
59 .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK 65 .mpeg_timing = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
66};
60 67
68/* Information/confirmation of proper config values provided by Terry Wu */
69static struct zl10353_config leadtek_dvr3100h_demod = {
70 .demod_address = 0x1e >> 1, /* Datasheet suggested straps */
71 .if2 = 45600, /* 4.560 MHz IF from the XC3028 */
72 .parallel_ts = 1, /* Not a serial TS */
73 .no_tuner = 1, /* XC3028 is not behind the gate */
74 .disable_i2c_gate_ctrl = 1, /* Disable the I2C gate */
61}; 75};
62 76
63static int dvb_register(struct cx18_stream *stream); 77static int dvb_register(struct cx18_stream *stream);
@@ -98,6 +112,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
98 cx18_write_reg(cx, v, CX18_REG_DMUX_NUM_PORT_0_CONTROL); 112 cx18_write_reg(cx, v, CX18_REG_DMUX_NUM_PORT_0_CONTROL);
99 break; 113 break;
100 114
115 case CX18_CARD_LEADTEK_DVR3100H:
101 default: 116 default:
102 /* Assumption - Parallel transport - Signalling 117 /* Assumption - Parallel transport - Signalling
103 * undefined or default. 118 * undefined or default.
@@ -267,8 +282,7 @@ void cx18_dvb_unregister(struct cx18_stream *stream)
267} 282}
268 283
269/* All the DVB attach calls go here, this function get's modified 284/* All the DVB attach calls go here, this function get's modified
270 * for each new card. No other function in this file needs 285 * for each new card. cx18_dvb_start_feed() will also need changes.
271 * to change.
272 */ 286 */
273static int dvb_register(struct cx18_stream *stream) 287static int dvb_register(struct cx18_stream *stream)
274{ 288{
@@ -289,6 +303,29 @@ static int dvb_register(struct cx18_stream *stream)
289 ret = 0; 303 ret = 0;
290 } 304 }
291 break; 305 break;
306 case CX18_CARD_LEADTEK_DVR3100H:
307 dvb->fe = dvb_attach(zl10353_attach,
308 &leadtek_dvr3100h_demod,
309 &cx->i2c_adap[1]);
310 if (dvb->fe != NULL) {
311 struct dvb_frontend *fe;
312 struct xc2028_config cfg = {
313 .i2c_adap = &cx->i2c_adap[1],
314 .i2c_addr = 0xc2 >> 1,
315 .ctrl = NULL,
316 };
317 static struct xc2028_ctrl ctrl = {
318 .fname = XC2028_DEFAULT_FIRMWARE,
319 .max_len = 64,
320 .demod = XC3028_FE_ZARLINK456,
321 .type = XC2028_AUTO,
322 };
323
324 fe = dvb_attach(xc2028_attach, dvb->fe, &cfg);
325 if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
326 fe->ops.tuner_ops.set_config(fe, &ctrl);
327 }
328 break;
292 default: 329 default:
293 /* No Digital Tv Support */ 330 /* No Digital Tv Support */
294 break; 331 break;
@@ -299,6 +336,8 @@ static int dvb_register(struct cx18_stream *stream)
299 return -1; 336 return -1;
300 } 337 }
301 338
339 dvb->fe->callback = cx18_reset_tuner_gpio;
340
302 ret = dvb_register_frontend(&dvb->dvb_adapter, dvb->fe); 341 ret = dvb_register_frontend(&dvb->dvb_adapter, dvb->fe);
303 if (ret < 0) { 342 if (ret < 0) {
304 if (dvb->fe->ops.release) 343 if (dvb->fe->ops.release)
@@ -306,5 +345,16 @@ static int dvb_register(struct cx18_stream *stream)
306 return ret; 345 return ret;
307 } 346 }
308 347
348 /*
349 * The firmware seems to enable the TS DMUX clock
350 * under various circumstances. However, since we know we
351 * might use it, let's just turn it on ourselves here.
352 */
353 cx18_write_reg_expect(cx,
354 (CX18_DMUX_CLK_MASK << 16) | CX18_DMUX_CLK_MASK,
355 CX18_CLOCK_ENABLE2,
356 CX18_DMUX_CLK_MASK,
357 (CX18_DMUX_CLK_MASK << 16) | CX18_DMUX_CLK_MASK);
358
309 return ret; 359 return ret;
310} 360}
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
index b3889c0b2697..29969c18949c 100644
--- a/drivers/media/video/cx18/cx18-fileops.c
+++ b/drivers/media/video/cx18/cx18-fileops.c
@@ -265,8 +265,13 @@ static size_t cx18_copy_buf_to_user(struct cx18_stream *s,
265 * an MPEG-2 Program Pack start code, and provide only 265 * an MPEG-2 Program Pack start code, and provide only
266 * up to that point to the user, so it's easy to insert VBI data 266 * up to that point to the user, so it's easy to insert VBI data
267 * the next time around. 267 * the next time around.
268 *
269 * This will not work for an MPEG-2 TS and has only been
270 * verified by analysis to work for an MPEG-2 PS. Helen Buus
271 * pointed out this works for the CX23416 MPEG-2 DVD compatible
272 * stream, and research indicates both the MPEG 2 SVCD and DVD
273 * stream types use an MPEG-2 PS container.
268 */ 274 */
269 /* FIXME - This only works for an MPEG-2 PS, not a TS */
270 /* 275 /*
271 * An MPEG-2 Program Stream (PS) is a series of 276 * An MPEG-2 Program Stream (PS) is a series of
272 * MPEG-2 Program Packs terminated by an 277 * MPEG-2 Program Packs terminated by an
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index 2226e5791e99..afe46c3d4057 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -131,7 +131,7 @@ static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
131 * Functions that run in a work_queue work handling context 131 * Functions that run in a work_queue work handling context
132 */ 132 */
133 133
134static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order) 134static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
135{ 135{
136 u32 handle, mdl_ack_count, id; 136 u32 handle, mdl_ack_count, id;
137 struct cx18_mailbox *mb; 137 struct cx18_mailbox *mb;
@@ -191,29 +191,30 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
191 if (buf == NULL) { 191 if (buf == NULL) {
192 CX18_WARN("Could not find buf %d for stream %s\n", 192 CX18_WARN("Could not find buf %d for stream %s\n",
193 id, s->name); 193 id, s->name);
194 /* Put as many buffers as possible back into fw use */
195 cx18_stream_load_fw_queue(s);
196 continue; 194 continue;
197 } 195 }
198 196
199 if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) { 197 CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n",
200 CX18_DEBUG_HI_DMA("TS recv bytesused = %d\n", 198 s->name, buf->bytesused);
201 buf->bytesused); 199
202 dvb_dmx_swfilter(&s->dvb.demux, buf->buf, 200 if (s->type != CX18_ENC_STREAM_TYPE_TS)
203 buf->bytesused); 201 cx18_enqueue(s, buf, &s->q_full);
202 else {
203 if (s->dvb.enabled)
204 dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
205 buf->bytesused);
206 cx18_enqueue(s, buf, &s->q_free);
204 } 207 }
205 /* Put as many buffers as possible back into fw use */
206 cx18_stream_load_fw_queue(s);
207 /* Put back TS buffer, since it was removed from all queues */
208 if (s->type == CX18_ENC_STREAM_TYPE_TS)
209 cx18_stream_put_buf_fw(s, buf);
210 } 208 }
209 /* Put as many buffers as possible back into fw use */
210 cx18_stream_load_fw_queue(s);
211
211 wake_up(&cx->dma_waitq); 212 wake_up(&cx->dma_waitq);
212 if (s->id != -1) 213 if (s->id != -1)
213 wake_up(&s->waitq); 214 wake_up(&s->waitq);
214} 215}
215 216
216static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order) 217static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order)
217{ 218{
218 char *p; 219 char *p;
219 char *str = order->str; 220 char *str = order->str;
@@ -224,7 +225,7 @@ static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order)
224 CX18_INFO("FW version: %s\n", p - 1); 225 CX18_INFO("FW version: %s\n", p - 1);
225} 226}
226 227
227static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order) 228static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order)
228{ 229{
229 switch (order->rpu) { 230 switch (order->rpu) {
230 case CPU: 231 case CPU:
@@ -253,18 +254,18 @@ static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order)
253} 254}
254 255
255static 256static
256void free_epu_work_order(struct cx18 *cx, struct cx18_epu_work_order *order) 257void free_in_work_order(struct cx18 *cx, struct cx18_in_work_order *order)
257{ 258{
258 atomic_set(&order->pending, 0); 259 atomic_set(&order->pending, 0);
259} 260}
260 261
261void cx18_epu_work_handler(struct work_struct *work) 262void cx18_in_work_handler(struct work_struct *work)
262{ 263{
263 struct cx18_epu_work_order *order = 264 struct cx18_in_work_order *order =
264 container_of(work, struct cx18_epu_work_order, work); 265 container_of(work, struct cx18_in_work_order, work);
265 struct cx18 *cx = order->cx; 266 struct cx18 *cx = order->cx;
266 epu_cmd(cx, order); 267 epu_cmd(cx, order);
267 free_epu_work_order(cx, order); 268 free_in_work_order(cx, order);
268} 269}
269 270
270 271
@@ -272,7 +273,7 @@ void cx18_epu_work_handler(struct work_struct *work)
272 * Functions that run in an interrupt handling context 273 * Functions that run in an interrupt handling context
273 */ 274 */
274 275
275static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order) 276static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
276{ 277{
277 struct cx18_mailbox __iomem *ack_mb; 278 struct cx18_mailbox __iomem *ack_mb;
278 u32 ack_irq, req; 279 u32 ack_irq, req;
@@ -308,7 +309,7 @@ static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
308 return; 309 return;
309} 310}
310 311
311static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order) 312static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
312{ 313{
313 u32 handle, mdl_ack_offset, mdl_ack_count; 314 u32 handle, mdl_ack_offset, mdl_ack_count;
314 struct cx18_mailbox *mb; 315 struct cx18_mailbox *mb;
@@ -334,7 +335,7 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
334} 335}
335 336
336static 337static
337int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order) 338int epu_debug_irq(struct cx18 *cx, struct cx18_in_work_order *order)
338{ 339{
339 u32 str_offset; 340 u32 str_offset;
340 char *str = order->str; 341 char *str = order->str;
@@ -355,7 +356,7 @@ int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
355} 356}
356 357
357static inline 358static inline
358int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order) 359int epu_cmd_irq(struct cx18 *cx, struct cx18_in_work_order *order)
359{ 360{
360 int ret = -1; 361 int ret = -1;
361 362
@@ -387,12 +388,12 @@ int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
387} 388}
388 389
389static inline 390static inline
390struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx) 391struct cx18_in_work_order *alloc_in_work_order_irq(struct cx18 *cx)
391{ 392{
392 int i; 393 int i;
393 struct cx18_epu_work_order *order = NULL; 394 struct cx18_in_work_order *order = NULL;
394 395
395 for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) { 396 for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
396 /* 397 /*
397 * We only need "pending" atomic to inspect its contents, 398 * We only need "pending" atomic to inspect its contents,
398 * and need not do a check and set because: 399 * and need not do a check and set because:
@@ -401,8 +402,8 @@ struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
401 * 2. "pending" is only set here, and we're serialized because 402 * 2. "pending" is only set here, and we're serialized because
402 * we're called in an IRQ handler context. 403 * we're called in an IRQ handler context.
403 */ 404 */
404 if (atomic_read(&cx->epu_work_order[i].pending) == 0) { 405 if (atomic_read(&cx->in_work_order[i].pending) == 0) {
405 order = &cx->epu_work_order[i]; 406 order = &cx->in_work_order[i];
406 atomic_set(&order->pending, 1); 407 atomic_set(&order->pending, 1);
407 break; 408 break;
408 } 409 }
@@ -414,7 +415,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
414{ 415{
415 struct cx18_mailbox __iomem *mb; 416 struct cx18_mailbox __iomem *mb;
416 struct cx18_mailbox *order_mb; 417 struct cx18_mailbox *order_mb;
417 struct cx18_epu_work_order *order; 418 struct cx18_in_work_order *order;
418 int submit; 419 int submit;
419 420
420 switch (rpu) { 421 switch (rpu) {
@@ -428,7 +429,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
428 return; 429 return;
429 } 430 }
430 431
431 order = alloc_epu_work_order_irq(cx); 432 order = alloc_in_work_order_irq(cx);
432 if (order == NULL) { 433 if (order == NULL) {
433 CX18_WARN("Unable to find blank work order form to schedule " 434 CX18_WARN("Unable to find blank work order form to schedule "
434 "incoming mailbox command processing\n"); 435 "incoming mailbox command processing\n");
@@ -461,7 +462,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
461 */ 462 */
462 submit = epu_cmd_irq(cx, order); 463 submit = epu_cmd_irq(cx, order);
463 if (submit > 0) { 464 if (submit > 0) {
464 queue_work(cx->work_queue, &order->work); 465 queue_work(cx->in_work_queue, &order->work);
465 } 466 }
466} 467}
467 468
@@ -478,9 +479,10 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
478 u32 __iomem *xpu_state; 479 u32 __iomem *xpu_state;
479 wait_queue_head_t *waitq; 480 wait_queue_head_t *waitq;
480 struct mutex *mb_lock; 481 struct mutex *mb_lock;
481 long int timeout, ret; 482 unsigned long int t0, timeout, ret;
482 int i; 483 int i;
483 char argstr[MAX_MB_ARGUMENTS*11+1]; 484 char argstr[MAX_MB_ARGUMENTS*11+1];
485 DEFINE_WAIT(w);
484 486
485 if (info == NULL) { 487 if (info == NULL) {
486 CX18_WARN("unknown cmd %x\n", cmd); 488 CX18_WARN("unknown cmd %x\n", cmd);
@@ -562,25 +564,49 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
562 564
563 CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n", 565 CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
564 irq, info->name); 566 irq, info->name);
567
568 /* So we don't miss the wakeup, prepare to wait before notifying fw */
569 prepare_to_wait(waitq, &w, TASK_UNINTERRUPTIBLE);
565 cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq); 570 cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);
566 571
567 ret = wait_event_timeout( 572 t0 = jiffies;
568 *waitq, 573 ack = cx18_readl(cx, &mb->ack);
569 cx18_readl(cx, &mb->ack) == cx18_readl(cx, &mb->request), 574 if (ack != req) {
570 timeout); 575 schedule_timeout(timeout);
576 ret = jiffies - t0;
577 ack = cx18_readl(cx, &mb->ack);
578 } else {
579 ret = jiffies - t0;
580 }
571 581
572 if (ret == 0) { 582 finish_wait(waitq, &w);
573 /* Timed out */ 583
584 if (req != ack) {
574 mutex_unlock(mb_lock); 585 mutex_unlock(mb_lock);
575 CX18_DEBUG_WARN("sending %s timed out waiting %d msecs for RPU " 586 if (ret >= timeout) {
576 "acknowledgement\n", 587 /* Timed out */
577 info->name, jiffies_to_msecs(timeout)); 588 CX18_DEBUG_WARN("sending %s timed out waiting %d msecs "
589 "for RPU acknowledgement\n",
590 info->name, jiffies_to_msecs(ret));
591 } else {
592 CX18_DEBUG_WARN("woken up before mailbox ack was ready "
593 "after submitting %s to RPU. only "
594 "waited %d msecs on req %u but awakened"
595 " with unmatched ack %u\n",
596 info->name,
597 jiffies_to_msecs(ret),
598 req, ack);
599 }
578 return -EINVAL; 600 return -EINVAL;
579 } 601 }
580 602
581 if (ret != timeout) 603 if (ret >= timeout)
604 CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment "
605 "sending %s; timed out waiting %d msecs\n",
606 info->name, jiffies_to_msecs(ret));
607 else
582 CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n", 608 CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
583 jiffies_to_msecs(timeout-ret), info->name); 609 jiffies_to_msecs(ret), info->name);
584 610
585 /* Collect data returned by the XPU */ 611 /* Collect data returned by the XPU */
586 for (i = 0; i < MAX_MB_ARGUMENTS; i++) 612 for (i = 0; i < MAX_MB_ARGUMENTS; i++)
diff --git a/drivers/media/video/cx18/cx18-mailbox.h b/drivers/media/video/cx18/cx18-mailbox.h
index ce2b6686aa00..e23aaac5b280 100644
--- a/drivers/media/video/cx18/cx18-mailbox.h
+++ b/drivers/media/video/cx18/cx18-mailbox.h
@@ -95,6 +95,6 @@ int cx18_api_func(void *priv, u32 cmd, int in, int out,
95 95
96void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu); 96void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu);
97 97
98void cx18_epu_work_handler(struct work_struct *work); 98void cx18_in_work_handler(struct work_struct *work);
99 99
100#endif 100#endif
diff --git a/drivers/media/video/cx18/cx18-queue.c b/drivers/media/video/cx18/cx18-queue.c
index 3046b8e74345..fa1ed7897d97 100644
--- a/drivers/media/video/cx18/cx18-queue.c
+++ b/drivers/media/video/cx18/cx18-queue.c
@@ -23,8 +23,8 @@
23 */ 23 */
24 24
25#include "cx18-driver.h" 25#include "cx18-driver.h"
26#include "cx18-streams.h"
27#include "cx18-queue.h" 26#include "cx18-queue.h"
27#include "cx18-streams.h"
28#include "cx18-scb.h" 28#include "cx18-scb.h"
29 29
30void cx18_buf_swap(struct cx18_buffer *buf) 30void cx18_buf_swap(struct cx18_buffer *buf)
@@ -53,13 +53,13 @@ struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
53 buf->skipped = 0; 53 buf->skipped = 0;
54 } 54 }
55 55
56 mutex_lock(&s->qlock);
57
58 /* q_busy is restricted to a max buffer count imposed by firmware */ 56 /* q_busy is restricted to a max buffer count imposed by firmware */
59 if (q == &s->q_busy && 57 if (q == &s->q_busy &&
60 atomic_read(&q->buffers) >= CX18_MAX_FW_MDLS_PER_STREAM) 58 atomic_read(&q->buffers) >= CX18_MAX_FW_MDLS_PER_STREAM)
61 q = &s->q_free; 59 q = &s->q_free;
62 60
61 spin_lock(&q->lock);
62
63 if (to_front) 63 if (to_front)
64 list_add(&buf->list, &q->list); /* LIFO */ 64 list_add(&buf->list, &q->list); /* LIFO */
65 else 65 else
@@ -67,7 +67,7 @@ struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
67 q->bytesused += buf->bytesused - buf->readpos; 67 q->bytesused += buf->bytesused - buf->readpos;
68 atomic_inc(&q->buffers); 68 atomic_inc(&q->buffers);
69 69
70 mutex_unlock(&s->qlock); 70 spin_unlock(&q->lock);
71 return q; 71 return q;
72} 72}
73 73
@@ -75,7 +75,7 @@ struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
75{ 75{
76 struct cx18_buffer *buf = NULL; 76 struct cx18_buffer *buf = NULL;
77 77
78 mutex_lock(&s->qlock); 78 spin_lock(&q->lock);
79 if (!list_empty(&q->list)) { 79 if (!list_empty(&q->list)) {
80 buf = list_first_entry(&q->list, struct cx18_buffer, list); 80 buf = list_first_entry(&q->list, struct cx18_buffer, list);
81 list_del_init(&buf->list); 81 list_del_init(&buf->list);
@@ -83,7 +83,7 @@ struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
83 buf->skipped = 0; 83 buf->skipped = 0;
84 atomic_dec(&q->buffers); 84 atomic_dec(&q->buffers);
85 } 85 }
86 mutex_unlock(&s->qlock); 86 spin_unlock(&q->lock);
87 return buf; 87 return buf;
88} 88}
89 89
@@ -94,9 +94,23 @@ struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
94 struct cx18_buffer *buf; 94 struct cx18_buffer *buf;
95 struct cx18_buffer *tmp; 95 struct cx18_buffer *tmp;
96 struct cx18_buffer *ret = NULL; 96 struct cx18_buffer *ret = NULL;
97 97 LIST_HEAD(sweep_up);
98 mutex_lock(&s->qlock); 98
99 /*
100 * We don't have to acquire multiple q locks here, because we are
101 * serialized by the single threaded work handler.
102 * Buffers from the firmware will thus remain in order as
103 * they are moved from q_busy to q_full or to the dvb ring buffer.
104 */
105 spin_lock(&s->q_busy.lock);
99 list_for_each_entry_safe(buf, tmp, &s->q_busy.list, list) { 106 list_for_each_entry_safe(buf, tmp, &s->q_busy.list, list) {
107 /*
108 * We should find what the firmware told us is done,
109 * right at the front of the queue. If we don't, we likely have
110 * missed a buffer done message from the firmware.
111 * Once we skip a buffer repeatedly, relative to the size of
112 * q_busy, we have high confidence we've missed it.
113 */
100 if (buf->id != id) { 114 if (buf->id != id) {
101 buf->skipped++; 115 buf->skipped++;
102 if (buf->skipped >= atomic_read(&s->q_busy.buffers)-1) { 116 if (buf->skipped >= atomic_read(&s->q_busy.buffers)-1) {
@@ -105,38 +119,41 @@ struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
105 "times - it must have dropped out of " 119 "times - it must have dropped out of "
106 "rotation\n", s->name, buf->id, 120 "rotation\n", s->name, buf->id,
107 buf->skipped); 121 buf->skipped);
108 /* move it to q_free */ 122 /* Sweep it up to put it back into rotation */
109 list_move_tail(&buf->list, &s->q_free.list); 123 list_move_tail(&buf->list, &sweep_up);
110 buf->bytesused = buf->readpos = buf->b_flags =
111 buf->skipped = 0;
112 atomic_dec(&s->q_busy.buffers); 124 atomic_dec(&s->q_busy.buffers);
113 atomic_inc(&s->q_free.buffers);
114 } 125 }
115 continue; 126 continue;
116 } 127 }
117 128 /*
118 buf->bytesused = bytesused; 129 * We pull the desired buffer off of the queue here. Something
119 /* Sync the buffer before we release the qlock */ 130 * will have to put it back on a queue later.
120 cx18_buf_sync_for_cpu(s, buf); 131 */
121 if (s->type == CX18_ENC_STREAM_TYPE_TS) { 132 list_del_init(&buf->list);
122 /*
123 * TS doesn't use q_full. As we pull the buffer off of
124 * the queue here, the caller will have to put it back.
125 */
126 list_del_init(&buf->list);
127 } else {
128 /* Move buffer from q_busy to q_full */
129 list_move_tail(&buf->list, &s->q_full.list);
130 set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
131 s->q_full.bytesused += buf->bytesused;
132 atomic_inc(&s->q_full.buffers);
133 }
134 atomic_dec(&s->q_busy.buffers); 133 atomic_dec(&s->q_busy.buffers);
135
136 ret = buf; 134 ret = buf;
137 break; 135 break;
138 } 136 }
139 mutex_unlock(&s->qlock); 137 spin_unlock(&s->q_busy.lock);
138
139 /*
140 * We found the buffer for which we were looking. Get it ready for
141 * the caller to put on q_full or in the dvb ring buffer.
142 */
143 if (ret != NULL) {
144 ret->bytesused = bytesused;
145 ret->skipped = 0;
146 /* readpos and b_flags were 0'ed when the buf went on q_busy */
147 cx18_buf_sync_for_cpu(s, ret);
148 if (s->type != CX18_ENC_STREAM_TYPE_TS)
149 set_bit(CX18_F_B_NEED_BUF_SWAP, &ret->b_flags);
150 }
151
152 /* Put any buffers the firmware is ignoring back into normal rotation */
153 list_for_each_entry_safe(buf, tmp, &sweep_up, list) {
154 list_del_init(&buf->list);
155 cx18_enqueue(s, buf, &s->q_free);
156 }
140 return ret; 157 return ret;
141} 158}
142 159
@@ -148,7 +165,7 @@ static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
148 if (q == &s->q_free) 165 if (q == &s->q_free)
149 return; 166 return;
150 167
151 mutex_lock(&s->qlock); 168 spin_lock(&q->lock);
152 while (!list_empty(&q->list)) { 169 while (!list_empty(&q->list)) {
153 buf = list_first_entry(&q->list, struct cx18_buffer, list); 170 buf = list_first_entry(&q->list, struct cx18_buffer, list);
154 list_move_tail(&buf->list, &s->q_free.list); 171 list_move_tail(&buf->list, &s->q_free.list);
@@ -156,7 +173,7 @@ static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
156 atomic_inc(&s->q_free.buffers); 173 atomic_inc(&s->q_free.buffers);
157 } 174 }
158 cx18_queue_init(q); 175 cx18_queue_init(q);
159 mutex_unlock(&s->qlock); 176 spin_unlock(&q->lock);
160} 177}
161 178
162void cx18_flush_queues(struct cx18_stream *s) 179void cx18_flush_queues(struct cx18_stream *s)
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 0932b76b2373..54d248e16d85 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -116,12 +116,16 @@ static void cx18_stream_init(struct cx18 *cx, int type)
116 s->buffers = cx->stream_buffers[type]; 116 s->buffers = cx->stream_buffers[type];
117 s->buf_size = cx->stream_buf_size[type]; 117 s->buf_size = cx->stream_buf_size[type];
118 118
119 mutex_init(&s->qlock);
120 init_waitqueue_head(&s->waitq); 119 init_waitqueue_head(&s->waitq);
121 s->id = -1; 120 s->id = -1;
121 spin_lock_init(&s->q_free.lock);
122 cx18_queue_init(&s->q_free); 122 cx18_queue_init(&s->q_free);
123 spin_lock_init(&s->q_busy.lock);
123 cx18_queue_init(&s->q_busy); 124 cx18_queue_init(&s->q_busy);
125 spin_lock_init(&s->q_full.lock);
124 cx18_queue_init(&s->q_full); 126 cx18_queue_init(&s->q_full);
127
128 INIT_WORK(&s->out_work_order, cx18_out_work_handler);
125} 129}
126 130
127static int cx18_prep_dev(struct cx18 *cx, int type) 131static int cx18_prep_dev(struct cx18 *cx, int type)
@@ -367,9 +371,14 @@ static void cx18_vbi_setup(struct cx18_stream *s)
367 * Tell the encoder to capture 21-4+1=18 lines per field, 371 * Tell the encoder to capture 21-4+1=18 lines per field,
368 * since we want lines 10 through 21. 372 * since we want lines 10 through 21.
369 * 373 *
370 * FIXME - revisit for 625/50 systems 374 * For 625/50 systems, according to the VIP 2 & BT.656 std:
375 * The EAV RP code's Field bit toggles on line 1, a few lines
376 * after the Vertcal Blank bit has already toggled.
377 * (We've actually set the digitizer so that the Field bit
378 * toggles on line 2.) Tell the encoder to capture 23-2+1=22
379 * lines per field, since we want lines 6 through 23.
371 */ 380 */
372 lines = cx->is_60hz ? (21 - 4 + 1) * 2 : 38; 381 lines = cx->is_60hz ? (21 - 4 + 1) * 2 : (23 - 2 + 1) * 2;
373 } 382 }
374 383
375 data[0] = s->handle; 384 data[0] = s->handle;
@@ -431,14 +440,16 @@ static void cx18_vbi_setup(struct cx18_stream *s)
431 cx18_api(cx, CX18_CPU_SET_RAW_VBI_PARAM, 6, data); 440 cx18_api(cx, CX18_CPU_SET_RAW_VBI_PARAM, 6, data);
432} 441}
433 442
434struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s, 443static
435 struct cx18_buffer *buf) 444struct cx18_queue *_cx18_stream_put_buf_fw(struct cx18_stream *s,
445 struct cx18_buffer *buf)
436{ 446{
437 struct cx18 *cx = s->cx; 447 struct cx18 *cx = s->cx;
438 struct cx18_queue *q; 448 struct cx18_queue *q;
439 449
440 /* Don't give it to the firmware, if we're not running a capture */ 450 /* Don't give it to the firmware, if we're not running a capture */
441 if (s->handle == CX18_INVALID_TASK_HANDLE || 451 if (s->handle == CX18_INVALID_TASK_HANDLE ||
452 test_bit(CX18_F_S_STOPPING, &s->s_flags) ||
442 !test_bit(CX18_F_S_STREAMING, &s->s_flags)) 453 !test_bit(CX18_F_S_STREAMING, &s->s_flags))
443 return cx18_enqueue(s, buf, &s->q_free); 454 return cx18_enqueue(s, buf, &s->q_free);
444 455
@@ -453,7 +464,8 @@ struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
453 return q; 464 return q;
454} 465}
455 466
456void cx18_stream_load_fw_queue(struct cx18_stream *s) 467static
468void _cx18_stream_load_fw_queue(struct cx18_stream *s)
457{ 469{
458 struct cx18_queue *q; 470 struct cx18_queue *q;
459 struct cx18_buffer *buf; 471 struct cx18_buffer *buf;
@@ -467,11 +479,19 @@ void cx18_stream_load_fw_queue(struct cx18_stream *s)
467 buf = cx18_dequeue(s, &s->q_free); 479 buf = cx18_dequeue(s, &s->q_free);
468 if (buf == NULL) 480 if (buf == NULL)
469 break; 481 break;
470 q = cx18_stream_put_buf_fw(s, buf); 482 q = _cx18_stream_put_buf_fw(s, buf);
471 } while (atomic_read(&s->q_busy.buffers) < CX18_MAX_FW_MDLS_PER_STREAM 483 } while (atomic_read(&s->q_busy.buffers) < CX18_MAX_FW_MDLS_PER_STREAM
472 && q == &s->q_busy); 484 && q == &s->q_busy);
473} 485}
474 486
487void cx18_out_work_handler(struct work_struct *work)
488{
489 struct cx18_stream *s =
490 container_of(work, struct cx18_stream, out_work_order);
491
492 _cx18_stream_load_fw_queue(s);
493}
494
475int cx18_start_v4l2_encode_stream(struct cx18_stream *s) 495int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
476{ 496{
477 u32 data[MAX_MB_ARGUMENTS]; 497 u32 data[MAX_MB_ARGUMENTS];
@@ -600,19 +620,20 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
600 620
601 /* Init all the cpu_mdls for this stream */ 621 /* Init all the cpu_mdls for this stream */
602 cx18_flush_queues(s); 622 cx18_flush_queues(s);
603 mutex_lock(&s->qlock); 623 spin_lock(&s->q_free.lock);
604 list_for_each_entry(buf, &s->q_free.list, list) { 624 list_for_each_entry(buf, &s->q_free.list, list) {
605 cx18_writel(cx, buf->dma_handle, 625 cx18_writel(cx, buf->dma_handle,
606 &cx->scb->cpu_mdl[buf->id].paddr); 626 &cx->scb->cpu_mdl[buf->id].paddr);
607 cx18_writel(cx, s->buf_size, &cx->scb->cpu_mdl[buf->id].length); 627 cx18_writel(cx, s->buf_size, &cx->scb->cpu_mdl[buf->id].length);
608 } 628 }
609 mutex_unlock(&s->qlock); 629 spin_unlock(&s->q_free.lock);
610 cx18_stream_load_fw_queue(s); 630 _cx18_stream_load_fw_queue(s);
611 631
612 /* begin_capture */ 632 /* begin_capture */
613 if (cx18_vapi(cx, CX18_CPU_CAPTURE_START, 1, s->handle)) { 633 if (cx18_vapi(cx, CX18_CPU_CAPTURE_START, 1, s->handle)) {
614 CX18_DEBUG_WARN("Error starting capture!\n"); 634 CX18_DEBUG_WARN("Error starting capture!\n");
615 /* Ensure we're really not capturing before releasing MDLs */ 635 /* Ensure we're really not capturing before releasing MDLs */
636 set_bit(CX18_F_S_STOPPING, &s->s_flags);
616 if (s->type == CX18_ENC_STREAM_TYPE_MPG) 637 if (s->type == CX18_ENC_STREAM_TYPE_MPG)
617 cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, 1); 638 cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, 1);
618 else 639 else
@@ -622,6 +643,7 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
622 cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle); 643 cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle);
623 cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle); 644 cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
624 s->handle = CX18_INVALID_TASK_HANDLE; 645 s->handle = CX18_INVALID_TASK_HANDLE;
646 clear_bit(CX18_F_S_STOPPING, &s->s_flags);
625 if (atomic_read(&cx->tot_capturing) == 0) { 647 if (atomic_read(&cx->tot_capturing) == 0) {
626 set_bit(CX18_F_I_EOS, &cx->i_flags); 648 set_bit(CX18_F_I_EOS, &cx->i_flags);
627 cx18_write_reg(cx, 5, CX18_DSP0_INTERRUPT_MASK); 649 cx18_write_reg(cx, 5, CX18_DSP0_INTERRUPT_MASK);
@@ -666,6 +688,7 @@ int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
666 if (atomic_read(&cx->tot_capturing) == 0) 688 if (atomic_read(&cx->tot_capturing) == 0)
667 return 0; 689 return 0;
668 690
691 set_bit(CX18_F_S_STOPPING, &s->s_flags);
669 if (s->type == CX18_ENC_STREAM_TYPE_MPG) 692 if (s->type == CX18_ENC_STREAM_TYPE_MPG)
670 cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, !gop_end); 693 cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, !gop_end);
671 else 694 else
@@ -689,6 +712,7 @@ int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
689 712
690 cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle); 713 cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
691 s->handle = CX18_INVALID_TASK_HANDLE; 714 s->handle = CX18_INVALID_TASK_HANDLE;
715 clear_bit(CX18_F_S_STOPPING, &s->s_flags);
692 716
693 if (atomic_read(&cx->tot_capturing) > 0) 717 if (atomic_read(&cx->tot_capturing) > 0)
694 return 0; 718 return 0;
diff --git a/drivers/media/video/cx18/cx18-streams.h b/drivers/media/video/cx18/cx18-streams.h
index 420e0a172945..1afc3fd9d822 100644
--- a/drivers/media/video/cx18/cx18-streams.h
+++ b/drivers/media/video/cx18/cx18-streams.h
@@ -28,10 +28,24 @@ int cx18_streams_setup(struct cx18 *cx);
28int cx18_streams_register(struct cx18 *cx); 28int cx18_streams_register(struct cx18 *cx);
29void cx18_streams_cleanup(struct cx18 *cx, int unregister); 29void cx18_streams_cleanup(struct cx18 *cx, int unregister);
30 30
31/* Related to submission of buffers to firmware */
32static inline void cx18_stream_load_fw_queue(struct cx18_stream *s)
33{
34 struct cx18 *cx = s->cx;
35 queue_work(cx->out_work_queue, &s->out_work_order);
36}
37
38static inline void cx18_stream_put_buf_fw(struct cx18_stream *s,
39 struct cx18_buffer *buf)
40{
41 /* Put buf on q_free; the out work handler will move buf(s) to q_busy */
42 cx18_enqueue(s, buf, &s->q_free);
43 cx18_stream_load_fw_queue(s);
44}
45
46void cx18_out_work_handler(struct work_struct *work);
47
31/* Capture related */ 48/* Capture related */
32void cx18_stream_load_fw_queue(struct cx18_stream *s);
33struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
34 struct cx18_buffer *buf);
35int cx18_start_v4l2_encode_stream(struct cx18_stream *s); 49int cx18_start_v4l2_encode_stream(struct cx18_stream *s);
36int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end); 50int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end);
37 51
diff --git a/drivers/media/video/cx18/cx18-version.h b/drivers/media/video/cx18/cx18-version.h
index bd9bd44da791..45494b094e7f 100644
--- a/drivers/media/video/cx18/cx18-version.h
+++ b/drivers/media/video/cx18/cx18-version.h
@@ -24,7 +24,7 @@
24 24
25#define CX18_DRIVER_NAME "cx18" 25#define CX18_DRIVER_NAME "cx18"
26#define CX18_DRIVER_VERSION_MAJOR 1 26#define CX18_DRIVER_VERSION_MAJOR 1
27#define CX18_DRIVER_VERSION_MINOR 1 27#define CX18_DRIVER_VERSION_MINOR 2
28#define CX18_DRIVER_VERSION_PATCHLEVEL 0 28#define CX18_DRIVER_VERSION_PATCHLEVEL 0
29 29
30#define CX18_VERSION __stringify(CX18_DRIVER_VERSION_MAJOR) "." __stringify(CX18_DRIVER_VERSION_MINOR) "." __stringify(CX18_DRIVER_VERSION_PATCHLEVEL) 30#define CX18_VERSION __stringify(CX18_DRIVER_VERSION_MAJOR) "." __stringify(CX18_DRIVER_VERSION_MINOR) "." __stringify(CX18_DRIVER_VERSION_PATCHLEVEL)
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 1be3881be991..6a9464079b4c 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -29,7 +29,6 @@
29#include <linux/bitmap.h> 29#include <linux/bitmap.h>
30#include <linux/usb.h> 30#include <linux/usb.h>
31#include <linux/i2c.h> 31#include <linux/i2c.h>
32#include <linux/version.h>
33#include <linux/mm.h> 32#include <linux/mm.h>
34#include <linux/mutex.h> 33#include <linux/mutex.h>
35 34
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index c8a32b1b5381..63d2239fd324 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -281,12 +281,12 @@ static void cx231xx_config_tuner(struct cx231xx *dev)
281} 281}
282 282
283/* ----------------------------------------------------------------------- */ 283/* ----------------------------------------------------------------------- */
284void cx231xx_set_ir(struct cx231xx *dev, struct IR_i2c *ir) 284void cx231xx_register_i2c_ir(struct cx231xx *dev)
285{ 285{
286 if (disable_ir) { 286 if (disable_ir)
287 ir->get_key = NULL;
288 return; 287 return;
289 } 288
289 /* REVISIT: instantiate IR device */
290 290
291 /* detect & configure */ 291 /* detect & configure */
292 switch (dev->model) { 292 switch (dev->model) {
diff --git a/drivers/media/video/cx231xx/cx231xx-i2c.c b/drivers/media/video/cx231xx/cx231xx-i2c.c
index b4a03d813e00..33219dc4d649 100644
--- a/drivers/media/video/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/video/cx231xx/cx231xx-i2c.c
@@ -424,34 +424,6 @@ static u32 functionality(struct i2c_adapter *adap)
424 return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C; 424 return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C;
425} 425}
426 426
427/*
428 * attach_inform()
429 * gets called when a device attaches to the i2c bus
430 * does some basic configuration
431 */
432static int attach_inform(struct i2c_client *client)
433{
434 struct cx231xx_i2c *bus = i2c_get_adapdata(client->adapter);
435 struct cx231xx *dev = bus->dev;
436
437 switch (client->addr << 1) {
438 case 0x8e:
439 {
440 struct IR_i2c *ir = i2c_get_clientdata(client);
441 dprintk1(1, "attach_inform: IR detected (%s).\n",
442 ir->phys);
443 cx231xx_set_ir(dev, ir);
444 break;
445 }
446 break;
447
448 default:
449 break;
450 }
451
452 return 0;
453}
454
455static struct i2c_algorithm cx231xx_algo = { 427static struct i2c_algorithm cx231xx_algo = {
456 .master_xfer = cx231xx_i2c_xfer, 428 .master_xfer = cx231xx_i2c_xfer,
457 .functionality = functionality, 429 .functionality = functionality,
@@ -462,7 +434,6 @@ static struct i2c_adapter cx231xx_adap_template = {
462 .name = "cx231xx", 434 .name = "cx231xx",
463 .id = I2C_HW_B_CX231XX, 435 .id = I2C_HW_B_CX231XX,
464 .algo = &cx231xx_algo, 436 .algo = &cx231xx_algo,
465 .client_register = attach_inform,
466}; 437};
467 438
468static struct i2c_client cx231xx_client_template = { 439static struct i2c_client cx231xx_client_template = {
@@ -537,6 +508,9 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus)
537 if (0 == bus->i2c_rc) { 508 if (0 == bus->i2c_rc) {
538 if (i2c_scan) 509 if (i2c_scan)
539 cx231xx_do_i2c_scan(dev, &bus->i2c_client); 510 cx231xx_do_i2c_scan(dev, &bus->i2c_client);
511
512 /* Instantiate the IR receiver device, if present */
513 cx231xx_register_i2c_ir(dev);
540 } else 514 } else
541 cx231xx_warn("%s: i2c bus %d register FAILED\n", 515 cx231xx_warn("%s: i2c bus %d register FAILED\n",
542 dev->name, bus->nr); 516 dev->name, bus->nr);
diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c
index 97e304c3c799..48f22fa38e6c 100644
--- a/drivers/media/video/cx231xx/cx231xx-input.c
+++ b/drivers/media/video/cx231xx/cx231xx-input.c
@@ -36,7 +36,7 @@ MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
36 36
37#define i2cdprintk(fmt, arg...) \ 37#define i2cdprintk(fmt, arg...) \
38 if (ir_debug) { \ 38 if (ir_debug) { \
39 printk(KERN_DEBUG "%s/ir: " fmt, ir->c.name , ## arg); \ 39 printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
40 } 40 }
41 41
42#define dprintk(fmt, arg...) \ 42#define dprintk(fmt, arg...) \
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.c b/drivers/media/video/cx231xx/cx231xx-vbi.c
index 94180526909c..e97b8023a655 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.c
@@ -26,7 +26,6 @@
26#include <linux/bitmap.h> 26#include <linux/bitmap.h>
27#include <linux/usb.h> 27#include <linux/usb.h>
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/version.h>
30#include <linux/mm.h> 29#include <linux/mm.h>
31#include <linux/mutex.h> 30#include <linux/mutex.h>
32 31
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index aa4a23ef491a..e38eb2d425f7 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -738,7 +738,7 @@ extern void cx231xx_card_setup(struct cx231xx *dev);
738extern struct cx231xx_board cx231xx_boards[]; 738extern struct cx231xx_board cx231xx_boards[];
739extern struct usb_device_id cx231xx_id_table[]; 739extern struct usb_device_id cx231xx_id_table[];
740extern const unsigned int cx231xx_bcount; 740extern const unsigned int cx231xx_bcount;
741void cx231xx_set_ir(struct cx231xx *dev, struct IR_i2c *ir); 741void cx231xx_register_i2c_ir(struct cx231xx *dev);
742int cx231xx_tuner_callback(void *ptr, int component, int command, int arg); 742int cx231xx_tuner_callback(void *ptr, int component, int command, int arg);
743 743
744/* Provided by cx231xx-input.c */ 744/* Provided by cx231xx-input.c */
diff --git a/drivers/media/video/cx23885/cimax2.c b/drivers/media/video/cx23885/cimax2.c
index 9a6536998d90..08582e58bdbf 100644
--- a/drivers/media/video/cx23885/cimax2.c
+++ b/drivers/media/video/cx23885/cimax2.c
@@ -312,7 +312,7 @@ static void netup_read_ci_status(struct work_struct *work)
312 "TS config = %02x\n", __func__, state->ci_i2c_addr, 0, buf[0], 312 "TS config = %02x\n", __func__, state->ci_i2c_addr, 0, buf[0],
313 buf[32]); 313 buf[32]);
314 314
315 if (buf[0] && 1) 315 if (buf[0] & 1)
316 state->status = DVB_CA_EN50221_POLL_CAM_PRESENT | 316 state->status = DVB_CA_EN50221_POLL_CAM_PRESENT |
317 DVB_CA_EN50221_POLL_CAM_READY; 317 DVB_CA_EN50221_POLL_CAM_READY;
318 else 318 else
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 6f5df90af93e..2943bfd32a94 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1742,7 +1742,6 @@ static struct video_device *cx23885_video_dev_alloc(
1742 if (NULL == vfd) 1742 if (NULL == vfd)
1743 return NULL; 1743 return NULL;
1744 *vfd = *template; 1744 *vfd = *template;
1745 vfd->minor = -1;
1746 snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name, 1745 snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name,
1747 type, cx23885_boards[tsport->dev->board].name); 1746 type, cx23885_boards[tsport->dev->board].name);
1748 vfd->parent = &pci->dev; 1747 vfd->parent = &pci->dev;
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 6d6293f7d428..ce29b5e34a11 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -181,6 +181,26 @@ struct cx23885_board cx23885_boards[] = {
181 .portb = CX23885_MPEG_DVB, 181 .portb = CX23885_MPEG_DVB,
182 .portc = CX23885_MPEG_DVB, 182 .portc = CX23885_MPEG_DVB,
183 }, 183 },
184 [CX23885_BOARD_HAUPPAUGE_HVR1270] = {
185 .name = "Hauppauge WinTV-HVR1270",
186 .portc = CX23885_MPEG_DVB,
187 },
188 [CX23885_BOARD_HAUPPAUGE_HVR1275] = {
189 .name = "Hauppauge WinTV-HVR1275",
190 .portc = CX23885_MPEG_DVB,
191 },
192 [CX23885_BOARD_HAUPPAUGE_HVR1255] = {
193 .name = "Hauppauge WinTV-HVR1255",
194 .portc = CX23885_MPEG_DVB,
195 },
196 [CX23885_BOARD_HAUPPAUGE_HVR1210] = {
197 .name = "Hauppauge WinTV-HVR1210",
198 .portc = CX23885_MPEG_DVB,
199 },
200 [CX23885_BOARD_MYGICA_X8506] = {
201 .name = "Mygica X8506 DMB-TH",
202 .portb = CX23885_MPEG_DVB,
203 },
184}; 204};
185const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards); 205const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
186 206
@@ -280,6 +300,30 @@ struct cx23885_subid cx23885_subids[] = {
280 .subvendor = 0x1b55, 300 .subvendor = 0x1b55,
281 .subdevice = 0x2a2c, 301 .subdevice = 0x2a2c,
282 .card = CX23885_BOARD_NETUP_DUAL_DVBS2_CI, 302 .card = CX23885_BOARD_NETUP_DUAL_DVBS2_CI,
303 }, {
304 .subvendor = 0x0070,
305 .subdevice = 0x2211,
306 .card = CX23885_BOARD_HAUPPAUGE_HVR1270,
307 }, {
308 .subvendor = 0x0070,
309 .subdevice = 0x2215,
310 .card = CX23885_BOARD_HAUPPAUGE_HVR1275,
311 }, {
312 .subvendor = 0x0070,
313 .subdevice = 0x2251,
314 .card = CX23885_BOARD_HAUPPAUGE_HVR1255,
315 }, {
316 .subvendor = 0x0070,
317 .subdevice = 0x2291,
318 .card = CX23885_BOARD_HAUPPAUGE_HVR1210,
319 }, {
320 .subvendor = 0x0070,
321 .subdevice = 0x2295,
322 .card = CX23885_BOARD_HAUPPAUGE_HVR1210,
323 }, {
324 .subvendor = 0x14f1,
325 .subdevice = 0x8651,
326 .card = CX23885_BOARD_MYGICA_X8506,
283 }, 327 },
284}; 328};
285const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids); 329const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -321,6 +365,42 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
321 365
322 /* Make sure we support the board model */ 366 /* Make sure we support the board model */
323 switch (tv.model) { 367 switch (tv.model) {
368 case 22001:
369 /* WinTV-HVR1270 (PCIe, Retail, half height)
370 * ATSC/QAM and basic analog, IR Blast */
371 case 22009:
372 /* WinTV-HVR1210 (PCIe, Retail, half height)
373 * DVB-T and basic analog, IR Blast */
374 case 22011:
375 /* WinTV-HVR1270 (PCIe, Retail, half height)
376 * ATSC/QAM and basic analog, IR Recv */
377 case 22019:
378 /* WinTV-HVR1210 (PCIe, Retail, half height)
379 * DVB-T and basic analog, IR Recv */
380 case 22021:
381 /* WinTV-HVR1275 (PCIe, Retail, half height)
382 * ATSC/QAM and basic analog, IR Recv */
383 case 22029:
384 /* WinTV-HVR1210 (PCIe, Retail, half height)
385 * DVB-T and basic analog, IR Recv */
386 case 22101:
387 /* WinTV-HVR1270 (PCIe, Retail, full height)
388 * ATSC/QAM and basic analog, IR Blast */
389 case 22109:
390 /* WinTV-HVR1210 (PCIe, Retail, full height)
391 * DVB-T and basic analog, IR Blast */
392 case 22111:
393 /* WinTV-HVR1270 (PCIe, Retail, full height)
394 * ATSC/QAM and basic analog, IR Recv */
395 case 22119:
396 /* WinTV-HVR1210 (PCIe, Retail, full height)
397 * DVB-T and basic analog, IR Recv */
398 case 22121:
399 /* WinTV-HVR1275 (PCIe, Retail, full height)
400 * ATSC/QAM and basic analog, IR Recv */
401 case 22129:
402 /* WinTV-HVR1210 (PCIe, Retail, full height)
403 * DVB-T and basic analog, IR Recv */
324 case 71009: 404 case 71009:
325 /* WinTV-HVR1200 (PCIe, Retail, full height) 405 /* WinTV-HVR1200 (PCIe, Retail, full height)
326 * DVB-T and basic analog */ 406 * DVB-T and basic analog */
@@ -619,6 +699,30 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
619 /* enable irq */ 699 /* enable irq */
620 cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/ 700 cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
621 break; 701 break;
702 case CX23885_BOARD_HAUPPAUGE_HVR1270:
703 case CX23885_BOARD_HAUPPAUGE_HVR1275:
704 case CX23885_BOARD_HAUPPAUGE_HVR1255:
705 case CX23885_BOARD_HAUPPAUGE_HVR1210:
706 /* GPIO-5 RF Control: 0 = RF1 Terrestrial, 1 = RF2 Cable */
707 /* GPIO-6 I2C Gate which can isolate the demod from the bus */
708 /* GPIO-9 Demod reset */
709
710 /* Put the parts into reset and back */
711 cx23885_gpio_enable(dev, GPIO_9 | GPIO_6 | GPIO_5, 1);
712 cx23885_gpio_set(dev, GPIO_9 | GPIO_6 | GPIO_5);
713 cx23885_gpio_clear(dev, GPIO_9);
714 mdelay(20);
715 cx23885_gpio_set(dev, GPIO_9);
716 break;
717 case CX23885_BOARD_MYGICA_X8506:
718 /* GPIO-1 reset XC5000 */
719 /* GPIO-2 reset LGS8GL5 */
720 cx_set(GP0_IO, 0x00060000);
721 cx_clear(GP0_IO, 0x00000006);
722 mdelay(100);
723 cx_set(GP0_IO, 0x00060006);
724 mdelay(100);
725 break;
622 } 726 }
623} 727}
624 728
@@ -631,6 +735,10 @@ int cx23885_ir_init(struct cx23885_dev *dev)
631 case CX23885_BOARD_HAUPPAUGE_HVR1800: 735 case CX23885_BOARD_HAUPPAUGE_HVR1800:
632 case CX23885_BOARD_HAUPPAUGE_HVR1200: 736 case CX23885_BOARD_HAUPPAUGE_HVR1200:
633 case CX23885_BOARD_HAUPPAUGE_HVR1400: 737 case CX23885_BOARD_HAUPPAUGE_HVR1400:
738 case CX23885_BOARD_HAUPPAUGE_HVR1270:
739 case CX23885_BOARD_HAUPPAUGE_HVR1275:
740 case CX23885_BOARD_HAUPPAUGE_HVR1255:
741 case CX23885_BOARD_HAUPPAUGE_HVR1210:
634 /* FIXME: Implement me */ 742 /* FIXME: Implement me */
635 break; 743 break;
636 case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP: 744 case CX23885_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL_EXP:
@@ -666,6 +774,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
666 case CX23885_BOARD_HAUPPAUGE_HVR1800lp: 774 case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
667 case CX23885_BOARD_HAUPPAUGE_HVR1200: 775 case CX23885_BOARD_HAUPPAUGE_HVR1200:
668 case CX23885_BOARD_HAUPPAUGE_HVR1700: 776 case CX23885_BOARD_HAUPPAUGE_HVR1700:
777 case CX23885_BOARD_HAUPPAUGE_HVR1270:
778 case CX23885_BOARD_HAUPPAUGE_HVR1275:
779 case CX23885_BOARD_HAUPPAUGE_HVR1255:
780 case CX23885_BOARD_HAUPPAUGE_HVR1210:
669 if (dev->i2c_bus[0].i2c_rc == 0) 781 if (dev->i2c_bus[0].i2c_rc == 0)
670 hauppauge_eeprom(dev, eeprom+0xc0); 782 hauppauge_eeprom(dev, eeprom+0xc0);
671 break; 783 break;
@@ -714,6 +826,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
714 ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ 826 ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
715 ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; 827 ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
716 break; 828 break;
829 case CX23885_BOARD_MYGICA_X8506:
830 ts1->gen_ctrl_val = 0x5; /* Parallel */
831 ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
832 ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
833 break;
717 case CX23885_BOARD_HAUPPAUGE_HVR1250: 834 case CX23885_BOARD_HAUPPAUGE_HVR1250:
718 case CX23885_BOARD_HAUPPAUGE_HVR1500: 835 case CX23885_BOARD_HAUPPAUGE_HVR1500:
719 case CX23885_BOARD_HAUPPAUGE_HVR1500Q: 836 case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
@@ -723,6 +840,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
723 case CX23885_BOARD_HAUPPAUGE_HVR1400: 840 case CX23885_BOARD_HAUPPAUGE_HVR1400:
724 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H: 841 case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
725 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F: 842 case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
843 case CX23885_BOARD_HAUPPAUGE_HVR1270:
844 case CX23885_BOARD_HAUPPAUGE_HVR1275:
845 case CX23885_BOARD_HAUPPAUGE_HVR1255:
846 case CX23885_BOARD_HAUPPAUGE_HVR1210:
726 default: 847 default:
727 ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ 848 ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
728 ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ 849 ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index beda42925ce7..bf7bb1c412fb 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -1700,9 +1700,13 @@ static irqreturn_t cx23885_irq(int irq, void *dev_id)
1700 } 1700 }
1701 1701
1702 if (cx23885_boards[dev->board].cimax > 0 && 1702 if (cx23885_boards[dev->board].cimax > 0 &&
1703 ((pci_status & PCI_MSK_GPIO0) || (pci_status & PCI_MSK_GPIO1))) 1703 ((pci_status & PCI_MSK_GPIO0) ||
1704 /* handled += cx23885_irq_gpio(dev, pci_status); */ 1704 (pci_status & PCI_MSK_GPIO1))) {
1705 handled += netup_ci_slot_status(dev, pci_status); 1705
1706 if (cx23885_boards[dev->board].cimax > 0)
1707 handled += netup_ci_slot_status(dev, pci_status);
1708
1709 }
1706 1710
1707 if (ts1_status) { 1711 if (ts1_status) {
1708 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) 1712 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
@@ -1729,6 +1733,88 @@ out:
1729 return IRQ_RETVAL(handled); 1733 return IRQ_RETVAL(handled);
1730} 1734}
1731 1735
1736static inline int encoder_on_portb(struct cx23885_dev *dev)
1737{
1738 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1739}
1740
1741static inline int encoder_on_portc(struct cx23885_dev *dev)
1742{
1743 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1744}
1745
1746/* Mask represents 32 different GPIOs, GPIO's are split into multiple
1747 * registers depending on the board configuration (and whether the
1748 * 417 encoder (wi it's own GPIO's) are present. Each GPIO bit will
1749 * be pushed into the correct hardware register, regardless of the
1750 * physical location. Certain registers are shared so we sanity check
1751 * and report errors if we think we're tampering with a GPIo that might
1752 * be assigned to the encoder (and used for the host bus).
1753 *
1754 * GPIO 2 thru 0 - On the cx23885 bridge
1755 * GPIO 18 thru 3 - On the cx23417 host bus interface
1756 * GPIO 23 thru 19 - On the cx25840 a/v core
1757 */
1758void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1759{
1760 if (mask & 0x7)
1761 cx_set(GP0_IO, mask & 0x7);
1762
1763 if (mask & 0x0007fff8) {
1764 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1765 printk(KERN_ERR
1766 "%s: Setting GPIO on encoder ports\n",
1767 dev->name);
1768 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1769 }
1770
1771 /* TODO: 23-19 */
1772 if (mask & 0x00f80000)
1773 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1774}
1775
1776void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1777{
1778 if (mask & 0x00000007)
1779 cx_clear(GP0_IO, mask & 0x7);
1780
1781 if (mask & 0x0007fff8) {
1782 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1783 printk(KERN_ERR
1784 "%s: Clearing GPIO moving on encoder ports\n",
1785 dev->name);
1786 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1787 }
1788
1789 /* TODO: 23-19 */
1790 if (mask & 0x00f80000)
1791 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1792}
1793
1794void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1795{
1796 if ((mask & 0x00000007) && asoutput)
1797 cx_set(GP0_IO, (mask & 0x7) << 16);
1798 else if ((mask & 0x00000007) && !asoutput)
1799 cx_clear(GP0_IO, (mask & 0x7) << 16);
1800
1801 if (mask & 0x0007fff8) {
1802 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1803 printk(KERN_ERR
1804 "%s: Enabling GPIO on encoder ports\n",
1805 dev->name);
1806 }
1807
1808 /* MC417_OEN is active low for output, write 1 for an input */
1809 if ((mask & 0x0007fff8) && asoutput)
1810 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
1811
1812 else if ((mask & 0x0007fff8) && !asoutput)
1813 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
1814
1815 /* TODO: 23-19 */
1816}
1817
1732static int __devinit cx23885_initdev(struct pci_dev *pci_dev, 1818static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
1733 const struct pci_device_id *pci_id) 1819 const struct pci_device_id *pci_id)
1734{ 1820{
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 1dc070da8652..e236df23370e 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -49,8 +49,10 @@
49#include "lnbh24.h" 49#include "lnbh24.h"
50#include "cx24116.h" 50#include "cx24116.h"
51#include "cimax2.h" 51#include "cimax2.h"
52#include "lgs8gxx.h"
52#include "netup-eeprom.h" 53#include "netup-eeprom.h"
53#include "netup-init.h" 54#include "netup-init.h"
55#include "lgdt3305.h"
54 56
55static unsigned int debug; 57static unsigned int debug;
56 58
@@ -122,7 +124,22 @@ static struct tda10048_config hauppauge_hvr1200_config = {
122 .demod_address = 0x10 >> 1, 124 .demod_address = 0x10 >> 1,
123 .output_mode = TDA10048_SERIAL_OUTPUT, 125 .output_mode = TDA10048_SERIAL_OUTPUT,
124 .fwbulkwritelen = TDA10048_BULKWRITE_200, 126 .fwbulkwritelen = TDA10048_BULKWRITE_200,
125 .inversion = TDA10048_INVERSION_ON 127 .inversion = TDA10048_INVERSION_ON,
128 .dtv6_if_freq_khz = TDA10048_IF_3300,
129 .dtv7_if_freq_khz = TDA10048_IF_3800,
130 .dtv8_if_freq_khz = TDA10048_IF_4300,
131 .clk_freq_khz = TDA10048_CLK_16000,
132};
133
134static struct tda10048_config hauppauge_hvr1210_config = {
135 .demod_address = 0x10 >> 1,
136 .output_mode = TDA10048_SERIAL_OUTPUT,
137 .fwbulkwritelen = TDA10048_BULKWRITE_200,
138 .inversion = TDA10048_INVERSION_ON,
139 .dtv6_if_freq_khz = TDA10048_IF_3300,
140 .dtv7_if_freq_khz = TDA10048_IF_3500,
141 .dtv8_if_freq_khz = TDA10048_IF_4000,
142 .clk_freq_khz = TDA10048_CLK_16000,
126}; 143};
127 144
128static struct s5h1409_config hauppauge_ezqam_config = { 145static struct s5h1409_config hauppauge_ezqam_config = {
@@ -194,6 +211,16 @@ static struct s5h1411_config dvico_s5h1411_config = {
194 .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK, 211 .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
195}; 212};
196 213
214static struct s5h1411_config hcw_s5h1411_config = {
215 .output_mode = S5H1411_SERIAL_OUTPUT,
216 .gpio = S5H1411_GPIO_OFF,
217 .vsb_if = S5H1411_IF_44000,
218 .qam_if = S5H1411_IF_4000,
219 .inversion = S5H1411_INVERSION_ON,
220 .status_mode = S5H1411_DEMODLOCKING,
221 .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
222};
223
197static struct xc5000_config hauppauge_hvr1500q_tunerconfig = { 224static struct xc5000_config hauppauge_hvr1500q_tunerconfig = {
198 .i2c_address = 0x61, 225 .i2c_address = 0x61,
199 .if_khz = 5380, 226 .if_khz = 5380,
@@ -224,6 +251,32 @@ static struct tda18271_config hauppauge_hvr1200_tuner_config = {
224 .gate = TDA18271_GATE_ANALOG, 251 .gate = TDA18271_GATE_ANALOG,
225}; 252};
226 253
254static struct tda18271_config hauppauge_hvr1210_tuner_config = {
255 .gate = TDA18271_GATE_DIGITAL,
256};
257
258static struct tda18271_std_map hauppauge_hvr127x_std_map = {
259 .atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 4,
260 .if_lvl = 1, .rfagc_top = 0x58 },
261 .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 5,
262 .if_lvl = 1, .rfagc_top = 0x58 },
263};
264
265static struct tda18271_config hauppauge_hvr127x_config = {
266 .std_map = &hauppauge_hvr127x_std_map,
267};
268
269static struct lgdt3305_config hauppauge_lgdt3305_config = {
270 .i2c_addr = 0x0e,
271 .mpeg_mode = LGDT3305_MPEG_SERIAL,
272 .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE,
273 .tpvalid_polarity = LGDT3305_TP_VALID_HIGH,
274 .deny_i2c_rptr = 1,
275 .spectral_inversion = 1,
276 .qam_if_khz = 4000,
277 .vsb_if_khz = 3250,
278};
279
227static struct dibx000_agc_config xc3028_agc_config = { 280static struct dibx000_agc_config xc3028_agc_config = {
228 BAND_VHF | BAND_UHF, /* band_caps */ 281 BAND_VHF | BAND_UHF, /* band_caps */
229 282
@@ -368,10 +421,29 @@ static struct cx24116_config dvbworld_cx24116_config = {
368 .demod_address = 0x05, 421 .demod_address = 0x05,
369}; 422};
370 423
424static struct lgs8gxx_config mygica_x8506_lgs8gl5_config = {
425 .prod = LGS8GXX_PROD_LGS8GL5,
426 .demod_address = 0x19,
427 .serial_ts = 0,
428 .ts_clk_pol = 1,
429 .ts_clk_gated = 1,
430 .if_clk_freq = 30400, /* 30.4 MHz */
431 .if_freq = 5380, /* 5.38 MHz */
432 .if_neg_center = 1,
433 .ext_adc = 0,
434 .adc_signed = 0,
435 .if_neg_edge = 0,
436};
437
438static struct xc5000_config mygica_x8506_xc5000_config = {
439 .i2c_address = 0x61,
440 .if_khz = 5380,
441};
442
371static int dvb_register(struct cx23885_tsport *port) 443static int dvb_register(struct cx23885_tsport *port)
372{ 444{
373 struct cx23885_dev *dev = port->dev; 445 struct cx23885_dev *dev = port->dev;
374 struct cx23885_i2c *i2c_bus = NULL; 446 struct cx23885_i2c *i2c_bus = NULL, *i2c_bus2 = NULL;
375 struct videobuf_dvb_frontend *fe0; 447 struct videobuf_dvb_frontend *fe0;
376 int ret; 448 int ret;
377 449
@@ -396,6 +468,29 @@ static int dvb_register(struct cx23885_tsport *port)
396 &hauppauge_generic_tunerconfig, 0); 468 &hauppauge_generic_tunerconfig, 0);
397 } 469 }
398 break; 470 break;
471 case CX23885_BOARD_HAUPPAUGE_HVR1270:
472 case CX23885_BOARD_HAUPPAUGE_HVR1275:
473 i2c_bus = &dev->i2c_bus[0];
474 fe0->dvb.frontend = dvb_attach(lgdt3305_attach,
475 &hauppauge_lgdt3305_config,
476 &i2c_bus->i2c_adap);
477 if (fe0->dvb.frontend != NULL) {
478 dvb_attach(tda18271_attach, fe0->dvb.frontend,
479 0x60, &dev->i2c_bus[1].i2c_adap,
480 &hauppauge_hvr127x_config);
481 }
482 break;
483 case CX23885_BOARD_HAUPPAUGE_HVR1255:
484 i2c_bus = &dev->i2c_bus[0];
485 fe0->dvb.frontend = dvb_attach(s5h1411_attach,
486 &hcw_s5h1411_config,
487 &i2c_bus->i2c_adap);
488 if (fe0->dvb.frontend != NULL) {
489 dvb_attach(tda18271_attach, fe0->dvb.frontend,
490 0x60, &dev->i2c_bus[1].i2c_adap,
491 &hauppauge_tda18271_config);
492 }
493 break;
399 case CX23885_BOARD_HAUPPAUGE_HVR1800: 494 case CX23885_BOARD_HAUPPAUGE_HVR1800:
400 i2c_bus = &dev->i2c_bus[0]; 495 i2c_bus = &dev->i2c_bus[0];
401 switch (alt_tuner) { 496 switch (alt_tuner) {
@@ -496,6 +591,17 @@ static int dvb_register(struct cx23885_tsport *port)
496 &hauppauge_hvr1200_tuner_config); 591 &hauppauge_hvr1200_tuner_config);
497 } 592 }
498 break; 593 break;
594 case CX23885_BOARD_HAUPPAUGE_HVR1210:
595 i2c_bus = &dev->i2c_bus[0];
596 fe0->dvb.frontend = dvb_attach(tda10048_attach,
597 &hauppauge_hvr1210_config,
598 &i2c_bus->i2c_adap);
599 if (fe0->dvb.frontend != NULL) {
600 dvb_attach(tda18271_attach, fe0->dvb.frontend,
601 0x60, &dev->i2c_bus[1].i2c_adap,
602 &hauppauge_hvr1210_tuner_config);
603 }
604 break;
499 case CX23885_BOARD_HAUPPAUGE_HVR1400: 605 case CX23885_BOARD_HAUPPAUGE_HVR1400:
500 i2c_bus = &dev->i2c_bus[0]; 606 i2c_bus = &dev->i2c_bus[0];
501 fe0->dvb.frontend = dvb_attach(dib7000p_attach, 607 fe0->dvb.frontend = dvb_attach(dib7000p_attach,
@@ -659,6 +765,19 @@ static int dvb_register(struct cx23885_tsport *port)
659 break; 765 break;
660 } 766 }
661 break; 767 break;
768 case CX23885_BOARD_MYGICA_X8506:
769 i2c_bus = &dev->i2c_bus[0];
770 i2c_bus2 = &dev->i2c_bus[1];
771 fe0->dvb.frontend = dvb_attach(lgs8gxx_attach,
772 &mygica_x8506_lgs8gl5_config,
773 &i2c_bus->i2c_adap);
774 if (fe0->dvb.frontend != NULL) {
775 dvb_attach(xc5000_attach,
776 fe0->dvb.frontend,
777 &i2c_bus2->i2c_adap,
778 &mygica_x8506_xc5000_config);
779 }
780 break;
662 default: 781 default:
663 printk(KERN_INFO "%s: The frontend of your DVB/ATSC card " 782 printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
664 " isn't supported yet\n", 783 " isn't supported yet\n",
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index 3421bd12056a..384dec34134f 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -357,6 +357,18 @@ int cx23885_i2c_register(struct cx23885_i2c *bus)
357 printk(KERN_WARNING "%s: i2c bus %d register FAILED\n", 357 printk(KERN_WARNING "%s: i2c bus %d register FAILED\n",
358 dev->name, bus->nr); 358 dev->name, bus->nr);
359 359
360 /* Instantiate the IR receiver device, if present */
361 if (0 == bus->i2c_rc) {
362 struct i2c_board_info info;
363 const unsigned short addr_list[] = {
364 0x6b, I2C_CLIENT_END
365 };
366
367 memset(&info, 0, sizeof(struct i2c_board_info));
368 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
369 i2c_new_probed_device(&bus->i2c_adap, &info, addr_list);
370 }
371
360 return bus->i2c_rc; 372 return bus->i2c_rc;
361} 373}
362 374
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 68068c6d0987..66bbd2e71105 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -796,6 +796,7 @@ static unsigned int video_poll(struct file *file,
796{ 796{
797 struct cx23885_fh *fh = file->private_data; 797 struct cx23885_fh *fh = file->private_data;
798 struct cx23885_buffer *buf; 798 struct cx23885_buffer *buf;
799 unsigned int rc = POLLERR;
799 800
800 if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) { 801 if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
801 if (!res_get(fh->dev, fh, RESOURCE_VBI)) 802 if (!res_get(fh->dev, fh, RESOURCE_VBI))
@@ -803,23 +804,28 @@ static unsigned int video_poll(struct file *file,
803 return videobuf_poll_stream(file, &fh->vbiq, wait); 804 return videobuf_poll_stream(file, &fh->vbiq, wait);
804 } 805 }
805 806
807 mutex_lock(&fh->vidq.vb_lock);
806 if (res_check(fh, RESOURCE_VIDEO)) { 808 if (res_check(fh, RESOURCE_VIDEO)) {
807 /* streaming capture */ 809 /* streaming capture */
808 if (list_empty(&fh->vidq.stream)) 810 if (list_empty(&fh->vidq.stream))
809 return POLLERR; 811 goto done;
810 buf = list_entry(fh->vidq.stream.next, 812 buf = list_entry(fh->vidq.stream.next,
811 struct cx23885_buffer, vb.stream); 813 struct cx23885_buffer, vb.stream);
812 } else { 814 } else {
813 /* read() capture */ 815 /* read() capture */
814 buf = (struct cx23885_buffer *)fh->vidq.read_buf; 816 buf = (struct cx23885_buffer *)fh->vidq.read_buf;
815 if (NULL == buf) 817 if (NULL == buf)
816 return POLLERR; 818 goto done;
817 } 819 }
818 poll_wait(file, &buf->vb.done, wait); 820 poll_wait(file, &buf->vb.done, wait);
819 if (buf->vb.state == VIDEOBUF_DONE || 821 if (buf->vb.state == VIDEOBUF_DONE ||
820 buf->vb.state == VIDEOBUF_ERROR) 822 buf->vb.state == VIDEOBUF_ERROR)
821 return POLLIN|POLLRDNORM; 823 rc = POLLIN|POLLRDNORM;
822 return 0; 824 else
825 rc = 0;
826done:
827 mutex_unlock(&fh->vidq.vb_lock);
828 return rc;
823} 829}
824 830
825static int video_release(struct file *file) 831static int video_release(struct file *file)
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index 85642831ea8e..1a2ac518a3f1 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -71,6 +71,22 @@
71#define CX23885_BOARD_TEVII_S470 15 71#define CX23885_BOARD_TEVII_S470 15
72#define CX23885_BOARD_DVBWORLD_2005 16 72#define CX23885_BOARD_DVBWORLD_2005 16
73#define CX23885_BOARD_NETUP_DUAL_DVBS2_CI 17 73#define CX23885_BOARD_NETUP_DUAL_DVBS2_CI 17
74#define CX23885_BOARD_HAUPPAUGE_HVR1270 18
75#define CX23885_BOARD_HAUPPAUGE_HVR1275 19
76#define CX23885_BOARD_HAUPPAUGE_HVR1255 20
77#define CX23885_BOARD_HAUPPAUGE_HVR1210 21
78#define CX23885_BOARD_MYGICA_X8506 22
79
80#define GPIO_0 0x00000001
81#define GPIO_1 0x00000002
82#define GPIO_2 0x00000004
83#define GPIO_3 0x00000008
84#define GPIO_4 0x00000010
85#define GPIO_5 0x00000020
86#define GPIO_6 0x00000040
87#define GPIO_7 0x00000080
88#define GPIO_8 0x00000100
89#define GPIO_9 0x00000200
74 90
75/* Currently unsupported by the driver: PAL/H, NTSC/Kr, SECAM B/G/H/LC */ 91/* Currently unsupported by the driver: PAL/H, NTSC/Kr, SECAM B/G/H/LC */
76#define CX23885_NORMS (\ 92#define CX23885_NORMS (\
@@ -422,6 +438,11 @@ extern int cx23885_restart_queue(struct cx23885_tsport *port,
422extern void cx23885_wakeup(struct cx23885_tsport *port, 438extern void cx23885_wakeup(struct cx23885_tsport *port,
423 struct cx23885_dmaqueue *q, u32 count); 439 struct cx23885_dmaqueue *q, u32 count);
424 440
441extern void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask);
442extern void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask);
443extern void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask,
444 int asoutput);
445
425 446
426/* ----------------------------------------------------------- */ 447/* ----------------------------------------------------------- */
427/* cx23885-cards.c */ 448/* cx23885-cards.c */
diff --git a/drivers/media/video/cx88/Makefile b/drivers/media/video/cx88/Makefile
index b06b1275a9ec..5b7e26761f0a 100644
--- a/drivers/media/video/cx88/Makefile
+++ b/drivers/media/video/cx88/Makefile
@@ -1,5 +1,5 @@
1cx88xx-objs := cx88-cards.o cx88-core.o cx88-i2c.o cx88-tvaudio.o \ 1cx88xx-objs := cx88-cards.o cx88-core.o cx88-i2c.o cx88-tvaudio.o \
2 cx88-input.o 2 cx88-dsp.o cx88-input.o
3cx8800-objs := cx88-video.o cx88-vbi.o 3cx8800-objs := cx88-video.o cx88-vbi.o
4cx8802-objs := cx88-mpeg.o 4cx8802-objs := cx88-mpeg.o
5 5
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 0ccdf36626e3..5a67445dd6ed 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -871,7 +871,7 @@ static struct pci_driver cx88_audio_pci_driver = {
871 .name = "cx88_audio", 871 .name = "cx88_audio",
872 .id_table = cx88_audio_pci_tbl, 872 .id_table = cx88_audio_pci_tbl,
873 .probe = cx88_audio_initdev, 873 .probe = cx88_audio_initdev,
874 .remove = cx88_audio_finidev, 874 .remove = __devexit_p(cx88_audio_finidev),
875}; 875};
876 876
877/**************************************************************************** 877/****************************************************************************
@@ -881,7 +881,7 @@ static struct pci_driver cx88_audio_pci_driver = {
881/* 881/*
882 * module init 882 * module init
883 */ 883 */
884static int cx88_audio_init(void) 884static int __init cx88_audio_init(void)
885{ 885{
886 printk(KERN_INFO "cx2388x alsa driver version %d.%d.%d loaded\n", 886 printk(KERN_INFO "cx2388x alsa driver version %d.%d.%d loaded\n",
887 (CX88_VERSION_CODE >> 16) & 0xff, 887 (CX88_VERSION_CODE >> 16) & 0xff,
@@ -897,9 +897,8 @@ static int cx88_audio_init(void)
897/* 897/*
898 * module remove 898 * module remove
899 */ 899 */
900static void cx88_audio_fini(void) 900static void __exit cx88_audio_fini(void)
901{ 901{
902
903 pci_unregister_driver(&cx88_audio_pci_driver); 902 pci_unregister_driver(&cx88_audio_pci_driver);
904} 903}
905 904
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 6bbbfc66bb4b..94b7a52629d0 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -1969,6 +1969,54 @@ static const struct cx88_board cx88_boards[] = {
1969 }, 1969 },
1970 .mpeg = CX88_MPEG_DVB, 1970 .mpeg = CX88_MPEG_DVB,
1971 }, 1971 },
1972 [CX88_BOARD_HAUPPAUGE_IRONLY] = {
1973 .name = "Hauppauge WinTV-IR Only",
1974 .tuner_type = UNSET,
1975 .radio_type = UNSET,
1976 .tuner_addr = ADDR_UNSET,
1977 .radio_addr = ADDR_UNSET,
1978 },
1979 [CX88_BOARD_WINFAST_DTV1800H] = {
1980 .name = "Leadtek WinFast DTV1800 Hybrid",
1981 .tuner_type = TUNER_XC2028,
1982 .radio_type = TUNER_XC2028,
1983 .tuner_addr = 0x61,
1984 .radio_addr = 0x61,
1985 /*
1986 * GPIO setting
1987 *
1988 * 2: mute (0=off,1=on)
1989 * 12: tuner reset pin
1990 * 13: audio source (0=tuner audio,1=line in)
1991 * 14: FM (0=on,1=off ???)
1992 */
1993 .input = {{
1994 .type = CX88_VMUX_TELEVISION,
1995 .vmux = 0,
1996 .gpio0 = 0x0400, /* pin 2 = 0 */
1997 .gpio1 = 0x6040, /* pin 13 = 0, pin 14 = 1 */
1998 .gpio2 = 0x0000,
1999 }, {
2000 .type = CX88_VMUX_COMPOSITE1,
2001 .vmux = 1,
2002 .gpio0 = 0x0400, /* pin 2 = 0 */
2003 .gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
2004 .gpio2 = 0x0000,
2005 }, {
2006 .type = CX88_VMUX_SVIDEO,
2007 .vmux = 2,
2008 .gpio0 = 0x0400, /* pin 2 = 0 */
2009 .gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
2010 .gpio2 = 0x0000,
2011 } },
2012 .radio = {
2013 .type = CX88_RADIO,
2014 .gpio0 = 0x0400, /* pin 2 = 0 */
2015 .gpio1 = 0x6000, /* pin 13 = 0, pin 14 = 0 */
2016 .gpio2 = 0x0000,
2017 },
2018 .mpeg = CX88_MPEG_DVB,
2019 },
1972}; 2020};
1973 2021
1974/* ------------------------------------------------------------------ */ 2022/* ------------------------------------------------------------------ */
@@ -2382,6 +2430,14 @@ static const struct cx88_subid cx88_subids[] = {
2382 .subvendor = 0x153b, 2430 .subvendor = 0x153b,
2383 .subdevice = 0x1177, 2431 .subdevice = 0x1177,
2384 .card = CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII, 2432 .card = CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII,
2433 }, {
2434 .subvendor = 0x0070,
2435 .subdevice = 0x9290,
2436 .card = CX88_BOARD_HAUPPAUGE_IRONLY,
2437 }, {
2438 .subvendor = 0x107d,
2439 .subdevice = 0x6654,
2440 .card = CX88_BOARD_WINFAST_DTV1800H,
2385 }, 2441 },
2386}; 2442};
2387 2443
@@ -2448,6 +2504,7 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
2448 case 90500: /* Nova-T-PCI (oem) */ 2504 case 90500: /* Nova-T-PCI (oem) */
2449 case 90501: /* Nova-T-PCI (oem/IR) */ 2505 case 90501: /* Nova-T-PCI (oem/IR) */
2450 case 92000: /* Nova-SE2 (OEM, No Video or IR) */ 2506 case 92000: /* Nova-SE2 (OEM, No Video or IR) */
2507 case 92900: /* WinTV-IROnly (No analog or digital Video inputs) */
2451 case 94009: /* WinTV-HVR1100 (Video and IR Retail) */ 2508 case 94009: /* WinTV-HVR1100 (Video and IR Retail) */
2452 case 94501: /* WinTV-HVR1100 (Video and IR OEM) */ 2509 case 94501: /* WinTV-HVR1100 (Video and IR OEM) */
2453 case 96009: /* WinTV-HVR1300 (PAL Video, MPEG Video and IR RX) */ 2510 case 96009: /* WinTV-HVR1300 (PAL Video, MPEG Video and IR RX) */
@@ -2579,6 +2636,23 @@ static int cx88_xc3028_geniatech_tuner_callback(struct cx88_core *core,
2579 return -EINVAL; 2636 return -EINVAL;
2580} 2637}
2581 2638
2639static int cx88_xc3028_winfast1800h_callback(struct cx88_core *core,
2640 int command, int arg)
2641{
2642 switch (command) {
2643 case XC2028_TUNER_RESET:
2644 /* GPIO 12 (xc3028 tuner reset) */
2645 cx_set(MO_GP1_IO, 0x1010);
2646 mdelay(50);
2647 cx_clear(MO_GP1_IO, 0x10);
2648 mdelay(50);
2649 cx_set(MO_GP1_IO, 0x10);
2650 mdelay(50);
2651 return 0;
2652 }
2653 return -EINVAL;
2654}
2655
2582/* ------------------------------------------------------------------- */ 2656/* ------------------------------------------------------------------- */
2583/* some Divco specific stuff */ 2657/* some Divco specific stuff */
2584static int cx88_pv_8000gt_callback(struct cx88_core *core, 2658static int cx88_pv_8000gt_callback(struct cx88_core *core,
@@ -2651,6 +2725,8 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
2651 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO: 2725 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
2652 case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: 2726 case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
2653 return cx88_dvico_xc2028_callback(core, command, arg); 2727 return cx88_dvico_xc2028_callback(core, command, arg);
2728 case CX88_BOARD_WINFAST_DTV1800H:
2729 return cx88_xc3028_winfast1800h_callback(core, command, arg);
2654 } 2730 }
2655 2731
2656 switch (command) { 2732 switch (command) {
@@ -2690,10 +2766,22 @@ static int cx88_xc5000_tuner_callback(struct cx88_core *core,
2690 switch (core->boardnr) { 2766 switch (core->boardnr) {
2691 case CX88_BOARD_PINNACLE_PCTV_HD_800i: 2767 case CX88_BOARD_PINNACLE_PCTV_HD_800i:
2692 if (command == 0) { /* This is the reset command from xc5000 */ 2768 if (command == 0) { /* This is the reset command from xc5000 */
2693 /* Reset XC5000 tuner via SYS_RSTO_pin */ 2769
2694 cx_write(MO_SRST_IO, 0); 2770 /* djh - According to the engineer at PCTV Systems,
2695 msleep(10); 2771 the xc5000 reset pin is supposed to be on GPIO12.
2696 cx_write(MO_SRST_IO, 1); 2772 However, despite three nights of effort, pulling
2773 that GPIO low didn't reset the xc5000. While
2774 pulling MO_SRST_IO low does reset the xc5000, this
2775 also resets in the s5h1409 being reset as well.
2776 This causes tuning to always fail since the internal
2777 state of the s5h1409 does not match the driver's
2778 state. Given that the only two conditions in which
2779 the driver performs a reset is during firmware load
2780 and powering down the chip, I am taking out the
2781 reset. We know that the chip is being reset
2782 when the cx88 comes online, and not being able to
2783 do power management for this board is worse than
2784 not having any tuning at all. */
2697 return 0; 2785 return 0;
2698 } else { 2786 } else {
2699 err_printk(core, "xc5000: unknown tuner " 2787 err_printk(core, "xc5000: unknown tuner "
@@ -2825,6 +2913,16 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
2825 cx_set(MO_GP0_IO, 0x00000080); /* 702 out of reset */ 2913 cx_set(MO_GP0_IO, 0x00000080); /* 702 out of reset */
2826 udelay(1000); 2914 udelay(1000);
2827 break; 2915 break;
2916
2917 case CX88_BOARD_WINFAST_DTV1800H:
2918 /* GPIO 12 (xc3028 tuner reset) */
2919 cx_set(MO_GP1_IO, 0x1010);
2920 mdelay(50);
2921 cx_clear(MO_GP1_IO, 0x10);
2922 mdelay(50);
2923 cx_set(MO_GP1_IO, 0x10);
2924 mdelay(50);
2925 break;
2828 } 2926 }
2829} 2927}
2830 2928
@@ -2845,6 +2943,7 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
2845 core->i2c_algo.udelay = 16; 2943 core->i2c_algo.udelay = 16;
2846 break; 2944 break;
2847 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO: 2945 case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
2946 case CX88_BOARD_WINFAST_DTV1800H:
2848 ctl->demod = XC3028_FE_ZARLINK456; 2947 ctl->demod = XC3028_FE_ZARLINK456;
2849 break; 2948 break;
2850 case CX88_BOARD_KWORLD_ATSC_120: 2949 case CX88_BOARD_KWORLD_ATSC_120:
@@ -2907,6 +3006,7 @@ static void cx88_card_setup(struct cx88_core *core)
2907 case CX88_BOARD_HAUPPAUGE_HVR1300: 3006 case CX88_BOARD_HAUPPAUGE_HVR1300:
2908 case CX88_BOARD_HAUPPAUGE_HVR4000: 3007 case CX88_BOARD_HAUPPAUGE_HVR4000:
2909 case CX88_BOARD_HAUPPAUGE_HVR4000LITE: 3008 case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
3009 case CX88_BOARD_HAUPPAUGE_IRONLY:
2910 if (0 == core->i2c_rc) 3010 if (0 == core->i2c_rc)
2911 hauppauge_eeprom(core, eeprom); 3011 hauppauge_eeprom(core, eeprom);
2912 break; 3012 break;
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index 0e149b22bd19..cf634606ba9a 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -231,7 +231,7 @@ cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf)
231 * can use the whole SDRAM for the DMA fifos. To simplify things, we 231 * can use the whole SDRAM for the DMA fifos. To simplify things, we
232 * use a static memory layout. That surely will waste memory in case 232 * use a static memory layout. That surely will waste memory in case
233 * we don't use all DMA channels at the same time (which will be the 233 * we don't use all DMA channels at the same time (which will be the
234 * case most of the time). But that still gives us enougth FIFO space 234 * case most of the time). But that still gives us enough FIFO space
235 * to be able to deal with insane long pci latencies ... 235 * to be able to deal with insane long pci latencies ...
236 * 236 *
237 * FIFO space allocations: 237 * FIFO space allocations:
@@ -241,6 +241,7 @@ cx88_free_buffer(struct videobuf_queue *q, struct cx88_buffer *buf)
241 * channel 24 (vbi) - 4.0k 241 * channel 24 (vbi) - 4.0k
242 * channels 25+26 (audio) - 4.0k 242 * channels 25+26 (audio) - 4.0k
243 * channel 28 (mpeg) - 4.0k 243 * channel 28 (mpeg) - 4.0k
244 * channel 27 (audio rds)- 3.0k
244 * TOTAL = 29.0k 245 * TOTAL = 29.0k
245 * 246 *
246 * Every channel has 160 bytes control data (64 bytes instruction 247 * Every channel has 160 bytes control data (64 bytes instruction
@@ -337,6 +338,18 @@ struct sram_channel cx88_sram_channels[] = {
337 .cnt1_reg = MO_DMA28_CNT1, 338 .cnt1_reg = MO_DMA28_CNT1,
338 .cnt2_reg = MO_DMA28_CNT2, 339 .cnt2_reg = MO_DMA28_CNT2,
339 }, 340 },
341 [SRAM_CH27] = {
342 .name = "audio rds",
343 .cmds_start = 0x1801C0,
344 .ctrl_start = 0x180860,
345 .cdt = 0x180860 + 64,
346 .fifo_start = 0x187400,
347 .fifo_size = 0x000C00,
348 .ptr1_reg = MO_DMA27_PTR1,
349 .ptr2_reg = MO_DMA27_PTR2,
350 .cnt1_reg = MO_DMA27_CNT1,
351 .cnt2_reg = MO_DMA27_CNT2,
352 },
340}; 353};
341 354
342int cx88_sram_channel_setup(struct cx88_core *core, 355int cx88_sram_channel_setup(struct cx88_core *core,
@@ -598,6 +611,7 @@ int cx88_reset(struct cx88_core *core)
598 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH25], 128, 0); 611 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH25], 128, 0);
599 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH26], 128, 0); 612 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH26], 128, 0);
600 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28], 188*4, 0); 613 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28], 188*4, 0);
614 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH27], 128, 0);
601 615
602 /* misc init ... */ 616 /* misc init ... */
603 cx_write(MO_INPUT_FORMAT, ((1 << 13) | // agc enable 617 cx_write(MO_INPUT_FORMAT, ((1 << 13) | // agc enable
@@ -796,6 +810,8 @@ int cx88_start_audio_dma(struct cx88_core *core)
796 /* constant 128 made buzz in analog Nicam-stereo for bigger fifo_size */ 810 /* constant 128 made buzz in analog Nicam-stereo for bigger fifo_size */
797 int bpl = cx88_sram_channels[SRAM_CH25].fifo_size/4; 811 int bpl = cx88_sram_channels[SRAM_CH25].fifo_size/4;
798 812
813 int rds_bpl = cx88_sram_channels[SRAM_CH27].fifo_size/AUD_RDS_LINES;
814
799 /* If downstream RISC is enabled, bail out; ALSA is managing DMA */ 815 /* If downstream RISC is enabled, bail out; ALSA is managing DMA */
800 if (cx_read(MO_AUD_DMACNTRL) & 0x10) 816 if (cx_read(MO_AUD_DMACNTRL) & 0x10)
801 return 0; 817 return 0;
@@ -803,12 +819,14 @@ int cx88_start_audio_dma(struct cx88_core *core)
803 /* setup fifo + format */ 819 /* setup fifo + format */
804 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH25], bpl, 0); 820 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH25], bpl, 0);
805 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH26], bpl, 0); 821 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH26], bpl, 0);
822 cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH27],
823 rds_bpl, 0);
806 824
807 cx_write(MO_AUDD_LNGTH, bpl); /* fifo bpl size */ 825 cx_write(MO_AUDD_LNGTH, bpl); /* fifo bpl size */
808 cx_write(MO_AUDR_LNGTH, bpl); /* fifo bpl size */ 826 cx_write(MO_AUDR_LNGTH, rds_bpl); /* fifo bpl size */
809 827
810 /* start dma */ 828 /* enable Up, Down and Audio RDS fifo */
811 cx_write(MO_AUD_DMACNTRL, 0x0003); /* Up and Down fifo enable */ 829 cx_write(MO_AUD_DMACNTRL, 0x0007);
812 830
813 return 0; 831 return 0;
814} 832}
@@ -1010,7 +1028,6 @@ struct video_device *cx88_vdev_init(struct cx88_core *core,
1010 if (NULL == vfd) 1028 if (NULL == vfd)
1011 return NULL; 1029 return NULL;
1012 *vfd = *template; 1030 *vfd = *template;
1013 vfd->minor = -1;
1014 vfd->v4l2_dev = &core->v4l2_dev; 1031 vfd->v4l2_dev = &core->v4l2_dev;
1015 vfd->parent = &pci->dev; 1032 vfd->parent = &pci->dev;
1016 vfd->release = video_device_release; 1033 vfd->release = video_device_release;
diff --git a/drivers/media/video/cx88/cx88-dsp.c b/drivers/media/video/cx88/cx88-dsp.c
new file mode 100644
index 000000000000..3e5eaf3fe2a6
--- /dev/null
+++ b/drivers/media/video/cx88/cx88-dsp.c
@@ -0,0 +1,312 @@
1/*
2 *
3 * Stereo and SAP detection for cx88
4 *
5 * Copyright (c) 2009 Marton Balint <cus@fazekas.hu>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/jiffies.h>
25#include <asm/div64.h>
26
27#include "cx88.h"
28#include "cx88-reg.h"
29
30#define INT_PI ((s32)(3.141592653589 * 32768.0))
31
32#define compat_remainder(a, b) \
33 ((float)(((s32)((a)*100))%((s32)((b)*100)))/100.0)
34
35#define baseband_freq(carrier, srate, tone) ((s32)( \
36 (compat_remainder(carrier + tone, srate)) / srate * 2 * INT_PI))
37
38/* We calculate the baseband frequencies of the carrier and the pilot tones
39 * based on the the sampling rate of the audio rds fifo. */
40
41#define FREQ_A2_CARRIER baseband_freq(54687.5, 2689.36, 0.0)
42#define FREQ_A2_DUAL baseband_freq(54687.5, 2689.36, 274.1)
43#define FREQ_A2_STEREO baseband_freq(54687.5, 2689.36, 117.5)
44
45/* The frequencies below are from the reference driver. They probably need
46 * further adjustments, because they are not tested at all. You may even need
47 * to play a bit with the registers of the chip to select the proper signal
48 * for the input of the audio rds fifo, and measure it's sampling rate to
49 * calculate the proper baseband frequencies... */
50
51#define FREQ_A2M_CARRIER ((s32)(2.114516 * 32768.0))
52#define FREQ_A2M_DUAL ((s32)(2.754916 * 32768.0))
53#define FREQ_A2M_STEREO ((s32)(2.462326 * 32768.0))
54
55#define FREQ_EIAJ_CARRIER ((s32)(1.963495 * 32768.0)) /* 5pi/8 */
56#define FREQ_EIAJ_DUAL ((s32)(2.562118 * 32768.0))
57#define FREQ_EIAJ_STEREO ((s32)(2.601053 * 32768.0))
58
59#define FREQ_BTSC_DUAL ((s32)(1.963495 * 32768.0)) /* 5pi/8 */
60#define FREQ_BTSC_DUAL_REF ((s32)(1.374446 * 32768.0)) /* 7pi/16 */
61
62#define FREQ_BTSC_SAP ((s32)(2.471532 * 32768.0))
63#define FREQ_BTSC_SAP_REF ((s32)(1.730072 * 32768.0))
64
65/* The spectrum of the signal should be empty between these frequencies. */
66#define FREQ_NOISE_START ((s32)(0.100000 * 32768.0))
67#define FREQ_NOISE_END ((s32)(1.200000 * 32768.0))
68
69static unsigned int dsp_debug;
70module_param(dsp_debug, int, 0644);
71MODULE_PARM_DESC(dsp_debug, "enable audio dsp debug messages");
72
73#define dprintk(level, fmt, arg...) if (dsp_debug >= level) \
74 printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
75
76static s32 int_cos(u32 x)
77{
78 u32 t2, t4, t6, t8;
79 s32 ret;
80 u16 period = x / INT_PI;
81 if (period % 2)
82 return -int_cos(x - INT_PI);
83 x = x % INT_PI;
84 if (x > INT_PI/2)
85 return -int_cos(INT_PI/2 - (x % (INT_PI/2)));
86 /* Now x is between 0 and INT_PI/2.
87 * To calculate cos(x) we use it's Taylor polinom. */
88 t2 = x*x/32768/2;
89 t4 = t2*x/32768*x/32768/3/4;
90 t6 = t4*x/32768*x/32768/5/6;
91 t8 = t6*x/32768*x/32768/7/8;
92 ret = 32768-t2+t4-t6+t8;
93 return ret;
94}
95
96static u32 int_goertzel(s16 x[], u32 N, u32 freq)
97{
98 /* We use the Goertzel algorithm to determine the power of the
99 * given frequency in the signal */
100 s32 s_prev = 0;
101 s32 s_prev2 = 0;
102 s32 coeff = 2*int_cos(freq);
103 u32 i;
104
105 u64 tmp;
106 u32 divisor;
107
108 for (i = 0; i < N; i++) {
109 s32 s = x[i] + ((s64)coeff*s_prev/32768) - s_prev2;
110 s_prev2 = s_prev;
111 s_prev = s;
112 }
113
114 tmp = (s64)s_prev2 * s_prev2 + (s64)s_prev * s_prev -
115 (s64)coeff * s_prev2 * s_prev / 32768;
116
117 /* XXX: N must be low enough so that N*N fits in s32.
118 * Else we need two divisions. */
119 divisor = N * N;
120 do_div(tmp, divisor);
121
122 return (u32) tmp;
123}
124
125static u32 freq_magnitude(s16 x[], u32 N, u32 freq)
126{
127 u32 sum = int_goertzel(x, N, freq);
128 return (u32)int_sqrt(sum);
129}
130
131static u32 noise_magnitude(s16 x[], u32 N, u32 freq_start, u32 freq_end)
132{
133 int i;
134 u32 sum = 0;
135 u32 freq_step;
136 int samples = 5;
137
138 if (N > 192) {
139 /* The last 192 samples are enough for noise detection */
140 x += (N-192);
141 N = 192;
142 }
143
144 freq_step = (freq_end - freq_start) / (samples - 1);
145
146 for (i = 0; i < samples; i++) {
147 sum += int_goertzel(x, N, freq_start);
148 freq_start += freq_step;
149 }
150
151 return (u32)int_sqrt(sum / samples);
152}
153
154static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N)
155{
156 s32 carrier, stereo, dual, noise;
157 s32 carrier_freq, stereo_freq, dual_freq;
158 s32 ret;
159
160 switch (core->tvaudio) {
161 case WW_BG:
162 case WW_DK:
163 carrier_freq = FREQ_A2_CARRIER;
164 stereo_freq = FREQ_A2_STEREO;
165 dual_freq = FREQ_A2_DUAL;
166 break;
167 case WW_M:
168 carrier_freq = FREQ_A2M_CARRIER;
169 stereo_freq = FREQ_A2M_STEREO;
170 dual_freq = FREQ_A2M_DUAL;
171 break;
172 case WW_EIAJ:
173 carrier_freq = FREQ_EIAJ_CARRIER;
174 stereo_freq = FREQ_EIAJ_STEREO;
175 dual_freq = FREQ_EIAJ_DUAL;
176 break;
177 default:
178 printk(KERN_WARNING "%s/0: unsupported audio mode %d for %s\n",
179 core->name, core->tvaudio, __func__);
180 return UNSET;
181 }
182
183 carrier = freq_magnitude(x, N, carrier_freq);
184 stereo = freq_magnitude(x, N, stereo_freq);
185 dual = freq_magnitude(x, N, dual_freq);
186 noise = noise_magnitude(x, N, FREQ_NOISE_START, FREQ_NOISE_END);
187
188 dprintk(1, "detect a2/a2m/eiaj: carrier=%d, stereo=%d, dual=%d, "
189 "noise=%d\n", carrier, stereo, dual, noise);
190
191 if (stereo > dual)
192 ret = V4L2_TUNER_SUB_STEREO;
193 else
194 ret = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
195
196 if (core->tvaudio == WW_EIAJ) {
197 /* EIAJ checks may need adjustments */
198 if ((carrier > max(stereo, dual)*2) &&
199 (carrier < max(stereo, dual)*6) &&
200 (carrier > 20 && carrier < 200) &&
201 (max(stereo, dual) > min(stereo, dual))) {
202 /* For EIAJ the carrier is always present,
203 so we probably don't need noise detection */
204 return ret;
205 }
206 } else {
207 if ((carrier > max(stereo, dual)*2) &&
208 (carrier < max(stereo, dual)*8) &&
209 (carrier > 20 && carrier < 200) &&
210 (noise < 10) &&
211 (max(stereo, dual) > min(stereo, dual)*2)) {
212 return ret;
213 }
214 }
215 return V4L2_TUNER_SUB_MONO;
216}
217
218static s32 detect_btsc(struct cx88_core *core, s16 x[], u32 N)
219{
220 s32 sap_ref = freq_magnitude(x, N, FREQ_BTSC_SAP_REF);
221 s32 sap = freq_magnitude(x, N, FREQ_BTSC_SAP);
222 s32 dual_ref = freq_magnitude(x, N, FREQ_BTSC_DUAL_REF);
223 s32 dual = freq_magnitude(x, N, FREQ_BTSC_DUAL);
224 dprintk(1, "detect btsc: dual_ref=%d, dual=%d, sap_ref=%d, sap=%d"
225 "\n", dual_ref, dual, sap_ref, sap);
226 /* FIXME: Currently not supported */
227 return UNSET;
228}
229
230static s16 *read_rds_samples(struct cx88_core *core, u32 *N)
231{
232 struct sram_channel *srch = &cx88_sram_channels[SRAM_CH27];
233 s16 *samples;
234
235 unsigned int i;
236 unsigned int bpl = srch->fifo_size/AUD_RDS_LINES;
237 unsigned int spl = bpl/4;
238 unsigned int sample_count = spl*(AUD_RDS_LINES-1);
239
240 u32 current_address = cx_read(srch->ptr1_reg);
241 u32 offset = (current_address - srch->fifo_start + bpl);
242
243 dprintk(1, "read RDS samples: current_address=%08x (offset=%08x), "
244 "sample_count=%d, aud_intstat=%08x\n", current_address,
245 current_address - srch->fifo_start, sample_count,
246 cx_read(MO_AUD_INTSTAT));
247
248 samples = kmalloc(sizeof(s16)*sample_count, GFP_KERNEL);
249 if (!samples)
250 return NULL;
251
252 *N = sample_count;
253
254 for (i = 0; i < sample_count; i++) {
255 offset = offset % (AUD_RDS_LINES*bpl);
256 samples[i] = cx_read(srch->fifo_start + offset);
257 offset += 4;
258 }
259
260 if (dsp_debug >= 2) {
261 dprintk(2, "RDS samples dump: ");
262 for (i = 0; i < sample_count; i++)
263 printk("%hd ", samples[i]);
264 printk(".\n");
265 }
266
267 return samples;
268}
269
270s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core)
271{
272 s16 *samples;
273 u32 N = 0;
274 s32 ret = UNSET;
275
276 /* If audio RDS fifo is disabled, we can't read the samples */
277 if (!(cx_read(MO_AUD_DMACNTRL) & 0x04))
278 return ret;
279 if (!(cx_read(AUD_CTL) & EN_FMRADIO_EN_RDS))
280 return ret;
281
282 /* Wait at least 500 ms after an audio standard change */
283 if (time_before(jiffies, core->last_change + msecs_to_jiffies(500)))
284 return ret;
285
286 samples = read_rds_samples(core, &N);
287
288 if (!samples)
289 return ret;
290
291 switch (core->tvaudio) {
292 case WW_BG:
293 case WW_DK:
294 ret = detect_a2_a2m_eiaj(core, samples, N);
295 break;
296 case WW_BTSC:
297 ret = detect_btsc(core, samples, N);
298 break;
299 }
300
301 kfree(samples);
302
303 if (UNSET != ret)
304 dprintk(1, "stereo/sap detection result:%s%s%s\n",
305 (ret & V4L2_TUNER_SUB_MONO) ? " mono" : "",
306 (ret & V4L2_TUNER_SUB_STEREO) ? " stereo" : "",
307 (ret & V4L2_TUNER_SUB_LANG2) ? " dual" : "");
308
309 return ret;
310}
311EXPORT_SYMBOL(cx88_dsp_detect_stereo_sap);
312
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 9389cf290c1b..c44e87600219 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -1014,6 +1014,7 @@ static int dvb_register(struct cx8802_dev *dev)
1014 } 1014 }
1015 break; 1015 break;
1016 case CX88_BOARD_PINNACLE_HYBRID_PCTV: 1016 case CX88_BOARD_PINNACLE_HYBRID_PCTV:
1017 case CX88_BOARD_WINFAST_DTV1800H:
1017 fe0->dvb.frontend = dvb_attach(zl10353_attach, 1018 fe0->dvb.frontend = dvb_attach(zl10353_attach,
1018 &cx88_pinnacle_hybrid_pctv, 1019 &cx88_pinnacle_hybrid_pctv,
1019 &core->i2c_adap); 1020 &core->i2c_adap);
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 996b4ed5a4fc..ee1ca39db06a 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -180,6 +180,19 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
180 do_i2c_scan(core->name,&core->i2c_client); 180 do_i2c_scan(core->name,&core->i2c_client);
181 } else 181 } else
182 printk("%s: i2c register FAILED\n", core->name); 182 printk("%s: i2c register FAILED\n", core->name);
183
184 /* Instantiate the IR receiver device, if present */
185 if (0 == core->i2c_rc) {
186 struct i2c_board_info info;
187 const unsigned short addr_list[] = {
188 0x18, 0x6b, 0x71,
189 I2C_CLIENT_END
190 };
191
192 memset(&info, 0, sizeof(struct i2c_board_info));
193 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
194 i2c_new_probed_device(&core->i2c_adap, &info, addr_list);
195 }
183 return core->i2c_rc; 196 return core->i2c_rc;
184} 197}
185 198
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index ec05312a9b62..d91f5c51206d 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -91,6 +91,8 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
91 gpio=(gpio & 0x7fd) + (auxgpio & 0xef); 91 gpio=(gpio & 0x7fd) + (auxgpio & 0xef);
92 break; 92 break;
93 case CX88_BOARD_WINFAST_DTV1000: 93 case CX88_BOARD_WINFAST_DTV1000:
94 case CX88_BOARD_WINFAST_DTV1800H:
95 case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
94 gpio = (gpio & 0x6ff) | ((cx_read(MO_GP1_IO) << 8) & 0x900); 96 gpio = (gpio & 0x6ff) | ((cx_read(MO_GP1_IO) << 8) & 0x900);
95 auxgpio = gpio; 97 auxgpio = gpio;
96 break; 98 break;
@@ -217,11 +219,13 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
217 case CX88_BOARD_HAUPPAUGE_HVR4000LITE: 219 case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
218 case CX88_BOARD_PCHDTV_HD3000: 220 case CX88_BOARD_PCHDTV_HD3000:
219 case CX88_BOARD_PCHDTV_HD5500: 221 case CX88_BOARD_PCHDTV_HD5500:
222 case CX88_BOARD_HAUPPAUGE_IRONLY:
220 ir_codes = ir_codes_hauppauge_new; 223 ir_codes = ir_codes_hauppauge_new;
221 ir_type = IR_TYPE_RC5; 224 ir_type = IR_TYPE_RC5;
222 ir->sampling = 1; 225 ir->sampling = 1;
223 break; 226 break;
224 case CX88_BOARD_WINFAST_DTV2000H: 227 case CX88_BOARD_WINFAST_DTV2000H:
228 case CX88_BOARD_WINFAST_DTV1800H:
225 ir_codes = ir_codes_winfast; 229 ir_codes = ir_codes_winfast;
226 ir->gpio_addr = MO_GP0_IO; 230 ir->gpio_addr = MO_GP0_IO;
227 ir->mask_keycode = 0x8f8; 231 ir->mask_keycode = 0x8f8;
@@ -230,6 +234,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
230 break; 234 break;
231 case CX88_BOARD_WINFAST2000XP_EXPERT: 235 case CX88_BOARD_WINFAST2000XP_EXPERT:
232 case CX88_BOARD_WINFAST_DTV1000: 236 case CX88_BOARD_WINFAST_DTV1000:
237 case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
233 ir_codes = ir_codes_winfast; 238 ir_codes = ir_codes_winfast;
234 ir->gpio_addr = MO_GP0_IO; 239 ir->gpio_addr = MO_GP0_IO;
235 ir->mask_keycode = 0x8f8; 240 ir->mask_keycode = 0x8f8;
@@ -459,6 +464,7 @@ void cx88_ir_irq(struct cx88_core *core)
459 case CX88_BOARD_HAUPPAUGE_HVR4000LITE: 464 case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
460 case CX88_BOARD_PCHDTV_HD3000: 465 case CX88_BOARD_PCHDTV_HD3000:
461 case CX88_BOARD_PCHDTV_HD5500: 466 case CX88_BOARD_PCHDTV_HD5500:
467 case CX88_BOARD_HAUPPAUGE_IRONLY:
462 ircode = ir_decode_biphase(ir->samples, ir->scount, 5, 7); 468 ircode = ir_decode_biphase(ir->samples, ir->scount, 5, 7);
463 ir_dprintk("biphase decoded: %x\n", ircode); 469 ir_dprintk("biphase decoded: %x\n", ircode);
464 /* 470 /*
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 7dd506b987fe..e8316cf7f32f 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -163,6 +163,8 @@ static void set_audio_finish(struct cx88_core *core, u32 ctl)
163 /* unmute */ 163 /* unmute */
164 volume = cx_sread(SHADOW_AUD_VOL_CTL); 164 volume = cx_sread(SHADOW_AUD_VOL_CTL);
165 cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, volume); 165 cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, volume);
166
167 core->last_change = jiffies;
166} 168}
167 169
168/* ----------------------------------------------------------- */ 170/* ----------------------------------------------------------- */
@@ -745,6 +747,7 @@ void cx88_set_tvaudio(struct cx88_core *core)
745 break; 747 break;
746 case WW_BG: 748 case WW_BG:
747 case WW_DK: 749 case WW_DK:
750 case WW_M:
748 case WW_I: 751 case WW_I:
749 case WW_L: 752 case WW_L:
750 /* prepare all dsp registers */ 753 /* prepare all dsp registers */
@@ -756,6 +759,7 @@ void cx88_set_tvaudio(struct cx88_core *core)
756 if (0 == cx88_detect_nicam(core)) { 759 if (0 == cx88_detect_nicam(core)) {
757 /* fall back to fm / am mono */ 760 /* fall back to fm / am mono */
758 set_audio_standard_A2(core, EN_A2_FORCE_MONO1); 761 set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
762 core->audiomode_current = V4L2_TUNER_MODE_MONO;
759 core->use_nicam = 0; 763 core->use_nicam = 0;
760 } else { 764 } else {
761 core->use_nicam = 1; 765 core->use_nicam = 1;
@@ -787,6 +791,7 @@ void cx88_set_tvaudio(struct cx88_core *core)
787void cx88_newstation(struct cx88_core *core) 791void cx88_newstation(struct cx88_core *core)
788{ 792{
789 core->audiomode_manual = UNSET; 793 core->audiomode_manual = UNSET;
794 core->last_change = jiffies;
790} 795}
791 796
792void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t) 797void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
@@ -805,12 +810,50 @@ void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
805 aud_ctl_names[cx_read(AUD_CTL) & 63]); 810 aud_ctl_names[cx_read(AUD_CTL) & 63]);
806 core->astat = reg; 811 core->astat = reg;
807 812
808/* TODO 813 t->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_SAP |
809 Reading from AUD_STATUS is not enough 814 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
810 for auto-detecting sap/dual-fm/nicam. 815 t->rxsubchans = UNSET;
811 Add some code here later. 816 t->audmode = V4L2_TUNER_MODE_MONO;
812*/ 817
818 switch (mode) {
819 case 0:
820 t->audmode = V4L2_TUNER_MODE_STEREO;
821 break;
822 case 1:
823 t->audmode = V4L2_TUNER_MODE_LANG2;
824 break;
825 case 2:
826 t->audmode = V4L2_TUNER_MODE_MONO;
827 break;
828 case 3:
829 t->audmode = V4L2_TUNER_MODE_SAP;
830 break;
831 }
813 832
833 switch (core->tvaudio) {
834 case WW_BTSC:
835 case WW_BG:
836 case WW_DK:
837 case WW_M:
838 case WW_EIAJ:
839 if (!core->use_nicam) {
840 t->rxsubchans = cx88_dsp_detect_stereo_sap(core);
841 break;
842 }
843 break;
844 default:
845 /* nothing */
846 break;
847 }
848
849 /* If software stereo detection is not supported... */
850 if (UNSET == t->rxsubchans) {
851 t->rxsubchans = V4L2_TUNER_SUB_MONO;
852 /* If the hardware itself detected stereo, also return
853 stereo as an available subchannel */
854 if (V4L2_TUNER_MODE_STEREO == t->audmode)
855 t->rxsubchans |= V4L2_TUNER_SUB_STEREO;
856 }
814 return; 857 return;
815} 858}
816 859
@@ -847,6 +890,7 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
847 break; 890 break;
848 case WW_BG: 891 case WW_BG:
849 case WW_DK: 892 case WW_DK:
893 case WW_M:
850 case WW_I: 894 case WW_I:
851 case WW_L: 895 case WW_L:
852 if (1 == core->use_nicam) { 896 if (1 == core->use_nicam) {
@@ -872,20 +916,18 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
872 set_audio_standard_A2(core, EN_A2_FORCE_MONO1); 916 set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
873 } else { 917 } else {
874 /* TODO: Add A2 autodection */ 918 /* TODO: Add A2 autodection */
919 mask = 0x3f;
875 switch (mode) { 920 switch (mode) {
876 case V4L2_TUNER_MODE_MONO: 921 case V4L2_TUNER_MODE_MONO:
877 case V4L2_TUNER_MODE_LANG1: 922 case V4L2_TUNER_MODE_LANG1:
878 set_audio_standard_A2(core, 923 ctl = EN_A2_FORCE_MONO1;
879 EN_A2_FORCE_MONO1);
880 break; 924 break;
881 case V4L2_TUNER_MODE_LANG2: 925 case V4L2_TUNER_MODE_LANG2:
882 set_audio_standard_A2(core, 926 ctl = EN_A2_FORCE_MONO2;
883 EN_A2_FORCE_MONO2);
884 break; 927 break;
885 case V4L2_TUNER_MODE_STEREO: 928 case V4L2_TUNER_MODE_STEREO:
886 case V4L2_TUNER_MODE_LANG1_LANG2: 929 case V4L2_TUNER_MODE_LANG1_LANG2:
887 set_audio_standard_A2(core, 930 ctl = EN_A2_FORCE_STEREO;
888 EN_A2_FORCE_STEREO);
889 break; 931 break;
890 } 932 }
891 } 933 }
@@ -932,24 +974,39 @@ int cx88_audio_thread(void *data)
932 break; 974 break;
933 try_to_freeze(); 975 try_to_freeze();
934 976
935 /* just monitor the audio status for now ... */ 977 switch (core->tvaudio) {
936 memset(&t, 0, sizeof(t)); 978 case WW_BG:
937 cx88_get_stereo(core, &t); 979 case WW_DK:
938 980 case WW_M:
939 if (UNSET != core->audiomode_manual) 981 case WW_I:
940 /* manually set, don't do anything. */ 982 case WW_L:
941 continue; 983 if (core->use_nicam)
942 984 goto hw_autodetect;
943 /* monitor signal */ 985
944 if (t.rxsubchans & V4L2_TUNER_SUB_STEREO) 986 /* just monitor the audio status for now ... */
945 mode = V4L2_TUNER_MODE_STEREO; 987 memset(&t, 0, sizeof(t));
946 else 988 cx88_get_stereo(core, &t);
947 mode = V4L2_TUNER_MODE_MONO; 989
948 if (mode == core->audiomode_current) 990 if (UNSET != core->audiomode_manual)
949 continue; 991 /* manually set, don't do anything. */
950 992 continue;
951 /* automatically switch to best available mode */ 993
952 cx88_set_stereo(core, mode, 0); 994 /* monitor signal and set stereo if available */
995 if (t.rxsubchans & V4L2_TUNER_SUB_STEREO)
996 mode = V4L2_TUNER_MODE_STEREO;
997 else
998 mode = V4L2_TUNER_MODE_MONO;
999 if (mode == core->audiomode_current)
1000 continue;
1001 /* automatically switch to best available mode */
1002 cx88_set_stereo(core, mode, 0);
1003 break;
1004 default:
1005hw_autodetect:
1006 /* stereo autodetection is supported by hardware so
1007 we don't need to do it manually. Do nothing. */
1008 break;
1009 }
953 } 1010 }
954 1011
955 dprintk("cx88: tvaudio thread exiting\n"); 1012 dprintk("cx88: tvaudio thread exiting\n");
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index b993d42fe73c..0ccac702bea4 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -869,6 +869,7 @@ video_poll(struct file *file, struct poll_table_struct *wait)
869{ 869{
870 struct cx8800_fh *fh = file->private_data; 870 struct cx8800_fh *fh = file->private_data;
871 struct cx88_buffer *buf; 871 struct cx88_buffer *buf;
872 unsigned int rc = POLLERR;
872 873
873 if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) { 874 if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
874 if (!res_get(fh->dev,fh,RESOURCE_VBI)) 875 if (!res_get(fh->dev,fh,RESOURCE_VBI))
@@ -876,22 +877,27 @@ video_poll(struct file *file, struct poll_table_struct *wait)
876 return videobuf_poll_stream(file, &fh->vbiq, wait); 877 return videobuf_poll_stream(file, &fh->vbiq, wait);
877 } 878 }
878 879
880 mutex_lock(&fh->vidq.vb_lock);
879 if (res_check(fh,RESOURCE_VIDEO)) { 881 if (res_check(fh,RESOURCE_VIDEO)) {
880 /* streaming capture */ 882 /* streaming capture */
881 if (list_empty(&fh->vidq.stream)) 883 if (list_empty(&fh->vidq.stream))
882 return POLLERR; 884 goto done;
883 buf = list_entry(fh->vidq.stream.next,struct cx88_buffer,vb.stream); 885 buf = list_entry(fh->vidq.stream.next,struct cx88_buffer,vb.stream);
884 } else { 886 } else {
885 /* read() capture */ 887 /* read() capture */
886 buf = (struct cx88_buffer*)fh->vidq.read_buf; 888 buf = (struct cx88_buffer*)fh->vidq.read_buf;
887 if (NULL == buf) 889 if (NULL == buf)
888 return POLLERR; 890 goto done;
889 } 891 }
890 poll_wait(file, &buf->vb.done, wait); 892 poll_wait(file, &buf->vb.done, wait);
891 if (buf->vb.state == VIDEOBUF_DONE || 893 if (buf->vb.state == VIDEOBUF_DONE ||
892 buf->vb.state == VIDEOBUF_ERROR) 894 buf->vb.state == VIDEOBUF_ERROR)
893 return POLLIN|POLLRDNORM; 895 rc = POLLIN|POLLRDNORM;
894 return 0; 896 else
897 rc = 0;
898done:
899 mutex_unlock(&fh->vidq.vb_lock);
900 return rc;
895} 901}
896 902
897static int video_release(struct file *file) 903static int video_release(struct file *file)
@@ -926,8 +932,10 @@ static int video_release(struct file *file)
926 file->private_data = NULL; 932 file->private_data = NULL;
927 kfree(fh); 933 kfree(fh);
928 934
935 mutex_lock(&dev->core->lock);
929 if(atomic_dec_and_test(&dev->core->users)) 936 if(atomic_dec_and_test(&dev->core->users))
930 call_all(dev->core, tuner, s_standby); 937 call_all(dev->core, tuner, s_standby);
938 mutex_unlock(&dev->core->lock);
931 939
932 return 0; 940 return 0;
933} 941}
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 7724d168fc04..9d83762163f5 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -65,6 +65,8 @@
65#define VBI_LINE_COUNT 17 65#define VBI_LINE_COUNT 17
66#define VBI_LINE_LENGTH 2048 66#define VBI_LINE_LENGTH 2048
67 67
68#define AUD_RDS_LINES 4
69
68/* need "shadow" registers for some write-only ones ... */ 70/* need "shadow" registers for some write-only ones ... */
69#define SHADOW_AUD_VOL_CTL 1 71#define SHADOW_AUD_VOL_CTL 1
70#define SHADOW_AUD_BAL_CTL 2 72#define SHADOW_AUD_BAL_CTL 2
@@ -132,6 +134,7 @@ struct cx88_ctrl {
132#define SRAM_CH25 4 /* audio */ 134#define SRAM_CH25 4 /* audio */
133#define SRAM_CH26 5 135#define SRAM_CH26 5
134#define SRAM_CH28 6 /* mpeg */ 136#define SRAM_CH28 6 /* mpeg */
137#define SRAM_CH27 7 /* audio rds */
135/* more */ 138/* more */
136 139
137struct sram_channel { 140struct sram_channel {
@@ -232,6 +235,8 @@ extern struct sram_channel cx88_sram_channels[];
232#define CX88_BOARD_TBS_8910 77 235#define CX88_BOARD_TBS_8910 77
233#define CX88_BOARD_PROF_6200 78 236#define CX88_BOARD_PROF_6200 78
234#define CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII 79 237#define CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII 79
238#define CX88_BOARD_HAUPPAUGE_IRONLY 80
239#define CX88_BOARD_WINFAST_DTV1800H 81
235 240
236enum cx88_itype { 241enum cx88_itype {
237 CX88_VMUX_COMPOSITE1 = 1, 242 CX88_VMUX_COMPOSITE1 = 1,
@@ -350,6 +355,7 @@ struct cx88_core {
350 u32 input; 355 u32 input;
351 u32 astat; 356 u32 astat;
352 u32 use_nicam; 357 u32 use_nicam;
358 unsigned long last_change;
353 359
354 /* IR remote control state */ 360 /* IR remote control state */
355 struct cx88_IR *ir; 361 struct cx88_IR *ir;
@@ -652,6 +658,7 @@ extern void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl);
652#define WW_I2SPT 8 658#define WW_I2SPT 8
653#define WW_FM 9 659#define WW_FM 9
654#define WW_I2SADC 10 660#define WW_I2SADC 10
661#define WW_M 11
655 662
656void cx88_set_tvaudio(struct cx88_core *core); 663void cx88_set_tvaudio(struct cx88_core *core);
657void cx88_newstation(struct cx88_core *core); 664void cx88_newstation(struct cx88_core *core);
@@ -665,6 +672,11 @@ struct cx8802_dev *cx8802_get_device(int minor);
665struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype); 672struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype);
666 673
667/* ----------------------------------------------------------- */ 674/* ----------------------------------------------------------- */
675/* cx88-dsp.c */
676
677s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core);
678
679/* ----------------------------------------------------------- */
668/* cx88-input.c */ 680/* cx88-input.c */
669 681
670int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci); 682int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci);
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index ba3709bec3f0..ec2f45dde164 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -747,8 +747,14 @@ static const struct file_operations dabusb_fops =
747 .release = dabusb_release, 747 .release = dabusb_release,
748}; 748};
749 749
750static char *dabusb_nodename(struct device *dev)
751{
752 return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
753}
754
750static struct usb_class_driver dabusb_class = { 755static struct usb_class_driver dabusb_class = {
751 .name = "dabusb%d", 756 .name = "dabusb%d",
757 .nodename = dabusb_nodename,
752 .fops = &dabusb_fops, 758 .fops = &dabusb_fops,
753 .minor_base = DABUSB_MINOR, 759 .minor_base = DABUSB_MINOR,
754}; 760};
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index 0131322475bf..7bd8a70f0a0b 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -339,6 +339,11 @@ static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream)
339 mutex_lock(&dev->lock); 339 mutex_lock(&dev->lock);
340 dev->adev.users--; 340 dev->adev.users--;
341 em28xx_audio_analog_set(dev); 341 em28xx_audio_analog_set(dev);
342 if (substream->runtime->dma_area) {
343 dprintk("freeing\n");
344 vfree(substream->runtime->dma_area);
345 substream->runtime->dma_area = NULL;
346 }
342 mutex_unlock(&dev->lock); 347 mutex_unlock(&dev->lock);
343 348
344 return 0; 349 return 0;
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 7c70738479dd..00cc791a9e44 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -49,6 +49,11 @@ static unsigned int disable_ir;
49module_param(disable_ir, int, 0444); 49module_param(disable_ir, int, 0444);
50MODULE_PARM_DESC(disable_ir, "disable infrared remote support"); 50MODULE_PARM_DESC(disable_ir, "disable infrared remote support");
51 51
52static unsigned int disable_usb_speed_check;
53module_param(disable_usb_speed_check, int, 0444);
54MODULE_PARM_DESC(disable_usb_speed_check,
55 "override min bandwidth requirement of 480M bps");
56
52static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET }; 57static unsigned int card[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
53module_param_array(card, int, NULL, 0444); 58module_param_array(card, int, NULL, 0444);
54MODULE_PARM_DESC(card, "card type"); 59MODULE_PARM_DESC(card, "card type");
@@ -104,6 +109,24 @@ static struct em28xx_reg_seq em2880_msi_digivox_ad_analog[] = {
104/* Board - EM2870 Kworld 355u 109/* Board - EM2870 Kworld 355u
105 Analog - No input analog */ 110 Analog - No input analog */
106 111
112/* Board - EM2882 Kworld 315U digital */
113static struct em28xx_reg_seq em2882_kworld_315u_digital[] = {
114 {EM28XX_R08_GPIO, 0xff, 0xff, 10},
115 {EM28XX_R08_GPIO, 0xfe, 0xff, 10},
116 {EM2880_R04_GPO, 0x04, 0xff, 10},
117 {EM2880_R04_GPO, 0x0c, 0xff, 10},
118 {EM28XX_R08_GPIO, 0x7e, 0xff, 10},
119 { -1, -1, -1, -1},
120};
121
122static struct em28xx_reg_seq em2882_kworld_315u_tuner_gpio[] = {
123 {EM2880_R04_GPO, 0x08, 0xff, 10},
124 {EM2880_R04_GPO, 0x0c, 0xff, 10},
125 {EM2880_R04_GPO, 0x08, 0xff, 10},
126 {EM2880_R04_GPO, 0x0c, 0xff, 10},
127 { -1, -1, -1, -1},
128};
129
107static struct em28xx_reg_seq kworld_330u_analog[] = { 130static struct em28xx_reg_seq kworld_330u_analog[] = {
108 {EM28XX_R08_GPIO, 0x6d, ~EM_GPIO_4, 10}, 131 {EM28XX_R08_GPIO, 0x6d, ~EM_GPIO_4, 10},
109 {EM2880_R04_GPO, 0x00, 0xff, 10}, 132 {EM2880_R04_GPO, 0x00, 0xff, 10},
@@ -140,6 +163,16 @@ static struct em28xx_reg_seq compro_mute_gpio[] = {
140 { -1, -1, -1, -1}, 163 { -1, -1, -1, -1},
141}; 164};
142 165
166/* Terratec AV350 */
167static struct em28xx_reg_seq terratec_av350_mute_gpio[] = {
168 {EM28XX_R08_GPIO, 0xff, 0x7f, 10},
169 { -1, -1, -1, -1},
170};
171
172static struct em28xx_reg_seq terratec_av350_unmute_gpio[] = {
173 {EM28XX_R08_GPIO, 0xff, 0xff, 10},
174 { -1, -1, -1, -1},
175};
143/* 176/*
144 * Board definitions 177 * Board definitions
145 */ 178 */
@@ -992,16 +1025,17 @@ struct em28xx_board em28xx_boards[] = {
992 .amux = EM28XX_AMUX_LINE_IN, 1025 .amux = EM28XX_AMUX_LINE_IN,
993 } }, 1026 } },
994 }, 1027 },
995 [EM2860_BOARD_POINTNIX_INTRAORAL_CAMERA] = { 1028 [EM2860_BOARD_SAA711X_REFERENCE_DESIGN] = {
996 .name = "PointNix Intra-Oral Camera", 1029 .name = "EM2860/SAA711X Reference Design",
997 .has_snapshot_button = 1, 1030 .has_snapshot_button = 1,
998 .tda9887_conf = TDA9887_PRESENT,
999 .tuner_type = TUNER_ABSENT, 1031 .tuner_type = TUNER_ABSENT,
1000 .decoder = EM28XX_SAA711X, 1032 .decoder = EM28XX_SAA711X,
1001 .input = { { 1033 .input = { {
1002 .type = EM28XX_VMUX_SVIDEO, 1034 .type = EM28XX_VMUX_SVIDEO,
1003 .vmux = SAA7115_SVIDEO3, 1035 .vmux = SAA7115_SVIDEO3,
1004 .amux = EM28XX_AMUX_VIDEO, 1036 }, {
1037 .type = EM28XX_VMUX_COMPOSITE1,
1038 .vmux = SAA7115_COMPOSITE0,
1005 } }, 1039 } },
1006 }, 1040 },
1007 [EM2880_BOARD_MSI_DIGIVOX_AD] = { 1041 [EM2880_BOARD_MSI_DIGIVOX_AD] = {
@@ -1095,6 +1129,63 @@ struct em28xx_board em28xx_boards[] = {
1095 .gpio = default_analog, 1129 .gpio = default_analog,
1096 } }, 1130 } },
1097 }, 1131 },
1132 [EM2882_BOARD_KWORLD_ATSC_315U] = {
1133 .name = "KWorld ATSC 315U HDTV TV Box",
1134 .valid = EM28XX_BOARD_NOT_VALIDATED,
1135 .tuner_type = TUNER_THOMSON_DTT761X,
1136 .tuner_gpio = em2882_kworld_315u_tuner_gpio,
1137 .tda9887_conf = TDA9887_PRESENT,
1138 .decoder = EM28XX_SAA711X,
1139 .has_dvb = 1,
1140 .dvb_gpio = em2882_kworld_315u_digital,
1141 .xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
1142 .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE,
1143 /* Analog mode - still not ready */
1144 /*.input = { {
1145 .type = EM28XX_VMUX_TELEVISION,
1146 .vmux = SAA7115_COMPOSITE2,
1147 .amux = EM28XX_AMUX_VIDEO,
1148 .gpio = em2882_kworld_315u_analog,
1149 .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO,
1150 }, {
1151 .type = EM28XX_VMUX_COMPOSITE1,
1152 .vmux = SAA7115_COMPOSITE0,
1153 .amux = EM28XX_AMUX_LINE_IN,
1154 .gpio = em2882_kworld_315u_analog1,
1155 .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO,
1156 }, {
1157 .type = EM28XX_VMUX_SVIDEO,
1158 .vmux = SAA7115_SVIDEO3,
1159 .amux = EM28XX_AMUX_LINE_IN,
1160 .gpio = em2882_kworld_315u_analog1,
1161 .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO,
1162 } }, */
1163 },
1164 [EM2880_BOARD_EMPIRE_DUAL_TV] = {
1165 .name = "Empire dual TV",
1166 .tuner_type = TUNER_XC2028,
1167 .tuner_gpio = default_tuner_gpio,
1168 .has_dvb = 1,
1169 .dvb_gpio = default_digital,
1170 .mts_firmware = 1,
1171 .decoder = EM28XX_TVP5150,
1172 .input = { {
1173 .type = EM28XX_VMUX_TELEVISION,
1174 .vmux = TVP5150_COMPOSITE0,
1175 .amux = EM28XX_AMUX_VIDEO,
1176 .gpio = default_analog,
1177 }, {
1178 .type = EM28XX_VMUX_COMPOSITE1,
1179 .vmux = TVP5150_COMPOSITE1,
1180 .amux = EM28XX_AMUX_LINE_IN,
1181 .gpio = default_analog,
1182 }, {
1183 .type = EM28XX_VMUX_SVIDEO,
1184 .vmux = TVP5150_SVIDEO,
1185 .amux = EM28XX_AMUX_LINE_IN,
1186 .gpio = default_analog,
1187 } },
1188 },
1098 [EM2881_BOARD_DNT_DA2_HYBRID] = { 1189 [EM2881_BOARD_DNT_DA2_HYBRID] = {
1099 .name = "DNT DA2 Hybrid", 1190 .name = "DNT DA2 Hybrid",
1100 .valid = EM28XX_BOARD_NOT_VALIDATED, 1191 .valid = EM28XX_BOARD_NOT_VALIDATED,
@@ -1322,6 +1413,42 @@ struct em28xx_board em28xx_boards[] = {
1322 .amux = EM28XX_AMUX_VIDEO, 1413 .amux = EM28XX_AMUX_VIDEO,
1323 } }, 1414 } },
1324 }, 1415 },
1416 [EM2860_BOARD_TERRATEC_GRABBY] = {
1417 .name = "Terratec Grabby",
1418 .vchannels = 2,
1419 .tuner_type = TUNER_ABSENT,
1420 .decoder = EM28XX_SAA711X,
1421 .xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
1422 .input = { {
1423 .type = EM28XX_VMUX_COMPOSITE1,
1424 .vmux = SAA7115_COMPOSITE0,
1425 .amux = EM28XX_AMUX_VIDEO2,
1426 }, {
1427 .type = EM28XX_VMUX_SVIDEO,
1428 .vmux = SAA7115_SVIDEO3,
1429 .amux = EM28XX_AMUX_VIDEO2,
1430 } },
1431 },
1432 [EM2860_BOARD_TERRATEC_AV350] = {
1433 .name = "Terratec AV350",
1434 .vchannels = 2,
1435 .tuner_type = TUNER_ABSENT,
1436 .decoder = EM28XX_TVP5150,
1437 .xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
1438 .mute_gpio = terratec_av350_mute_gpio,
1439 .input = { {
1440 .type = EM28XX_VMUX_COMPOSITE1,
1441 .vmux = TVP5150_COMPOSITE1,
1442 .amux = EM28XX_AUDIO_SRC_LINE,
1443 .gpio = terratec_av350_unmute_gpio,
1444
1445 }, {
1446 .type = EM28XX_VMUX_SVIDEO,
1447 .vmux = TVP5150_SVIDEO,
1448 .amux = EM28XX_AUDIO_SRC_LINE,
1449 .gpio = terratec_av350_unmute_gpio,
1450 } },
1451 },
1325}; 1452};
1326const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); 1453const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
1327 1454
@@ -1355,6 +1482,8 @@ struct usb_device_id em28xx_id_table[] = {
1355 .driver_info = EM2880_BOARD_KWORLD_DVB_305U }, 1482 .driver_info = EM2880_BOARD_KWORLD_DVB_305U },
1356 { USB_DEVICE(0xeb1a, 0xe310), 1483 { USB_DEVICE(0xeb1a, 0xe310),
1357 .driver_info = EM2880_BOARD_MSI_DIGIVOX_AD }, 1484 .driver_info = EM2880_BOARD_MSI_DIGIVOX_AD },
1485 { USB_DEVICE(0xeb1a, 0xa313),
1486 .driver_info = EM2882_BOARD_KWORLD_ATSC_315U },
1358 { USB_DEVICE(0xeb1a, 0xa316), 1487 { USB_DEVICE(0xeb1a, 0xa316),
1359 .driver_info = EM2883_BOARD_KWORLD_HYBRID_330U }, 1488 .driver_info = EM2883_BOARD_KWORLD_HYBRID_330U },
1360 { USB_DEVICE(0xeb1a, 0xe320), 1489 { USB_DEVICE(0xeb1a, 0xe320),
@@ -1385,6 +1514,10 @@ struct usb_device_id em28xx_id_table[] = {
1385 .driver_info = EM2870_BOARD_TERRATEC_XS }, 1514 .driver_info = EM2870_BOARD_TERRATEC_XS },
1386 { USB_DEVICE(0x0ccd, 0x0047), 1515 { USB_DEVICE(0x0ccd, 0x0047),
1387 .driver_info = EM2880_BOARD_TERRATEC_PRODIGY_XS }, 1516 .driver_info = EM2880_BOARD_TERRATEC_PRODIGY_XS },
1517 { USB_DEVICE(0x0ccd, 0x0084),
1518 .driver_info = EM2860_BOARD_TERRATEC_AV350 },
1519 { USB_DEVICE(0x0ccd, 0x0096),
1520 .driver_info = EM2860_BOARD_TERRATEC_GRABBY },
1388 { USB_DEVICE(0x185b, 0x2870), 1521 { USB_DEVICE(0x185b, 0x2870),
1389 .driver_info = EM2870_BOARD_COMPRO_VIDEOMATE }, 1522 .driver_info = EM2870_BOARD_COMPRO_VIDEOMATE },
1390 { USB_DEVICE(0x185b, 0x2041), 1523 { USB_DEVICE(0x185b, 0x2041),
@@ -1437,13 +1570,14 @@ static struct em28xx_hash_table em28xx_eeprom_hash[] = {
1437 {0x6ce05a8f, EM2820_BOARD_PROLINK_PLAYTV_USB2, TUNER_YMEC_TVF_5533MF}, 1570 {0x6ce05a8f, EM2820_BOARD_PROLINK_PLAYTV_USB2, TUNER_YMEC_TVF_5533MF},
1438 {0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF}, 1571 {0x72cc5a8b, EM2820_BOARD_PROLINK_PLAYTV_BOX4_USB2, TUNER_YMEC_TVF_5533MF},
1439 {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028}, 1572 {0x966a0441, EM2880_BOARD_KWORLD_DVB_310U, TUNER_XC2028},
1573 {0x9567eb1a, EM2880_BOARD_EMPIRE_DUAL_TV, TUNER_XC2028},
1440}; 1574};
1441 1575
1442/* I2C devicelist hash table for devices with generic USB IDs */ 1576/* I2C devicelist hash table for devices with generic USB IDs */
1443static struct em28xx_hash_table em28xx_i2c_hash[] = { 1577static struct em28xx_hash_table em28xx_i2c_hash[] = {
1444 {0xb06a32c3, EM2800_BOARD_TERRATEC_CINERGY_200, TUNER_LG_PAL_NEW_TAPC}, 1578 {0xb06a32c3, EM2800_BOARD_TERRATEC_CINERGY_200, TUNER_LG_PAL_NEW_TAPC},
1445 {0xf51200e3, EM2800_BOARD_VGEAR_POCKETTV, TUNER_LG_PAL_NEW_TAPC}, 1579 {0xf51200e3, EM2800_BOARD_VGEAR_POCKETTV, TUNER_LG_PAL_NEW_TAPC},
1446 {0x1ba50080, EM2860_BOARD_POINTNIX_INTRAORAL_CAMERA, TUNER_ABSENT}, 1580 {0x1ba50080, EM2860_BOARD_SAA711X_REFERENCE_DESIGN, TUNER_ABSENT},
1447 {0xc51200e3, EM2820_BOARD_GADMEI_TVR200, TUNER_LG_PAL_NEW_TAPC}, 1581 {0xc51200e3, EM2820_BOARD_GADMEI_TVR200, TUNER_LG_PAL_NEW_TAPC},
1448}; 1582};
1449 1583
@@ -1619,6 +1753,17 @@ void em28xx_pre_card_setup(struct em28xx *dev)
1619 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfd); 1753 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfd);
1620 break; 1754 break;
1621 1755
1756 case EM2882_BOARD_KWORLD_ATSC_315U:
1757 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xff);
1758 msleep(10);
1759 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfe);
1760 msleep(10);
1761 em28xx_write_reg(dev, EM2880_R04_GPO, 0x00);
1762 msleep(10);
1763 em28xx_write_reg(dev, EM2880_R04_GPO, 0x08);
1764 msleep(10);
1765 break;
1766
1622 case EM2860_BOARD_KAIOMY_TVNPC_U2: 1767 case EM2860_BOARD_KAIOMY_TVNPC_U2:
1623 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x07", 1); 1768 em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x07", 1);
1624 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1); 1769 em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
@@ -1664,6 +1809,7 @@ static void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
1664 ctl->mts = em28xx_boards[dev->model].mts_firmware; 1809 ctl->mts = em28xx_boards[dev->model].mts_firmware;
1665 1810
1666 switch (dev->model) { 1811 switch (dev->model) {
1812 case EM2880_BOARD_EMPIRE_DUAL_TV:
1667 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: 1813 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
1668 ctl->demod = XC3028_FE_ZARLINK456; 1814 ctl->demod = XC3028_FE_ZARLINK456;
1669 break; 1815 break;
@@ -1835,12 +1981,20 @@ static int em28xx_hint_board(struct em28xx *dev)
1835} 1981}
1836 1982
1837/* ----------------------------------------------------------------------- */ 1983/* ----------------------------------------------------------------------- */
1838void em28xx_set_ir(struct em28xx *dev, struct IR_i2c *ir) 1984void em28xx_register_i2c_ir(struct em28xx *dev)
1839{ 1985{
1840 if (disable_ir) { 1986 struct i2c_board_info info;
1841 ir->get_key = NULL; 1987 struct IR_i2c_init_data init_data;
1842 return ; 1988 const unsigned short addr_list[] = {
1843 } 1989 0x30, 0x47, I2C_CLIENT_END
1990 };
1991
1992 if (disable_ir)
1993 return;
1994
1995 memset(&info, 0, sizeof(struct i2c_board_info));
1996 memset(&init_data, 0, sizeof(struct IR_i2c_init_data));
1997 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
1844 1998
1845 /* detect & configure */ 1999 /* detect & configure */
1846 switch (dev->model) { 2000 switch (dev->model) {
@@ -1850,22 +2004,19 @@ void em28xx_set_ir(struct em28xx *dev, struct IR_i2c *ir)
1850 break; 2004 break;
1851 case (EM2800_BOARD_TERRATEC_CINERGY_200): 2005 case (EM2800_BOARD_TERRATEC_CINERGY_200):
1852 case (EM2820_BOARD_TERRATEC_CINERGY_250): 2006 case (EM2820_BOARD_TERRATEC_CINERGY_250):
1853 ir->ir_codes = ir_codes_em_terratec; 2007 init_data.ir_codes = ir_codes_em_terratec;
1854 ir->get_key = em28xx_get_key_terratec; 2008 init_data.get_key = em28xx_get_key_terratec;
1855 snprintf(ir->c.name, sizeof(ir->c.name), 2009 init_data.name = "i2c IR (EM28XX Terratec)";
1856 "i2c IR (EM28XX Terratec)");
1857 break; 2010 break;
1858 case (EM2820_BOARD_PINNACLE_USB_2): 2011 case (EM2820_BOARD_PINNACLE_USB_2):
1859 ir->ir_codes = ir_codes_pinnacle_grey; 2012 init_data.ir_codes = ir_codes_pinnacle_grey;
1860 ir->get_key = em28xx_get_key_pinnacle_usb_grey; 2013 init_data.get_key = em28xx_get_key_pinnacle_usb_grey;
1861 snprintf(ir->c.name, sizeof(ir->c.name), 2014 init_data.name = "i2c IR (EM28XX Pinnacle PCTV)";
1862 "i2c IR (EM28XX Pinnacle PCTV)");
1863 break; 2015 break;
1864 case (EM2820_BOARD_HAUPPAUGE_WINTV_USB_2): 2016 case (EM2820_BOARD_HAUPPAUGE_WINTV_USB_2):
1865 ir->ir_codes = ir_codes_hauppauge_new; 2017 init_data.ir_codes = ir_codes_hauppauge_new;
1866 ir->get_key = em28xx_get_key_em_haup; 2018 init_data.get_key = em28xx_get_key_em_haup;
1867 snprintf(ir->c.name, sizeof(ir->c.name), 2019 init_data.name = "i2c IR (EM2840 Hauppauge)";
1868 "i2c IR (EM2840 Hauppauge)");
1869 break; 2020 break;
1870 case (EM2820_BOARD_MSI_VOX_USB_2): 2021 case (EM2820_BOARD_MSI_VOX_USB_2):
1871 break; 2022 break;
@@ -1876,6 +2027,10 @@ void em28xx_set_ir(struct em28xx *dev, struct IR_i2c *ir)
1876 case (EM2800_BOARD_GRABBEEX_USB2800): 2027 case (EM2800_BOARD_GRABBEEX_USB2800):
1877 break; 2028 break;
1878 } 2029 }
2030
2031 if (init_data.name)
2032 info.platform_data = &init_data;
2033 i2c_new_probed_device(&dev->i2c_adap, &info, addr_list);
1879} 2034}
1880 2035
1881void em28xx_card_setup(struct em28xx *dev) 2036void em28xx_card_setup(struct em28xx *dev)
@@ -1886,6 +2041,9 @@ void em28xx_card_setup(struct em28xx *dev)
1886 if (em28xx_boards[dev->model].tuner_addr) 2041 if (em28xx_boards[dev->model].tuner_addr)
1887 dev->tuner_addr = em28xx_boards[dev->model].tuner_addr; 2042 dev->tuner_addr = em28xx_boards[dev->model].tuner_addr;
1888 2043
2044 if (em28xx_boards[dev->model].tda9887_conf)
2045 dev->tda9887_conf = em28xx_boards[dev->model].tda9887_conf;
2046
1889 /* request some modules */ 2047 /* request some modules */
1890 switch (dev->model) { 2048 switch (dev->model) {
1891 case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2: 2049 case EM2820_BOARD_HAUPPAUGE_WINTV_USB_2:
@@ -1915,6 +2073,12 @@ void em28xx_card_setup(struct em28xx *dev)
1915#endif 2073#endif
1916 break; 2074 break;
1917 } 2075 }
2076 case EM2882_BOARD_KWORLD_ATSC_315U:
2077 em28xx_write_reg(dev, 0x0d, 0x42);
2078 msleep(10);
2079 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xfd);
2080 msleep(10);
2081 break;
1918 case EM2820_BOARD_KWORLD_PVRTV2800RF: 2082 case EM2820_BOARD_KWORLD_PVRTV2800RF:
1919 /* GPIO enables sound on KWORLD PVR TV 2800RF */ 2083 /* GPIO enables sound on KWORLD PVR TV 2800RF */
1920 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xf9); 2084 em28xx_write_reg(dev, EM28XX_R08_GPIO, 0xf9);
@@ -2279,6 +2443,20 @@ static int em28xx_usb_probe(struct usb_interface *interface,
2279 ifnum, 2443 ifnum,
2280 interface->altsetting->desc.bInterfaceNumber); 2444 interface->altsetting->desc.bInterfaceNumber);
2281 2445
2446 /*
2447 * Make sure we have 480 Mbps of bandwidth, otherwise things like
2448 * video stream wouldn't likely work, since 12 Mbps is generally
2449 * not enough even for most Digital TV streams.
2450 */
2451 if (udev->speed != USB_SPEED_HIGH && disable_usb_speed_check == 0) {
2452 printk(DRIVER_NAME ": Device initialization failed.\n");
2453 printk(DRIVER_NAME ": Device must be connected to a high-speed"
2454 " USB 2.0 port.\n");
2455 em28xx_devused &= ~(1<<nr);
2456 retval = -ENODEV;
2457 goto err;
2458 }
2459
2282 if (nr >= EM28XX_MAXBOARDS) { 2460 if (nr >= EM28XX_MAXBOARDS) {
2283 printk(DRIVER_NAME ": Supports only %i em28xx boards.\n", 2461 printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
2284 EM28XX_MAXBOARDS); 2462 EM28XX_MAXBOARDS);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 192b76cdd5d7..c8d7ce8fbd36 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -500,18 +500,21 @@ int em28xx_audio_setup(struct em28xx *dev)
500 500
501 /* See how this device is configured */ 501 /* See how this device is configured */
502 cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG); 502 cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
503 if (cfg < 0) 503 em28xx_info("Config register raw data: 0x%02x\n", cfg);
504 if (cfg < 0) {
505 /* Register read error? */
504 cfg = EM28XX_CHIPCFG_AC97; /* Be conservative */ 506 cfg = EM28XX_CHIPCFG_AC97; /* Be conservative */
505 else 507 } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == 0x00) {
506 em28xx_info("Config register raw data: 0x%02x\n", cfg); 508 /* The device doesn't have vendor audio at all */
507 509 dev->has_alsa_audio = 0;
508 if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == 510 dev->audio_mode.has_audio = 0;
509 EM28XX_CHIPCFG_I2S_3_SAMPRATES) { 511 return 0;
512 } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
513 EM28XX_CHIPCFG_I2S_3_SAMPRATES) {
510 em28xx_info("I2S Audio (3 sample rates)\n"); 514 em28xx_info("I2S Audio (3 sample rates)\n");
511 dev->audio_mode.i2s_3rates = 1; 515 dev->audio_mode.i2s_3rates = 1;
512 } 516 } else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
513 if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == 517 EM28XX_CHIPCFG_I2S_5_SAMPRATES) {
514 EM28XX_CHIPCFG_I2S_5_SAMPRATES) {
515 em28xx_info("I2S Audio (5 sample rates)\n"); 518 em28xx_info("I2S Audio (5 sample rates)\n");
516 dev->audio_mode.i2s_5rates = 1; 519 dev->audio_mode.i2s_5rates = 1;
517 } 520 }
@@ -938,7 +941,7 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
938 dev->isoc_ctl.transfer_buffer = kzalloc(sizeof(void *)*num_bufs, 941 dev->isoc_ctl.transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
939 GFP_KERNEL); 942 GFP_KERNEL);
940 if (!dev->isoc_ctl.transfer_buffer) { 943 if (!dev->isoc_ctl.transfer_buffer) {
941 em28xx_errdev("cannot allocate memory for usbtransfer\n"); 944 em28xx_errdev("cannot allocate memory for usb transfer\n");
942 kfree(dev->isoc_ctl.urb); 945 kfree(dev->isoc_ctl.urb);
943 return -ENOMEM; 946 return -ENOMEM;
944 } 947 }
@@ -1012,6 +1015,41 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
1012} 1015}
1013EXPORT_SYMBOL_GPL(em28xx_init_isoc); 1016EXPORT_SYMBOL_GPL(em28xx_init_isoc);
1014 1017
1018/* Determine the packet size for the DVB stream for the given device
1019 (underlying value programmed into the eeprom) */
1020int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev)
1021{
1022 unsigned int chip_cfg2;
1023 unsigned int packet_size = 564;
1024
1025 if (dev->chip_id == CHIP_ID_EM2874) {
1026 /* FIXME - for now assume 564 like it was before, but the
1027 em2874 code should be added to return the proper value... */
1028 packet_size = 564;
1029 } else {
1030 /* TS max packet size stored in bits 1-0 of R01 */
1031 chip_cfg2 = em28xx_read_reg(dev, EM28XX_R01_CHIPCFG2);
1032 switch (chip_cfg2 & EM28XX_CHIPCFG2_TS_PACKETSIZE_MASK) {
1033 case EM28XX_CHIPCFG2_TS_PACKETSIZE_188:
1034 packet_size = 188;
1035 break;
1036 case EM28XX_CHIPCFG2_TS_PACKETSIZE_376:
1037 packet_size = 376;
1038 break;
1039 case EM28XX_CHIPCFG2_TS_PACKETSIZE_564:
1040 packet_size = 564;
1041 break;
1042 case EM28XX_CHIPCFG2_TS_PACKETSIZE_752:
1043 packet_size = 752;
1044 break;
1045 }
1046 }
1047
1048 em28xx_coredbg("dvb max packet size=%d\n", packet_size);
1049 return packet_size;
1050}
1051EXPORT_SYMBOL_GPL(em28xx_isoc_dvb_max_packetsize);
1052
1015/* 1053/*
1016 * em28xx_wake_i2c() 1054 * em28xx_wake_i2c()
1017 * configure i2c attached devices 1055 * configure i2c attached devices
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index fcd25511209b..563dd2b1c8e9 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -25,6 +25,8 @@
25#include "em28xx.h" 25#include "em28xx.h"
26#include <media/v4l2-common.h> 26#include <media/v4l2-common.h>
27#include <media/videobuf-vmalloc.h> 27#include <media/videobuf-vmalloc.h>
28#include <media/tuner.h>
29#include "tuner-simple.h"
28 30
29#include "lgdt330x.h" 31#include "lgdt330x.h"
30#include "zl10353.h" 32#include "zl10353.h"
@@ -46,7 +48,6 @@ if (debug >= level) \
46} while (0) 48} while (0)
47 49
48#define EM28XX_DVB_NUM_BUFS 5 50#define EM28XX_DVB_NUM_BUFS 5
49#define EM28XX_DVB_MAX_PACKETSIZE 564
50#define EM28XX_DVB_MAX_PACKETS 64 51#define EM28XX_DVB_MAX_PACKETS 64
51 52
52struct em28xx_dvb { 53struct em28xx_dvb {
@@ -142,14 +143,17 @@ static int start_streaming(struct em28xx_dvb *dvb)
142{ 143{
143 int rc; 144 int rc;
144 struct em28xx *dev = dvb->adapter.priv; 145 struct em28xx *dev = dvb->adapter.priv;
146 int max_dvb_packet_size;
145 147
146 usb_set_interface(dev->udev, 0, 1); 148 usb_set_interface(dev->udev, 0, 1);
147 rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE); 149 rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
148 if (rc < 0) 150 if (rc < 0)
149 return rc; 151 return rc;
150 152
153 max_dvb_packet_size = em28xx_isoc_dvb_max_packetsize(dev);
154
151 return em28xx_init_isoc(dev, EM28XX_DVB_MAX_PACKETS, 155 return em28xx_init_isoc(dev, EM28XX_DVB_MAX_PACKETS,
152 EM28XX_DVB_NUM_BUFS, EM28XX_DVB_MAX_PACKETSIZE, 156 EM28XX_DVB_NUM_BUFS, max_dvb_packet_size,
153 dvb_isoc_copy); 157 dvb_isoc_copy);
154} 158}
155 159
@@ -431,6 +435,7 @@ static int dvb_init(struct em28xx *dev)
431 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900: 435 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900:
432 case EM2880_BOARD_TERRATEC_HYBRID_XS: 436 case EM2880_BOARD_TERRATEC_HYBRID_XS:
433 case EM2880_BOARD_KWORLD_DVB_310U: 437 case EM2880_BOARD_KWORLD_DVB_310U:
438 case EM2880_BOARD_EMPIRE_DUAL_TV:
434 dvb->frontend = dvb_attach(zl10353_attach, 439 dvb->frontend = dvb_attach(zl10353_attach,
435 &em28xx_zl10353_with_xc3028, 440 &em28xx_zl10353_with_xc3028,
436 &dev->i2c_adap); 441 &dev->i2c_adap);
@@ -448,6 +453,18 @@ static int dvb_init(struct em28xx *dev)
448 goto out_free; 453 goto out_free;
449 } 454 }
450 break; 455 break;
456 case EM2882_BOARD_KWORLD_ATSC_315U:
457 dvb->frontend = dvb_attach(lgdt330x_attach,
458 &em2880_lgdt3303_dev,
459 &dev->i2c_adap);
460 if (dvb->frontend != NULL) {
461 if (!dvb_attach(simple_tuner_attach, dvb->frontend,
462 &dev->i2c_adap, 0x61, TUNER_THOMSON_DTT761X)) {
463 result = -EINVAL;
464 goto out_free;
465 }
466 }
467 break;
451 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2: 468 case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
452#ifdef EM28XX_DRX397XD_SUPPORT 469#ifdef EM28XX_DRX397XD_SUPPORT
453 /* We don't have the config structure properly populated, so 470 /* We don't have the config structure properly populated, so
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index f0bf1d960c75..2c86fcf089f5 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -451,27 +451,6 @@ static u32 functionality(struct i2c_adapter *adap)
451 return I2C_FUNC_SMBUS_EMUL; 451 return I2C_FUNC_SMBUS_EMUL;
452} 452}
453 453
454/*
455 * attach_inform()
456 * gets called when a device attaches to the i2c bus
457 * does some basic configuration
458 */
459static int attach_inform(struct i2c_client *client)
460{
461 struct em28xx *dev = client->adapter->algo_data;
462 struct IR_i2c *ir = i2c_get_clientdata(client);
463
464 switch (client->addr << 1) {
465 case 0x60:
466 case 0x8e:
467 dprintk1(1, "attach_inform: IR detected (%s).\n", ir->phys);
468 em28xx_set_ir(dev, ir);
469 break;
470 }
471
472 return 0;
473}
474
475static struct i2c_algorithm em28xx_algo = { 454static struct i2c_algorithm em28xx_algo = {
476 .master_xfer = em28xx_i2c_xfer, 455 .master_xfer = em28xx_i2c_xfer,
477 .functionality = functionality, 456 .functionality = functionality,
@@ -482,7 +461,6 @@ static struct i2c_adapter em28xx_adap_template = {
482 .name = "em28xx", 461 .name = "em28xx",
483 .id = I2C_HW_B_EM28XX, 462 .id = I2C_HW_B_EM28XX,
484 .algo = &em28xx_algo, 463 .algo = &em28xx_algo,
485 .client_register = attach_inform,
486}; 464};
487 465
488static struct i2c_client em28xx_client_template = { 466static struct i2c_client em28xx_client_template = {
@@ -575,6 +553,9 @@ int em28xx_i2c_register(struct em28xx *dev)
575 if (i2c_scan) 553 if (i2c_scan)
576 em28xx_do_i2c_scan(dev); 554 em28xx_do_i2c_scan(dev);
577 555
556 /* Instantiate the IR receiver device, if present */
557 em28xx_register_i2c_ir(dev);
558
578 return 0; 559 return 0;
579} 560}
580 561
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index a5abfd7a19f5..7a0fe3816e3d 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -40,7 +40,7 @@ MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
40 40
41#define i2cdprintk(fmt, arg...) \ 41#define i2cdprintk(fmt, arg...) \
42 if (ir_debug) { \ 42 if (ir_debug) { \
43 printk(KERN_DEBUG "%s/ir: " fmt, ir->c.name , ## arg); \ 43 printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
44 } 44 }
45 45
46#define dprintk(fmt, arg...) \ 46#define dprintk(fmt, arg...) \
@@ -85,7 +85,7 @@ int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
85 unsigned char b; 85 unsigned char b;
86 86
87 /* poll IR chip */ 87 /* poll IR chip */
88 if (1 != i2c_master_recv(&ir->c, &b, 1)) { 88 if (1 != i2c_master_recv(ir->c, &b, 1)) {
89 i2cdprintk("read error\n"); 89 i2cdprintk("read error\n");
90 return -EIO; 90 return -EIO;
91 } 91 }
@@ -114,7 +114,7 @@ int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
114 unsigned char code; 114 unsigned char code;
115 115
116 /* poll IR chip */ 116 /* poll IR chip */
117 if (2 != i2c_master_recv(&ir->c, buf, 2)) 117 if (2 != i2c_master_recv(ir->c, buf, 2))
118 return -EIO; 118 return -EIO;
119 119
120 /* Does eliminate repeated parity code */ 120 /* Does eliminate repeated parity code */
@@ -147,7 +147,7 @@ int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
147 147
148 /* poll IR chip */ 148 /* poll IR chip */
149 149
150 if (3 != i2c_master_recv(&ir->c, buf, 3)) { 150 if (3 != i2c_master_recv(ir->c, buf, 3)) {
151 i2cdprintk("read error\n"); 151 i2cdprintk("read error\n");
152 return -EIO; 152 return -EIO;
153 } 153 }
diff --git a/drivers/media/video/em28xx/em28xx-reg.h b/drivers/media/video/em28xx/em28xx-reg.h
index 24e39c56811e..a2676d63cfd0 100644
--- a/drivers/media/video/em28xx/em28xx-reg.h
+++ b/drivers/media/video/em28xx/em28xx-reg.h
@@ -27,6 +27,22 @@
27#define EM28XX_CHIPCFG_AC97 0x10 27#define EM28XX_CHIPCFG_AC97 0x10
28#define EM28XX_CHIPCFG_AUDIOMASK 0x30 28#define EM28XX_CHIPCFG_AUDIOMASK 0x30
29 29
30#define EM28XX_R01_CHIPCFG2 0x01
31
32/* em28xx Chip Configuration 2 0x01 */
33#define EM28XX_CHIPCFG2_TS_PRESENT 0x10
34#define EM28XX_CHIPCFG2_TS_REQ_INTERVAL_MASK 0x0c /* bits 3-2 */
35#define EM28XX_CHIPCFG2_TS_REQ_INTERVAL_1MF 0x00
36#define EM28XX_CHIPCFG2_TS_REQ_INTERVAL_2MF 0x04
37#define EM28XX_CHIPCFG2_TS_REQ_INTERVAL_4MF 0x08
38#define EM28XX_CHIPCFG2_TS_REQ_INTERVAL_8MF 0x0c
39#define EM28XX_CHIPCFG2_TS_PACKETSIZE_MASK 0x03 /* bits 0-1 */
40#define EM28XX_CHIPCFG2_TS_PACKETSIZE_188 0x00
41#define EM28XX_CHIPCFG2_TS_PACKETSIZE_376 0x01
42#define EM28XX_CHIPCFG2_TS_PACKETSIZE_564 0x02
43#define EM28XX_CHIPCFG2_TS_PACKETSIZE_752 0x03
44
45
30 /* GPIO/GPO registers */ 46 /* GPIO/GPO registers */
31#define EM2880_R04_GPO 0x04 /* em2880-em2883 only */ 47#define EM2880_R04_GPO 0x04 /* em2880-em2883 only */
32#define EM28XX_R08_GPIO 0x08 /* em2820 or upper */ 48#define EM28XX_R08_GPIO 0x08 /* em2820 or upper */
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 4c4e58004f54..8bf81be1da61 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -58,7 +58,7 @@
58#define EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 16 58#define EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 16
59#define EM2880_BOARD_PINNACLE_PCTV_HD_PRO 17 59#define EM2880_BOARD_PINNACLE_PCTV_HD_PRO 17
60#define EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2 18 60#define EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2 18
61#define EM2860_BOARD_POINTNIX_INTRAORAL_CAMERA 19 61#define EM2860_BOARD_SAA711X_REFERENCE_DESIGN 19
62#define EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 20 62#define EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 20
63#define EM2800_BOARD_GRABBEEX_USB2800 21 63#define EM2800_BOARD_GRABBEEX_USB2800 21
64#define EM2750_BOARD_UNKNOWN 22 64#define EM2750_BOARD_UNKNOWN 22
@@ -102,6 +102,10 @@
102#define EM2860_BOARD_KAIOMY_TVNPC_U2 63 102#define EM2860_BOARD_KAIOMY_TVNPC_U2 63
103#define EM2860_BOARD_EASYCAP 64 103#define EM2860_BOARD_EASYCAP 64
104#define EM2820_BOARD_IODATA_GVMVP_SZ 65 104#define EM2820_BOARD_IODATA_GVMVP_SZ 65
105#define EM2880_BOARD_EMPIRE_DUAL_TV 66
106#define EM2860_BOARD_TERRATEC_GRABBY 67
107#define EM2860_BOARD_TERRATEC_AV350 68
108#define EM2882_BOARD_KWORLD_ATSC_315U 69
105 109
106/* Limits minimum and default number of buffers */ 110/* Limits minimum and default number of buffers */
107#define EM28XX_MIN_BUF 4 111#define EM28XX_MIN_BUF 4
@@ -615,6 +619,7 @@ int em28xx_init_isoc(struct em28xx *dev, int max_packets,
615 int num_bufs, int max_pkt_size, 619 int num_bufs, int max_pkt_size,
616 int (*isoc_copy) (struct em28xx *dev, struct urb *urb)); 620 int (*isoc_copy) (struct em28xx *dev, struct urb *urb));
617void em28xx_uninit_isoc(struct em28xx *dev); 621void em28xx_uninit_isoc(struct em28xx *dev);
622int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev);
618int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode); 623int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode);
619int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio); 624int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio);
620void em28xx_wake_i2c(struct em28xx *dev); 625void em28xx_wake_i2c(struct em28xx *dev);
@@ -639,7 +644,7 @@ extern void em28xx_card_setup(struct em28xx *dev);
639extern struct em28xx_board em28xx_boards[]; 644extern struct em28xx_board em28xx_boards[];
640extern struct usb_device_id em28xx_id_table[]; 645extern struct usb_device_id em28xx_id_table[];
641extern const unsigned int em28xx_bcount; 646extern const unsigned int em28xx_bcount;
642void em28xx_set_ir(struct em28xx *dev, struct IR_i2c *ir); 647void em28xx_register_i2c_ir(struct em28xx *dev);
643int em28xx_tuner_callback(void *ptr, int component, int command, int arg); 648int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
644void em28xx_release_resources(struct em28xx *dev); 649void em28xx_release_resources(struct em28xx *dev);
645 650
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index 00e6863ed666..480ec5c87d0e 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -168,6 +168,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
168 168
169 cam->cam_mode = fpix_mode; 169 cam->cam_mode = fpix_mode;
170 cam->nmodes = 1; 170 cam->nmodes = 1;
171 cam->bulk = 1;
171 cam->bulk_size = FPIX_MAX_TRANSFER; 172 cam->bulk_size = FPIX_MAX_TRANSFER;
172 173
173 INIT_WORK(&dev->work_struct, dostream); 174 INIT_WORK(&dev->work_struct, dostream);
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index a2741d7dccfe..f7e0355ad644 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Main USB camera driver 2 * Main USB camera driver
3 * 3 *
4 * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> 4 * Copyright (C) 2008-2009 Jean-Francois Moine (http://moinejf.free.fr)
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -47,7 +47,7 @@ MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
47MODULE_DESCRIPTION("GSPCA USB Camera Driver"); 47MODULE_DESCRIPTION("GSPCA USB Camera Driver");
48MODULE_LICENSE("GPL"); 48MODULE_LICENSE("GPL");
49 49
50#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 5, 0) 50#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 6, 0)
51 51
52#ifdef GSPCA_DEBUG 52#ifdef GSPCA_DEBUG
53int gspca_debug = D_ERR | D_PROBE; 53int gspca_debug = D_ERR | D_PROBE;
@@ -441,7 +441,7 @@ static void destroy_urbs(struct gspca_dev *gspca_dev)
441 * look for an input transfer endpoint in an alternate setting 441 * look for an input transfer endpoint in an alternate setting
442 */ 442 */
443static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt, 443static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt,
444 __u8 xfer) 444 int xfer)
445{ 445{
446 struct usb_host_endpoint *ep; 446 struct usb_host_endpoint *ep;
447 int i, attr; 447 int i, attr;
@@ -449,7 +449,8 @@ static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt,
449 for (i = 0; i < alt->desc.bNumEndpoints; i++) { 449 for (i = 0; i < alt->desc.bNumEndpoints; i++) {
450 ep = &alt->endpoint[i]; 450 ep = &alt->endpoint[i];
451 attr = ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 451 attr = ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
452 if (attr == xfer) 452 if (attr == xfer
453 && ep->desc.wMaxPacketSize != 0)
453 return ep; 454 return ep;
454 } 455 }
455 return NULL; 456 return NULL;
@@ -467,37 +468,28 @@ static struct usb_host_endpoint *get_ep(struct gspca_dev *gspca_dev)
467{ 468{
468 struct usb_interface *intf; 469 struct usb_interface *intf;
469 struct usb_host_endpoint *ep; 470 struct usb_host_endpoint *ep;
470 int i, ret; 471 int xfer, i, ret;
471 472
472 intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); 473 intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
473 ep = NULL; 474 ep = NULL;
475 xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
476 : USB_ENDPOINT_XFER_ISOC;
474 i = gspca_dev->alt; /* previous alt setting */ 477 i = gspca_dev->alt; /* previous alt setting */
475
476 /* try isoc */
477 while (--i >= 0) { 478 while (--i >= 0) {
478 ep = alt_xfer(&intf->altsetting[i], 479 ep = alt_xfer(&intf->altsetting[i], xfer);
479 USB_ENDPOINT_XFER_ISOC);
480 if (ep) 480 if (ep)
481 break; 481 break;
482 } 482 }
483
484 /* if no isoc, try bulk (alt 0 only) */
485 if (ep == NULL) { 483 if (ep == NULL) {
486 ep = alt_xfer(&intf->altsetting[0], 484 err("no transfer endpoint found");
487 USB_ENDPOINT_XFER_BULK); 485 return NULL;
488 if (ep == NULL) {
489 err("no transfer endpoint found");
490 return NULL;
491 }
492 i = 0;
493 gspca_dev->bulk = 1;
494 } 486 }
495 PDEBUG(D_STREAM, "use alt %d ep 0x%02x", 487 PDEBUG(D_STREAM, "use alt %d ep 0x%02x",
496 i, ep->desc.bEndpointAddress); 488 i, ep->desc.bEndpointAddress);
497 if (i > 0) { 489 if (gspca_dev->nbalt > 1) {
498 ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, i); 490 ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, i);
499 if (ret < 0) { 491 if (ret < 0) {
500 err("set interface err %d", ret); 492 err("set alt %d err %d", i, ret);
501 return NULL; 493 return NULL;
502 } 494 }
503 } 495 }
@@ -517,13 +509,13 @@ static int create_urbs(struct gspca_dev *gspca_dev,
517 /* calculate the packet size and the number of packets */ 509 /* calculate the packet size and the number of packets */
518 psize = le16_to_cpu(ep->desc.wMaxPacketSize); 510 psize = le16_to_cpu(ep->desc.wMaxPacketSize);
519 511
520 if (!gspca_dev->bulk) { /* isoc */ 512 if (!gspca_dev->cam.bulk) { /* isoc */
521 513
522 /* See paragraph 5.9 / table 5-11 of the usb 2.0 spec. */ 514 /* See paragraph 5.9 / table 5-11 of the usb 2.0 spec. */
523 psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3)); 515 psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
524 npkt = ISO_MAX_SIZE / psize; 516 npkt = gspca_dev->cam.npkt;
525 if (npkt > ISO_MAX_PKT) 517 if (npkt == 0)
526 npkt = ISO_MAX_PKT; 518 npkt = 32; /* default value */
527 bsize = psize * npkt; 519 bsize = psize * npkt;
528 PDEBUG(D_STREAM, 520 PDEBUG(D_STREAM,
529 "isoc %d pkts size %d = bsize:%d", 521 "isoc %d pkts size %d = bsize:%d",
@@ -617,7 +609,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
617 goto out; 609 goto out;
618 610
619 /* clear the bulk endpoint */ 611 /* clear the bulk endpoint */
620 if (gspca_dev->bulk) 612 if (gspca_dev->cam.bulk)
621 usb_clear_halt(gspca_dev->dev, 613 usb_clear_halt(gspca_dev->dev,
622 gspca_dev->urb[0]->pipe); 614 gspca_dev->urb[0]->pipe);
623 615
@@ -630,7 +622,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
630 gspca_dev->streaming = 1; 622 gspca_dev->streaming = 1;
631 623
632 /* some bulk transfers are started by the subdriver */ 624 /* some bulk transfers are started by the subdriver */
633 if (gspca_dev->bulk && gspca_dev->cam.bulk_nurbs == 0) 625 if (gspca_dev->cam.bulk && gspca_dev->cam.bulk_nurbs == 0)
634 break; 626 break;
635 627
636 /* submit the URBs */ 628 /* submit the URBs */
@@ -661,6 +653,8 @@ static int gspca_set_alt0(struct gspca_dev *gspca_dev)
661{ 653{
662 int ret; 654 int ret;
663 655
656 if (gspca_dev->alt == 0)
657 return 0;
664 ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0); 658 ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0);
665 if (ret < 0) 659 if (ret < 0)
666 PDEBUG(D_ERR|D_STREAM, "set alt 0 err %d", ret); 660 PDEBUG(D_ERR|D_STREAM, "set alt 0 err %d", ret);
@@ -869,6 +863,32 @@ out:
869 return ret; 863 return ret;
870} 864}
871 865
866static int vidioc_enum_framesizes(struct file *file, void *priv,
867 struct v4l2_frmsizeenum *fsize)
868{
869 struct gspca_dev *gspca_dev = priv;
870 int i;
871 __u32 index = 0;
872
873 for (i = 0; i < gspca_dev->cam.nmodes; i++) {
874 if (fsize->pixel_format !=
875 gspca_dev->cam.cam_mode[i].pixelformat)
876 continue;
877
878 if (fsize->index == index) {
879 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
880 fsize->discrete.width =
881 gspca_dev->cam.cam_mode[i].width;
882 fsize->discrete.height =
883 gspca_dev->cam.cam_mode[i].height;
884 return 0;
885 }
886 index++;
887 }
888
889 return -EINVAL;
890}
891
872static void gspca_release(struct video_device *vfd) 892static void gspca_release(struct video_device *vfd)
873{ 893{
874 struct gspca_dev *gspca_dev = container_of(vfd, struct gspca_dev, vdev); 894 struct gspca_dev *gspca_dev = container_of(vfd, struct gspca_dev, vdev);
@@ -989,43 +1009,54 @@ out:
989 return ret; 1009 return ret;
990} 1010}
991 1011
1012static const struct ctrl *get_ctrl(struct gspca_dev *gspca_dev,
1013 int id)
1014{
1015 const struct ctrl *ctrls;
1016 int i;
1017
1018 for (i = 0, ctrls = gspca_dev->sd_desc->ctrls;
1019 i < gspca_dev->sd_desc->nctrls;
1020 i++, ctrls++) {
1021 if (gspca_dev->ctrl_dis & (1 << i))
1022 continue;
1023 if (id == ctrls->qctrl.id)
1024 return ctrls;
1025 }
1026 return NULL;
1027}
1028
992static int vidioc_queryctrl(struct file *file, void *priv, 1029static int vidioc_queryctrl(struct file *file, void *priv,
993 struct v4l2_queryctrl *q_ctrl) 1030 struct v4l2_queryctrl *q_ctrl)
994{ 1031{
995 struct gspca_dev *gspca_dev = priv; 1032 struct gspca_dev *gspca_dev = priv;
996 int i, ix; 1033 const struct ctrl *ctrls;
1034 int i;
997 u32 id; 1035 u32 id;
998 1036
999 ix = -1; 1037 ctrls = NULL;
1000 id = q_ctrl->id; 1038 id = q_ctrl->id;
1001 if (id & V4L2_CTRL_FLAG_NEXT_CTRL) { 1039 if (id & V4L2_CTRL_FLAG_NEXT_CTRL) {
1002 id &= V4L2_CTRL_ID_MASK; 1040 id &= V4L2_CTRL_ID_MASK;
1003 id++; 1041 id++;
1004 for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) { 1042 for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
1005 if (gspca_dev->sd_desc->ctrls[i].qctrl.id < id) 1043 if (gspca_dev->ctrl_dis & (1 << i))
1006 continue; 1044 continue;
1007 if (ix < 0) { 1045 if (ctrls->qctrl.id < id)
1008 ix = i;
1009 continue; 1046 continue;
1047 if (ctrls != NULL) {
1048 if (gspca_dev->sd_desc->ctrls[i].qctrl.id
1049 > ctrls->qctrl.id)
1050 continue;
1010 } 1051 }
1011 if (gspca_dev->sd_desc->ctrls[i].qctrl.id 1052 ctrls = &gspca_dev->sd_desc->ctrls[i];
1012 > gspca_dev->sd_desc->ctrls[ix].qctrl.id)
1013 continue;
1014 ix = i;
1015 }
1016 }
1017 for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
1018 if (id == gspca_dev->sd_desc->ctrls[i].qctrl.id) {
1019 ix = i;
1020 break;
1021 } 1053 }
1054 } else {
1055 ctrls = get_ctrl(gspca_dev, id);
1022 } 1056 }
1023 if (ix < 0) 1057 if (ctrls == NULL)
1024 return -EINVAL; 1058 return -EINVAL;
1025 memcpy(q_ctrl, &gspca_dev->sd_desc->ctrls[ix].qctrl, 1059 memcpy(q_ctrl, ctrls, sizeof *q_ctrl);
1026 sizeof *q_ctrl);
1027 if (gspca_dev->ctrl_dis & (1 << ix))
1028 q_ctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
1029 return 0; 1060 return 0;
1030} 1061}
1031 1062
@@ -1034,56 +1065,45 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
1034{ 1065{
1035 struct gspca_dev *gspca_dev = priv; 1066 struct gspca_dev *gspca_dev = priv;
1036 const struct ctrl *ctrls; 1067 const struct ctrl *ctrls;
1037 int i, ret; 1068 int ret;
1038 1069
1039 for (i = 0, ctrls = gspca_dev->sd_desc->ctrls; 1070 ctrls = get_ctrl(gspca_dev, ctrl->id);
1040 i < gspca_dev->sd_desc->nctrls; 1071 if (ctrls == NULL)
1041 i++, ctrls++) { 1072 return -EINVAL;
1042 if (ctrl->id != ctrls->qctrl.id) 1073
1043 continue; 1074 if (ctrl->value < ctrls->qctrl.minimum
1044 if (gspca_dev->ctrl_dis & (1 << i)) 1075 || ctrl->value > ctrls->qctrl.maximum)
1045 return -EINVAL; 1076 return -ERANGE;
1046 if (ctrl->value < ctrls->qctrl.minimum 1077 PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value);
1047 || ctrl->value > ctrls->qctrl.maximum) 1078 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
1048 return -ERANGE; 1079 return -ERESTARTSYS;
1049 PDEBUG(D_CONF, "set ctrl [%08x] = %d", ctrl->id, ctrl->value); 1080 if (gspca_dev->present)
1050 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 1081 ret = ctrls->set(gspca_dev, ctrl->value);
1051 return -ERESTARTSYS; 1082 else
1052 if (gspca_dev->present) 1083 ret = -ENODEV;
1053 ret = ctrls->set(gspca_dev, ctrl->value); 1084 mutex_unlock(&gspca_dev->usb_lock);
1054 else 1085 return ret;
1055 ret = -ENODEV;
1056 mutex_unlock(&gspca_dev->usb_lock);
1057 return ret;
1058 }
1059 return -EINVAL;
1060} 1086}
1061 1087
1062static int vidioc_g_ctrl(struct file *file, void *priv, 1088static int vidioc_g_ctrl(struct file *file, void *priv,
1063 struct v4l2_control *ctrl) 1089 struct v4l2_control *ctrl)
1064{ 1090{
1065 struct gspca_dev *gspca_dev = priv; 1091 struct gspca_dev *gspca_dev = priv;
1066
1067 const struct ctrl *ctrls; 1092 const struct ctrl *ctrls;
1068 int i, ret; 1093 int ret;
1069 1094
1070 for (i = 0, ctrls = gspca_dev->sd_desc->ctrls; 1095 ctrls = get_ctrl(gspca_dev, ctrl->id);
1071 i < gspca_dev->sd_desc->nctrls; 1096 if (ctrls == NULL)
1072 i++, ctrls++) { 1097 return -EINVAL;
1073 if (ctrl->id != ctrls->qctrl.id) 1098
1074 continue; 1099 if (mutex_lock_interruptible(&gspca_dev->usb_lock))
1075 if (gspca_dev->ctrl_dis & (1 << i)) 1100 return -ERESTARTSYS;
1076 return -EINVAL; 1101 if (gspca_dev->present)
1077 if (mutex_lock_interruptible(&gspca_dev->usb_lock)) 1102 ret = ctrls->get(gspca_dev, &ctrl->value);
1078 return -ERESTARTSYS; 1103 else
1079 if (gspca_dev->present) 1104 ret = -ENODEV;
1080 ret = ctrls->get(gspca_dev, &ctrl->value); 1105 mutex_unlock(&gspca_dev->usb_lock);
1081 else 1106 return ret;
1082 ret = -ENODEV;
1083 mutex_unlock(&gspca_dev->usb_lock);
1084 return ret;
1085 }
1086 return -EINVAL;
1087} 1107}
1088 1108
1089/*fixme: have an audio flag in gspca_dev?*/ 1109/*fixme: have an audio flag in gspca_dev?*/
@@ -1864,6 +1884,7 @@ static const struct v4l2_ioctl_ops dev_ioctl_ops = {
1864 .vidioc_g_parm = vidioc_g_parm, 1884 .vidioc_g_parm = vidioc_g_parm,
1865 .vidioc_s_parm = vidioc_s_parm, 1885 .vidioc_s_parm = vidioc_s_parm,
1866 .vidioc_s_std = vidioc_s_std, 1886 .vidioc_s_std = vidioc_s_std,
1887 .vidioc_enum_framesizes = vidioc_enum_framesizes,
1867#ifdef CONFIG_VIDEO_V4L1_COMPAT 1888#ifdef CONFIG_VIDEO_V4L1_COMPAT
1868 .vidiocgmbuf = vidiocgmbuf, 1889 .vidiocgmbuf = vidiocgmbuf,
1869#endif 1890#endif
@@ -1943,7 +1964,7 @@ int gspca_dev_probe(struct usb_interface *intf,
1943 1964
1944 /* init video stuff */ 1965 /* init video stuff */
1945 memcpy(&gspca_dev->vdev, &gspca_template, sizeof gspca_template); 1966 memcpy(&gspca_dev->vdev, &gspca_template, sizeof gspca_template);
1946 gspca_dev->vdev.parent = &dev->dev; 1967 gspca_dev->vdev.parent = &intf->dev;
1947 gspca_dev->module = module; 1968 gspca_dev->module = module;
1948 gspca_dev->present = 1; 1969 gspca_dev->present = 1;
1949 ret = video_register_device(&gspca_dev->vdev, 1970 ret = video_register_device(&gspca_dev->vdev,
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 58e8ff02136a..bd1faff88644 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -44,8 +44,6 @@ extern int gspca_debug;
44#define GSPCA_MAX_FRAMES 16 /* maximum number of video frame buffers */ 44#define GSPCA_MAX_FRAMES 16 /* maximum number of video frame buffers */
45/* image transfers */ 45/* image transfers */
46#define MAX_NURBS 4 /* max number of URBs */ 46#define MAX_NURBS 4 /* max number of URBs */
47#define ISO_MAX_PKT 32 /* max number of packets in an ISOC transfer */
48#define ISO_MAX_SIZE 0x8000 /* max size of one URB buffer (32 Kb) */
49 47
50/* device information - set at probe time */ 48/* device information - set at probe time */
51struct cam { 49struct cam {
@@ -56,6 +54,9 @@ struct cam {
56 * - cannot be > MAX_NURBS 54 * - cannot be > MAX_NURBS
57 * - when 0 and bulk_size != 0 means 55 * - when 0 and bulk_size != 0 means
58 * 1 URB and submit done by subdriver */ 56 * 1 URB and submit done by subdriver */
57 u8 bulk; /* image transfer by 0:isoc / 1:bulk */
58 u8 npkt; /* number of packets in an ISOC message
59 * 0 is the default value: 32 packets */
59 u32 input_flags; /* value for ENUM_INPUT status flags */ 60 u32 input_flags; /* value for ENUM_INPUT status flags */
60}; 61};
61 62
@@ -168,7 +169,6 @@ struct gspca_dev {
168 __u8 iface; /* USB interface number */ 169 __u8 iface; /* USB interface number */
169 __u8 alt; /* USB alternate setting */ 170 __u8 alt; /* USB alternate setting */
170 __u8 nbalt; /* number of USB alternate settings */ 171 __u8 nbalt; /* number of USB alternate settings */
171 u8 bulk; /* image transfer by 0:isoc / 1:bulk */
172}; 172};
173 173
174int gspca_dev_probe(struct usb_interface *intf, 174int gspca_dev_probe(struct usb_interface *intf,
diff --git a/drivers/media/video/gspca/m5602/Makefile b/drivers/media/video/gspca/m5602/Makefile
index 9fa3644f4869..bf7a19a1e6d1 100644
--- a/drivers/media/video/gspca/m5602/Makefile
+++ b/drivers/media/video/gspca/m5602/Makefile
@@ -2,9 +2,10 @@ obj-$(CONFIG_USB_M5602) += gspca_m5602.o
2 2
3gspca_m5602-objs := m5602_core.o \ 3gspca_m5602-objs := m5602_core.o \
4 m5602_ov9650.o \ 4 m5602_ov9650.o \
5 m5602_ov7660.o \
5 m5602_mt9m111.o \ 6 m5602_mt9m111.o \
6 m5602_po1030.o \ 7 m5602_po1030.o \
7 m5602_s5k83a.o \ 8 m5602_s5k83a.o \
8 m5602_s5k4aa.o 9 m5602_s5k4aa.o
9 10
10EXTRA_CFLAGS += -Idrivers/media/video/gspca \ No newline at end of file 11EXTRA_CFLAGS += -Idrivers/media/video/gspca
diff --git a/drivers/media/video/gspca/m5602/m5602_bridge.h b/drivers/media/video/gspca/m5602/m5602_bridge.h
index 8f1cea6fd3bf..1127a405c9b2 100644
--- a/drivers/media/video/gspca/m5602/m5602_bridge.h
+++ b/drivers/media/video/gspca/m5602/m5602_bridge.h
@@ -45,6 +45,15 @@
45#define M5602_XB_SEN_CLK_DIV 0x15 45#define M5602_XB_SEN_CLK_DIV 0x15
46#define M5602_XB_AUD_CLK_CTRL 0x16 46#define M5602_XB_AUD_CLK_CTRL 0x16
47#define M5602_XB_AUD_CLK_DIV 0x17 47#define M5602_XB_AUD_CLK_DIV 0x17
48#define M5602_OB_AC_LINK_STATE 0x22
49#define M5602_OB_PCM_SLOT_INDEX 0x24
50#define M5602_OB_GPIO_SLOT_INDEX 0x25
51#define M5602_OB_ACRX_STATUS_ADDRESS_H 0x28
52#define M5602_OB_ACRX_STATUS_DATA_L 0x29
53#define M5602_OB_ACRX_STATUS_DATA_H 0x2a
54#define M5602_OB_ACTX_COMMAND_ADDRESS 0x31
55#define M5602_OB_ACRX_COMMAND_DATA_L 0x32
56#define M5602_OB_ACTX_COMMAND_DATA_H 0X33
48#define M5602_XB_DEVCTR1 0x41 57#define M5602_XB_DEVCTR1 0x41
49#define M5602_XB_EPSETR0 0x42 58#define M5602_XB_EPSETR0 0x42
50#define M5602_XB_EPAFCTR 0x47 59#define M5602_XB_EPAFCTR 0x47
@@ -77,7 +86,18 @@
77#define M5602_XB_GPIO_EN_L 0x75 86#define M5602_XB_GPIO_EN_L 0x75
78#define M5602_XB_GPIO_DAT 0x76 87#define M5602_XB_GPIO_DAT 0x76
79#define M5602_XB_GPIO_DIR 0x77 88#define M5602_XB_GPIO_DIR 0x77
80#define M5602_XB_MISC_CTL 0x70 89#define M5602_XB_SEN_CLK_CONTROL 0x80
90#define M5602_XB_SEN_CLK_DIVISION 0x81
91#define M5602_XB_CPR_CLK_CONTROL 0x82
92#define M5602_XB_CPR_CLK_DIVISION 0x83
93#define M5602_XB_MCU_CLK_CONTROL 0x84
94#define M5602_XB_MCU_CLK_DIVISION 0x85
95#define M5602_XB_DCT_CLK_CONTROL 0x86
96#define M5602_XB_DCT_CLK_DIVISION 0x87
97#define M5602_XB_EC_CLK_CONTROL 0x88
98#define M5602_XB_EC_CLK_DIVISION 0x89
99#define M5602_XB_LBUF_CLK_CONTROL 0x8a
100#define M5602_XB_LBUF_CLK_DIVISION 0x8b
81 101
82#define I2C_BUSY 0x80 102#define I2C_BUSY 0x80
83 103
@@ -128,10 +148,10 @@ struct sd {
128}; 148};
129 149
130int m5602_read_bridge( 150int m5602_read_bridge(
131 struct sd *sd, u8 address, u8 *i2c_data); 151 struct sd *sd, const u8 address, u8 *i2c_data);
132 152
133int m5602_write_bridge( 153int m5602_write_bridge(
134 struct sd *sd, u8 address, u8 i2c_data); 154 struct sd *sd, const u8 address, const u8 i2c_data);
135 155
136int m5602_write_sensor(struct sd *sd, const u8 address, 156int m5602_write_sensor(struct sd *sd, const u8 address,
137 u8 *i2c_data, const u8 len); 157 u8 *i2c_data, const u8 len);
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index 1aac2985fee6..8a5bba16ff32 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include "m5602_ov9650.h" 19#include "m5602_ov9650.h"
20#include "m5602_ov7660.h"
20#include "m5602_mt9m111.h" 21#include "m5602_mt9m111.h"
21#include "m5602_po1030.h" 22#include "m5602_po1030.h"
22#include "m5602_s5k83a.h" 23#include "m5602_s5k83a.h"
@@ -35,7 +36,7 @@ static const __devinitdata struct usb_device_id m5602_table[] = {
35MODULE_DEVICE_TABLE(usb, m5602_table); 36MODULE_DEVICE_TABLE(usb, m5602_table);
36 37
37/* Reads a byte from the m5602 */ 38/* Reads a byte from the m5602 */
38int m5602_read_bridge(struct sd *sd, u8 address, u8 *i2c_data) 39int m5602_read_bridge(struct sd *sd, const u8 address, u8 *i2c_data)
39{ 40{
40 int err; 41 int err;
41 struct usb_device *udev = sd->gspca_dev.dev; 42 struct usb_device *udev = sd->gspca_dev.dev;
@@ -56,7 +57,7 @@ int m5602_read_bridge(struct sd *sd, u8 address, u8 *i2c_data)
56} 57}
57 58
58/* Writes a byte to to the m5602 */ 59/* Writes a byte to to the m5602 */
59int m5602_write_bridge(struct sd *sd, u8 address, u8 i2c_data) 60int m5602_write_bridge(struct sd *sd, const u8 address, const u8 i2c_data)
60{ 61{
61 int err; 62 int err;
62 struct usb_device *udev = sd->gspca_dev.dev; 63 struct usb_device *udev = sd->gspca_dev.dev;
@@ -80,6 +81,17 @@ int m5602_write_bridge(struct sd *sd, u8 address, u8 i2c_data)
80 return (err < 0) ? err : 0; 81 return (err < 0) ? err : 0;
81} 82}
82 83
84int m5602_wait_for_i2c(struct sd *sd)
85{
86 int err;
87 u8 data;
88
89 do {
90 err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, &data);
91 } while ((data & I2C_BUSY) && !err);
92 return err;
93}
94
83int m5602_read_sensor(struct sd *sd, const u8 address, 95int m5602_read_sensor(struct sd *sd, const u8 address,
84 u8 *i2c_data, const u8 len) 96 u8 *i2c_data, const u8 len)
85{ 97{
@@ -88,9 +100,7 @@ int m5602_read_sensor(struct sd *sd, const u8 address,
88 if (!len || len > sd->sensor->i2c_regW) 100 if (!len || len > sd->sensor->i2c_regW)
89 return -EINVAL; 101 return -EINVAL;
90 102
91 do { 103 err = m5602_wait_for_i2c(sd);
92 err = m5602_read_bridge(sd, M5602_XB_I2C_STATUS, i2c_data);
93 } while ((*i2c_data & I2C_BUSY) && !err);
94 if (err < 0) 104 if (err < 0)
95 return err; 105 return err;
96 106
@@ -103,21 +113,25 @@ int m5602_read_sensor(struct sd *sd, const u8 address,
103 if (err < 0) 113 if (err < 0)
104 return err; 114 return err;
105 115
116 /* Sensors with registers that are of only
117 one byte width are differently read */
118
119 /* FIXME: This works with the ov9650, but has issues with the po1030 */
106 if (sd->sensor->i2c_regW == 1) { 120 if (sd->sensor->i2c_regW == 1) {
107 err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, len); 121 err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 1);
108 if (err < 0) 122 if (err < 0)
109 return err; 123 return err;
110 124
111 err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x08); 125 err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x08);
112 if (err < 0)
113 return err;
114 } else { 126 } else {
115 err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x18 + len); 127 err = m5602_write_bridge(sd, M5602_XB_I2C_CTRL, 0x18 + len);
116 if (err < 0)
117 return err;
118 } 128 }
119 129
120 for (i = 0; (i < len) && !err; i++) { 130 for (i = 0; (i < len) && !err; i++) {
131 err = m5602_wait_for_i2c(sd);
132 if (err < 0)
133 return err;
134
121 err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i])); 135 err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
122 136
123 PDEBUG(D_CONF, "Reading sensor register " 137 PDEBUG(D_CONF, "Reading sensor register "
@@ -206,6 +220,11 @@ static int m5602_probe_sensor(struct sd *sd)
206 if (!sd->sensor->probe(sd)) 220 if (!sd->sensor->probe(sd))
207 return 0; 221 return 0;
208 222
223 /* Try the ov7660 */
224 sd->sensor = &ov7660;
225 if (!sd->sensor->probe(sd))
226 return 0;
227
209 /* Try the s5k83a */ 228 /* Try the s5k83a */
210 sd->sensor = &s5k83a; 229 sd->sensor = &s5k83a;
211 if (!sd->sensor->probe(sd)) 230 if (!sd->sensor->probe(sd))
@@ -409,8 +428,9 @@ MODULE_DESCRIPTION(DRIVER_DESC);
409MODULE_LICENSE("GPL"); 428MODULE_LICENSE("GPL");
410module_param(force_sensor, int, S_IRUGO | S_IWUSR); 429module_param(force_sensor, int, S_IRUGO | S_IWUSR);
411MODULE_PARM_DESC(force_sensor, 430MODULE_PARM_DESC(force_sensor,
412 "force detection of sensor, " 431 "forces detection of a sensor, "
413 "1 = OV9650, 2 = S5K83A, 3 = S5K4AA, 4 = MT9M111, 5 = PO1030"); 432 "1 = OV9650, 2 = S5K83A, 3 = S5K4AA, "
433 "4 = MT9M111, 5 = PO1030, 6 = OV7660");
414 434
415module_param(dump_bridge, bool, S_IRUGO | S_IWUSR); 435module_param(dump_bridge, bool, S_IRUGO | S_IWUSR);
416MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup"); 436MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup");
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.c b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
index 7d3f9e348ef4..8d071dff6944 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.c
@@ -18,6 +18,23 @@
18 18
19#include "m5602_mt9m111.h" 19#include "m5602_mt9m111.h"
20 20
21static int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
22static int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
23static int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
24static int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
25static int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
26static int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val);
27static int mt9m111_set_auto_white_balance(struct gspca_dev *gspca_dev,
28 __s32 val);
29static int mt9m111_get_auto_white_balance(struct gspca_dev *gspca_dev,
30 __s32 *val);
31static int mt9m111_get_green_balance(struct gspca_dev *gspca_dev, __s32 *val);
32static int mt9m111_set_green_balance(struct gspca_dev *gspca_dev, __s32 val);
33static int mt9m111_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
34static int mt9m111_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
35static int mt9m111_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
36static int mt9m111_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
37
21static struct v4l2_pix_format mt9m111_modes[] = { 38static struct v4l2_pix_format mt9m111_modes[] = {
22 { 39 {
23 640, 40 640,
@@ -32,6 +49,7 @@ static struct v4l2_pix_format mt9m111_modes[] = {
32}; 49};
33 50
34const static struct ctrl mt9m111_ctrls[] = { 51const static struct ctrl mt9m111_ctrls[] = {
52#define VFLIP_IDX 0
35 { 53 {
36 { 54 {
37 .id = V4L2_CID_VFLIP, 55 .id = V4L2_CID_VFLIP,
@@ -44,7 +62,9 @@ const static struct ctrl mt9m111_ctrls[] = {
44 }, 62 },
45 .set = mt9m111_set_vflip, 63 .set = mt9m111_set_vflip,
46 .get = mt9m111_get_vflip 64 .get = mt9m111_get_vflip
47 }, { 65 },
66#define HFLIP_IDX 1
67 {
48 { 68 {
49 .id = V4L2_CID_HFLIP, 69 .id = V4L2_CID_HFLIP,
50 .type = V4L2_CTRL_TYPE_BOOLEAN, 70 .type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -56,7 +76,9 @@ const static struct ctrl mt9m111_ctrls[] = {
56 }, 76 },
57 .set = mt9m111_set_hflip, 77 .set = mt9m111_set_hflip,
58 .get = mt9m111_get_hflip 78 .get = mt9m111_get_hflip
59 }, { 79 },
80#define GAIN_IDX 2
81 {
60 { 82 {
61 .id = V4L2_CID_GAIN, 83 .id = V4L2_CID_GAIN,
62 .type = V4L2_CTRL_TYPE_INTEGER, 84 .type = V4L2_CTRL_TYPE_INTEGER,
@@ -64,21 +86,80 @@ const static struct ctrl mt9m111_ctrls[] = {
64 .minimum = 0, 86 .minimum = 0,
65 .maximum = (INITIAL_MAX_GAIN - 1) * 2 * 2 * 2, 87 .maximum = (INITIAL_MAX_GAIN - 1) * 2 * 2 * 2,
66 .step = 1, 88 .step = 1,
67 .default_value = DEFAULT_GAIN, 89 .default_value = MT9M111_DEFAULT_GAIN,
68 .flags = V4L2_CTRL_FLAG_SLIDER 90 .flags = V4L2_CTRL_FLAG_SLIDER
69 }, 91 },
70 .set = mt9m111_set_gain, 92 .set = mt9m111_set_gain,
71 .get = mt9m111_get_gain 93 .get = mt9m111_get_gain
72 } 94 },
95#define AUTO_WHITE_BALANCE_IDX 3
96 {
97 {
98 .id = V4L2_CID_AUTO_WHITE_BALANCE,
99 .type = V4L2_CTRL_TYPE_BOOLEAN,
100 .name = "auto white balance",
101 .minimum = 0,
102 .maximum = 1,
103 .step = 1,
104 .default_value = 0,
105 },
106 .set = mt9m111_set_auto_white_balance,
107 .get = mt9m111_get_auto_white_balance
108 },
109#define GREEN_BALANCE_IDX 4
110 {
111 {
112 .id = M5602_V4L2_CID_GREEN_BALANCE,
113 .type = V4L2_CTRL_TYPE_INTEGER,
114 .name = "green balance",
115 .minimum = 0x00,
116 .maximum = 0x7ff,
117 .step = 0x1,
118 .default_value = MT9M111_GREEN_GAIN_DEFAULT,
119 .flags = V4L2_CTRL_FLAG_SLIDER
120 },
121 .set = mt9m111_set_green_balance,
122 .get = mt9m111_get_green_balance
123 },
124#define BLUE_BALANCE_IDX 5
125 {
126 {
127 .id = V4L2_CID_BLUE_BALANCE,
128 .type = V4L2_CTRL_TYPE_INTEGER,
129 .name = "blue balance",
130 .minimum = 0x00,
131 .maximum = 0x7ff,
132 .step = 0x1,
133 .default_value = MT9M111_BLUE_GAIN_DEFAULT,
134 .flags = V4L2_CTRL_FLAG_SLIDER
135 },
136 .set = mt9m111_set_blue_balance,
137 .get = mt9m111_get_blue_balance
138 },
139#define RED_BALANCE_IDX 5
140 {
141 {
142 .id = V4L2_CID_RED_BALANCE,
143 .type = V4L2_CTRL_TYPE_INTEGER,
144 .name = "red balance",
145 .minimum = 0x00,
146 .maximum = 0x7ff,
147 .step = 0x1,
148 .default_value = MT9M111_RED_GAIN_DEFAULT,
149 .flags = V4L2_CTRL_FLAG_SLIDER
150 },
151 .set = mt9m111_set_red_balance,
152 .get = mt9m111_get_red_balance
153 },
73}; 154};
74 155
75
76static void mt9m111_dump_registers(struct sd *sd); 156static void mt9m111_dump_registers(struct sd *sd);
77 157
78int mt9m111_probe(struct sd *sd) 158int mt9m111_probe(struct sd *sd)
79{ 159{
80 u8 data[2] = {0x00, 0x00}; 160 u8 data[2] = {0x00, 0x00};
81 int i; 161 int i;
162 s32 *sensor_settings;
82 163
83 if (force_sensor) { 164 if (force_sensor) {
84 if (force_sensor == MT9M111_SENSOR) { 165 if (force_sensor == MT9M111_SENSOR) {
@@ -117,16 +198,27 @@ int mt9m111_probe(struct sd *sd)
117 return -ENODEV; 198 return -ENODEV;
118 199
119sensor_found: 200sensor_found:
201 sensor_settings = kmalloc(ARRAY_SIZE(mt9m111_ctrls) * sizeof(s32),
202 GFP_KERNEL);
203 if (!sensor_settings)
204 return -ENOMEM;
205
120 sd->gspca_dev.cam.cam_mode = mt9m111_modes; 206 sd->gspca_dev.cam.cam_mode = mt9m111_modes;
121 sd->gspca_dev.cam.nmodes = ARRAY_SIZE(mt9m111_modes); 207 sd->gspca_dev.cam.nmodes = ARRAY_SIZE(mt9m111_modes);
122 sd->desc->ctrls = mt9m111_ctrls; 208 sd->desc->ctrls = mt9m111_ctrls;
123 sd->desc->nctrls = ARRAY_SIZE(mt9m111_ctrls); 209 sd->desc->nctrls = ARRAY_SIZE(mt9m111_ctrls);
210
211 for (i = 0; i < ARRAY_SIZE(mt9m111_ctrls); i++)
212 sensor_settings[i] = mt9m111_ctrls[i].qctrl.default_value;
213 sd->sensor_priv = sensor_settings;
214
124 return 0; 215 return 0;
125} 216}
126 217
127int mt9m111_init(struct sd *sd) 218int mt9m111_init(struct sd *sd)
128{ 219{
129 int i, err = 0; 220 int i, err = 0;
221 s32 *sensor_settings = sd->sensor_priv;
130 222
131 /* Init the sensor */ 223 /* Init the sensor */
132 for (i = 0; i < ARRAY_SIZE(init_mt9m111) && !err; i++) { 224 for (i = 0; i < ARRAY_SIZE(init_mt9m111) && !err; i++) {
@@ -147,36 +239,154 @@ int mt9m111_init(struct sd *sd)
147 if (dump_sensor) 239 if (dump_sensor)
148 mt9m111_dump_registers(sd); 240 mt9m111_dump_registers(sd);
149 241
150 return (err < 0) ? err : 0; 242 err = mt9m111_set_vflip(&sd->gspca_dev, sensor_settings[VFLIP_IDX]);
243 if (err < 0)
244 return err;
245
246 err = mt9m111_set_hflip(&sd->gspca_dev, sensor_settings[HFLIP_IDX]);
247 if (err < 0)
248 return err;
249
250 err = mt9m111_set_green_balance(&sd->gspca_dev,
251 sensor_settings[GREEN_BALANCE_IDX]);
252 if (err < 0)
253 return err;
254
255 err = mt9m111_set_blue_balance(&sd->gspca_dev,
256 sensor_settings[BLUE_BALANCE_IDX]);
257 if (err < 0)
258 return err;
259
260 err = mt9m111_set_red_balance(&sd->gspca_dev,
261 sensor_settings[RED_BALANCE_IDX]);
262 if (err < 0)
263 return err;
264
265 return mt9m111_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
151} 266}
152 267
153int mt9m111_power_down(struct sd *sd) 268int mt9m111_start(struct sd *sd)
154{ 269{
155 return 0; 270 int i, err = 0;
271 u8 data[2];
272 struct cam *cam = &sd->gspca_dev.cam;
273 s32 *sensor_settings = sd->sensor_priv;
274
275 int width = cam->cam_mode[sd->gspca_dev.curr_mode].width - 1;
276 int height = cam->cam_mode[sd->gspca_dev.curr_mode].height;
277
278 for (i = 0; i < ARRAY_SIZE(start_mt9m111) && !err; i++) {
279 if (start_mt9m111[i][0] == BRIDGE) {
280 err = m5602_write_bridge(sd,
281 start_mt9m111[i][1],
282 start_mt9m111[i][2]);
283 } else {
284 data[0] = start_mt9m111[i][2];
285 data[1] = start_mt9m111[i][3];
286 err = m5602_write_sensor(sd,
287 start_mt9m111[i][1], data, 2);
288 }
289 }
290 if (err < 0)
291 return err;
292
293 err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height >> 8) & 0xff);
294 if (err < 0)
295 return err;
296
297 err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height & 0xff));
298 if (err < 0)
299 return err;
300
301 for (i = 0; i < 2 && !err; i++)
302 err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0);
303 if (err < 0)
304 return err;
305
306 err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
307 if (err < 0)
308 return err;
309
310 err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 2);
311 if (err < 0)
312 return err;
313
314 for (i = 0; i < 2 && !err; i++)
315 err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, 0);
316 if (err < 0)
317 return err;
318
319 err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA,
320 (width >> 8) & 0xff);
321 if (err < 0)
322 return err;
323
324 err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, width & 0xff);
325 if (err < 0)
326 return err;
327
328 err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
329 if (err < 0)
330 return err;
331
332 switch (width) {
333 case 640:
334 PDEBUG(D_V4L2, "Configuring camera for VGA mode");
335 data[0] = MT9M111_RMB_OVER_SIZED;
336 data[1] = MT9M111_RMB_ROW_SKIP_2X |
337 MT9M111_RMB_COLUMN_SKIP_2X |
338 (sensor_settings[VFLIP_IDX] << 0) |
339 (sensor_settings[HFLIP_IDX] << 1);
340
341 err = m5602_write_sensor(sd,
342 MT9M111_SC_R_MODE_CONTEXT_B, data, 2);
343 break;
344
345 case 320:
346 PDEBUG(D_V4L2, "Configuring camera for QVGA mode");
347 data[0] = MT9M111_RMB_OVER_SIZED;
348 data[1] = MT9M111_RMB_ROW_SKIP_4X |
349 MT9M111_RMB_COLUMN_SKIP_4X |
350 (sensor_settings[VFLIP_IDX] << 0) |
351 (sensor_settings[HFLIP_IDX] << 1);
352 err = m5602_write_sensor(sd,
353 MT9M111_SC_R_MODE_CONTEXT_B, data, 2);
354 break;
355 }
356 return err;
156} 357}
157 358
158int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val) 359void mt9m111_disconnect(struct sd *sd)
360{
361 sd->sensor = NULL;
362 kfree(sd->sensor_priv);
363}
364
365static int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
159{ 366{
160 int err;
161 u8 data[2] = {0x00, 0x00};
162 struct sd *sd = (struct sd *) gspca_dev; 367 struct sd *sd = (struct sd *) gspca_dev;
368 s32 *sensor_settings = sd->sensor_priv;
163 369
164 err = m5602_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, 370 *val = sensor_settings[VFLIP_IDX];
165 data, 2);
166 *val = data[0] & MT9M111_RMB_MIRROR_ROWS;
167 PDEBUG(D_V4L2, "Read vertical flip %d", *val); 371 PDEBUG(D_V4L2, "Read vertical flip %d", *val);
168 372
169 return err; 373 return 0;
170} 374}
171 375
172int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val) 376static int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
173{ 377{
174 int err; 378 int err;
175 u8 data[2] = {0x00, 0x00}; 379 u8 data[2] = {0x00, 0x00};
176 struct sd *sd = (struct sd *) gspca_dev; 380 struct sd *sd = (struct sd *) gspca_dev;
381 s32 *sensor_settings = sd->sensor_priv;
177 382
178 PDEBUG(D_V4L2, "Set vertical flip to %d", val); 383 PDEBUG(D_V4L2, "Set vertical flip to %d", val);
179 384
385 sensor_settings[VFLIP_IDX] = val;
386
387 /* The mt9m111 is flipped by default */
388 val = !val;
389
180 /* Set the correct page map */ 390 /* Set the correct page map */
181 err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2); 391 err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
182 if (err < 0) 392 if (err < 0)
@@ -186,34 +396,37 @@ int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
186 if (err < 0) 396 if (err < 0)
187 return err; 397 return err;
188 398
189 data[0] = (data[0] & 0xfe) | val; 399 data[1] = (data[1] & 0xfe) | val;
190 err = m5602_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, 400 err = m5602_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
191 data, 2); 401 data, 2);
192 return err; 402 return err;
193} 403}
194 404
195int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val) 405static int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
196{ 406{
197 int err;
198 u8 data[2] = {0x00, 0x00};
199 struct sd *sd = (struct sd *) gspca_dev; 407 struct sd *sd = (struct sd *) gspca_dev;
408 s32 *sensor_settings = sd->sensor_priv;
200 409
201 err = m5602_read_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, 410 *val = sensor_settings[HFLIP_IDX];
202 data, 2);
203 *val = data[0] & MT9M111_RMB_MIRROR_COLS;
204 PDEBUG(D_V4L2, "Read horizontal flip %d", *val); 411 PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
205 412
206 return err; 413 return 0;
207} 414}
208 415
209int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val) 416static int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
210{ 417{
211 int err; 418 int err;
212 u8 data[2] = {0x00, 0x00}; 419 u8 data[2] = {0x00, 0x00};
213 struct sd *sd = (struct sd *) gspca_dev; 420 struct sd *sd = (struct sd *) gspca_dev;
421 s32 *sensor_settings = sd->sensor_priv;
214 422
215 PDEBUG(D_V4L2, "Set horizontal flip to %d", val); 423 PDEBUG(D_V4L2, "Set horizontal flip to %d", val);
216 424
425 sensor_settings[HFLIP_IDX] = val;
426
427 /* The mt9m111 is flipped by default */
428 val = !val;
429
217 /* Set the correct page map */ 430 /* Set the correct page map */
218 err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2); 431 err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
219 if (err < 0) 432 if (err < 0)
@@ -223,36 +436,62 @@ int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
223 if (err < 0) 436 if (err < 0)
224 return err; 437 return err;
225 438
226 data[0] = (data[0] & 0xfd) | ((val << 1) & 0x02); 439 data[1] = (data[1] & 0xfd) | ((val << 1) & 0x02);
227 err = m5602_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B, 440 err = m5602_write_sensor(sd, MT9M111_SC_R_MODE_CONTEXT_B,
228 data, 2); 441 data, 2);
229 return err; 442 return err;
230} 443}
231 444
232int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val) 445static int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
233{ 446{
234 int err, tmp;
235 u8 data[2] = {0x00, 0x00};
236 struct sd *sd = (struct sd *) gspca_dev; 447 struct sd *sd = (struct sd *) gspca_dev;
448 s32 *sensor_settings = sd->sensor_priv;
237 449
238 err = m5602_read_sensor(sd, MT9M111_SC_GLOBAL_GAIN, data, 2); 450 *val = sensor_settings[GAIN_IDX];
239 tmp = ((data[1] << 8) | data[0]); 451 PDEBUG(D_V4L2, "Read gain %d", *val);
240 452
241 *val = ((tmp & (1 << 10)) * 2) | 453 return 0;
242 ((tmp & (1 << 9)) * 2) | 454}
243 ((tmp & (1 << 8)) * 2) |
244 (tmp & 0x7f);
245 455
246 PDEBUG(D_V4L2, "Read gain %d", *val); 456static int mt9m111_set_auto_white_balance(struct gspca_dev *gspca_dev,
457 __s32 val)
458{
459 struct sd *sd = (struct sd *) gspca_dev;
460 s32 *sensor_settings = sd->sensor_priv;
461 int err;
462 u8 data[2];
463
464 err = m5602_read_sensor(sd, MT9M111_CP_OPERATING_MODE_CTL, data, 2);
465 if (err < 0)
466 return err;
467
468 sensor_settings[AUTO_WHITE_BALANCE_IDX] = val & 0x01;
469 data[1] = ((data[1] & 0xfd) | ((val & 0x01) << 1));
247 470
471 err = m5602_write_sensor(sd, MT9M111_CP_OPERATING_MODE_CTL, data, 2);
472
473 PDEBUG(D_V4L2, "Set auto white balance %d", val);
248 return err; 474 return err;
249} 475}
250 476
251int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val) 477static int mt9m111_get_auto_white_balance(struct gspca_dev *gspca_dev,
478 __s32 *val) {
479 struct sd *sd = (struct sd *) gspca_dev;
480 s32 *sensor_settings = sd->sensor_priv;
481
482 *val = sensor_settings[AUTO_WHITE_BALANCE_IDX];
483 PDEBUG(D_V4L2, "Read auto white balance %d", *val);
484 return 0;
485}
486
487static int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val)
252{ 488{
253 int err, tmp; 489 int err, tmp;
254 u8 data[2] = {0x00, 0x00}; 490 u8 data[2] = {0x00, 0x00};
255 struct sd *sd = (struct sd *) gspca_dev; 491 struct sd *sd = (struct sd *) gspca_dev;
492 s32 *sensor_settings = sd->sensor_priv;
493
494 sensor_settings[GAIN_IDX] = val;
256 495
257 /* Set the correct page map */ 496 /* Set the correct page map */
258 err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2); 497 err = m5602_write_sensor(sd, MT9M111_PAGE_MAP, data, 2);
@@ -275,8 +514,8 @@ int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val)
275 else 514 else
276 tmp = val; 515 tmp = val;
277 516
278 data[1] = (tmp & 0xff00) >> 8; 517 data[1] = (tmp & 0xff);
279 data[0] = (tmp & 0xff); 518 data[0] = (tmp & 0xff00) >> 8;
280 PDEBUG(D_V4L2, "tmp=%d, data[1]=%d, data[0]=%d", tmp, 519 PDEBUG(D_V4L2, "tmp=%d, data[1]=%d, data[0]=%d", tmp,
281 data[1], data[0]); 520 data[1], data[0]);
282 521
@@ -286,6 +525,89 @@ int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val)
286 return err; 525 return err;
287} 526}
288 527
528static int mt9m111_set_green_balance(struct gspca_dev *gspca_dev, __s32 val)
529{
530 int err;
531 u8 data[2];
532 struct sd *sd = (struct sd *) gspca_dev;
533 s32 *sensor_settings = sd->sensor_priv;
534
535 sensor_settings[GREEN_BALANCE_IDX] = val;
536 data[1] = (val & 0xff);
537 data[0] = (val & 0xff00) >> 8;
538
539 PDEBUG(D_V4L2, "Set green balance %d", val);
540 err = m5602_write_sensor(sd, MT9M111_SC_GREEN_1_GAIN,
541 data, 2);
542 if (err < 0)
543 return err;
544
545 return m5602_write_sensor(sd, MT9M111_SC_GREEN_2_GAIN,
546 data, 2);
547}
548
549static int mt9m111_get_green_balance(struct gspca_dev *gspca_dev, __s32 *val)
550{
551 struct sd *sd = (struct sd *) gspca_dev;
552 s32 *sensor_settings = sd->sensor_priv;
553
554 *val = sensor_settings[GREEN_BALANCE_IDX];
555 PDEBUG(D_V4L2, "Read green balance %d", *val);
556 return 0;
557}
558
559static int mt9m111_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
560{
561 u8 data[2];
562 struct sd *sd = (struct sd *) gspca_dev;
563 s32 *sensor_settings = sd->sensor_priv;
564
565 sensor_settings[BLUE_BALANCE_IDX] = val;
566 data[1] = (val & 0xff);
567 data[0] = (val & 0xff00) >> 8;
568
569 PDEBUG(D_V4L2, "Set blue balance %d", val);
570
571 return m5602_write_sensor(sd, MT9M111_SC_BLUE_GAIN,
572 data, 2);
573}
574
575static int mt9m111_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
576{
577 struct sd *sd = (struct sd *) gspca_dev;
578 s32 *sensor_settings = sd->sensor_priv;
579
580 *val = sensor_settings[BLUE_BALANCE_IDX];
581 PDEBUG(D_V4L2, "Read blue balance %d", *val);
582 return 0;
583}
584
585static int mt9m111_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
586{
587 u8 data[2];
588 struct sd *sd = (struct sd *) gspca_dev;
589 s32 *sensor_settings = sd->sensor_priv;
590
591 sensor_settings[RED_BALANCE_IDX] = val;
592 data[1] = (val & 0xff);
593 data[0] = (val & 0xff00) >> 8;
594
595 PDEBUG(D_V4L2, "Set red balance %d", val);
596
597 return m5602_write_sensor(sd, MT9M111_SC_RED_GAIN,
598 data, 2);
599}
600
601static int mt9m111_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
602{
603 struct sd *sd = (struct sd *) gspca_dev;
604 s32 *sensor_settings = sd->sensor_priv;
605
606 *val = sensor_settings[RED_BALANCE_IDX];
607 PDEBUG(D_V4L2, "Read red balance %d", *val);
608 return 0;
609}
610
289static void mt9m111_dump_registers(struct sd *sd) 611static void mt9m111_dump_registers(struct sd *sd)
290{ 612{
291 u8 address, value[2] = {0x00, 0x00}; 613 u8 address, value[2] = {0x00, 0x00};
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.h b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
index 00c6db02bdb7..b3de77823091 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.h
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
@@ -37,7 +37,6 @@
37#define MT9M111_SC_VBLANK_CONTEXT_A 0x08 37#define MT9M111_SC_VBLANK_CONTEXT_A 0x08
38#define MT9M111_SC_SHUTTER_WIDTH 0x09 38#define MT9M111_SC_SHUTTER_WIDTH 0x09
39#define MT9M111_SC_ROW_SPEED 0x0a 39#define MT9M111_SC_ROW_SPEED 0x0a
40
41#define MT9M111_SC_EXTRA_DELAY 0x0b 40#define MT9M111_SC_EXTRA_DELAY 0x0b
42#define MT9M111_SC_SHUTTER_DELAY 0x0c 41#define MT9M111_SC_SHUTTER_DELAY 0x0c
43#define MT9M111_SC_RESET 0x0d 42#define MT9M111_SC_RESET 0x0d
@@ -50,9 +49,6 @@
50#define MT9M111_SC_GREEN_2_GAIN 0x2e 49#define MT9M111_SC_GREEN_2_GAIN 0x2e
51#define MT9M111_SC_GLOBAL_GAIN 0x2f 50#define MT9M111_SC_GLOBAL_GAIN 0x2f
52 51
53#define MT9M111_RMB_MIRROR_ROWS (1 << 0)
54#define MT9M111_RMB_MIRROR_COLS (1 << 1)
55
56#define MT9M111_CONTEXT_CONTROL 0xc8 52#define MT9M111_CONTEXT_CONTROL 0xc8
57#define MT9M111_PAGE_MAP 0xf0 53#define MT9M111_PAGE_MAP 0xf0
58#define MT9M111_BYTEWISE_ADDRESS 0xf1 54#define MT9M111_BYTEWISE_ADDRESS 0xf1
@@ -74,8 +70,37 @@
74#define MT9M111_COLORPIPE 0x01 70#define MT9M111_COLORPIPE 0x01
75#define MT9M111_CAMERA_CONTROL 0x02 71#define MT9M111_CAMERA_CONTROL 0x02
76 72
73#define MT9M111_RESET (1 << 0)
74#define MT9M111_RESTART (1 << 1)
75#define MT9M111_ANALOG_STANDBY (1 << 2)
76#define MT9M111_CHIP_ENABLE (1 << 3)
77#define MT9M111_CHIP_DISABLE (0 << 3)
78#define MT9M111_OUTPUT_DISABLE (1 << 4)
79#define MT9M111_SHOW_BAD_FRAMES (1 << 0)
80#define MT9M111_RESTART_BAD_FRAMES (1 << 1)
81#define MT9M111_SYNCHRONIZE_CHANGES (1 << 7)
82
83#define MT9M111_RMB_OVER_SIZED (1 << 0)
84#define MT9M111_RMB_MIRROR_ROWS (1 << 0)
85#define MT9M111_RMB_MIRROR_COLS (1 << 1)
86#define MT9M111_RMB_ROW_SKIP_2X (1 << 2)
87#define MT9M111_RMB_COLUMN_SKIP_2X (1 << 3)
88#define MT9M111_RMB_ROW_SKIP_4X (1 << 4)
89#define MT9M111_RMB_COLUMN_SKIP_4X (1 << 5)
90
91#define MT9M111_COLOR_MATRIX_BYPASS (1 << 4)
92#define MT9M111_SEL_CONTEXT_B (1 << 3)
93
94#define MT9M111_TRISTATE_PIN_IN_STANDBY (1 << 1)
95#define MT9M111_SOC_SOFT_STANDBY (1 << 0)
96
97#define MT9M111_2D_DEFECT_CORRECTION_ENABLE (1 << 0)
98
77#define INITIAL_MAX_GAIN 64 99#define INITIAL_MAX_GAIN 64
78#define DEFAULT_GAIN 283 100#define MT9M111_DEFAULT_GAIN 283
101#define MT9M111_GREEN_GAIN_DEFAULT 0x20
102#define MT9M111_BLUE_GAIN_DEFAULT 0x20
103#define MT9M111_RED_GAIN_DEFAULT 0x20
79 104
80/*****************************************************************************/ 105/*****************************************************************************/
81 106
@@ -85,16 +110,10 @@ extern int dump_sensor;
85 110
86int mt9m111_probe(struct sd *sd); 111int mt9m111_probe(struct sd *sd);
87int mt9m111_init(struct sd *sd); 112int mt9m111_init(struct sd *sd);
88int mt9m111_power_down(struct sd *sd); 113int mt9m111_start(struct sd *sd);
114void mt9m111_disconnect(struct sd *sd);
89 115
90int mt9m111_set_vflip(struct gspca_dev *gspca_dev, __s32 val); 116static const struct m5602_sensor mt9m111 = {
91int mt9m111_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
92int mt9m111_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
93int mt9m111_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
94int mt9m111_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
95int mt9m111_set_gain(struct gspca_dev *gspca_dev, __s32 val);
96
97const static struct m5602_sensor mt9m111 = {
98 .name = "MT9M111", 117 .name = "MT9M111",
99 118
100 .i2c_slave_id = 0xba, 119 .i2c_slave_id = 0xba,
@@ -102,7 +121,8 @@ const static struct m5602_sensor mt9m111 = {
102 121
103 .probe = mt9m111_probe, 122 .probe = mt9m111_probe,
104 .init = mt9m111_init, 123 .init = mt9m111_init,
105 .power_down = mt9m111_power_down 124 .disconnect = mt9m111_disconnect,
125 .start = mt9m111_start,
106}; 126};
107 127
108static const unsigned char preinit_mt9m111[][4] = 128static const unsigned char preinit_mt9m111[][4] =
@@ -117,7 +137,14 @@ static const unsigned char preinit_mt9m111[][4] =
117 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00}, 137 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
118 138
119 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00}, 139 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
120 {SENSOR, MT9M111_SC_RESET, 0xff, 0xf7}, 140 {SENSOR, MT9M111_SC_RESET,
141 MT9M111_RESET |
142 MT9M111_RESTART |
143 MT9M111_ANALOG_STANDBY |
144 MT9M111_CHIP_DISABLE,
145 MT9M111_SHOW_BAD_FRAMES |
146 MT9M111_RESTART_BAD_FRAMES |
147 MT9M111_SYNCHRONIZE_CHANGES},
121 148
122 {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00}, 149 {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
123 {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00}, 150 {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
@@ -145,731 +172,42 @@ static const unsigned char init_mt9m111[][4] =
145 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, 172 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
146 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, 173 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
147 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00}, 174 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
148 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d, 0x00},
149 {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00, 0x00},
150 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
151 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00}, 175 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
152 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
153 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
154
155 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
156 {SENSOR, MT9M111_SC_RESET, 0xff, 0xff},
157 {SENSOR, MT9M111_SC_RESET, 0xff, 0xff},
158 {SENSOR, MT9M111_SC_RESET, 0xff, 0xde},
159 {SENSOR, MT9M111_SC_RESET, 0xff, 0xff},
160 {SENSOR, MT9M111_SC_RESET, 0xff, 0xf7},
161 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
162 176
163 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xb3, 0x00},
164
165 {SENSOR, MT9M111_CP_GLOBAL_CLK_CONTROL, 0xff, 0xff},
166
167 {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
168 {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
169 {BRIDGE, M5602_XB_GPIO_EN_H, 0x3e, 0x00},
170 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
171 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
172 {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
173 {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
174 {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
175 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04, 0x00},
176 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
177 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
178 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
179 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
180 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
181 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
182 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
183 {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
184 {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
185 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00}, 177 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
186 {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00}, 178 {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
187 {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00},
188
189 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
190 {SENSOR, MT9M111_SC_RESET, 0x00, 0x05},
191 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
192 {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
193 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
194 {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
195 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
196 {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00, 0x10},
197 {SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a},
198 {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00, 0x01},
199 {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00, 0x01},
200 {SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00},
201 {SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00},
202 {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00},
203 {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00},
204 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xcd, 0x00},
205
206 {SENSOR, 0xcd, 0x00, 0x0e},
207 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xd0, 0x00},
208 {SENSOR, 0xd0, 0x00, 0x40},
209 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02},
210 {SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00},
211 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
212 {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x07},
213 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
214 {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03},
215 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
216 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
217 {SENSOR, 0x33, 0x03, 0x49},
218 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
219
220 {SENSOR, 0x33, 0x03, 0x49},
221 {SENSOR, 0x34, 0xc0, 0x19},
222 {SENSOR, 0x3f, 0x20, 0x20},
223 {SENSOR, 0x40, 0x20, 0x20},
224 {SENSOR, 0x5a, 0xc0, 0x0a},
225 {SENSOR, 0x70, 0x7b, 0x0a},
226 {SENSOR, 0x71, 0xff, 0x00},
227 {SENSOR, 0x72, 0x19, 0x0e},
228 {SENSOR, 0x73, 0x18, 0x0f},
229 {SENSOR, 0x74, 0x57, 0x32},
230 {SENSOR, 0x75, 0x56, 0x34},
231 {SENSOR, 0x76, 0x73, 0x35},
232 {SENSOR, 0x77, 0x30, 0x12},
233 {SENSOR, 0x78, 0x79, 0x02},
234 {SENSOR, 0x79, 0x75, 0x06},
235 {SENSOR, 0x7a, 0x77, 0x0a},
236 {SENSOR, 0x7b, 0x78, 0x09},
237 {SENSOR, 0x7c, 0x7d, 0x06},
238 {SENSOR, 0x7d, 0x31, 0x10},
239 {SENSOR, 0x7e, 0x00, 0x7e},
240 {SENSOR, 0x80, 0x59, 0x04},
241 {SENSOR, 0x81, 0x59, 0x04},
242 {SENSOR, 0x82, 0x57, 0x0a},
243 {SENSOR, 0x83, 0x58, 0x0b},
244 {SENSOR, 0x84, 0x47, 0x0c},
245 {SENSOR, 0x85, 0x48, 0x0e},
246 {SENSOR, 0x86, 0x5b, 0x02},
247 {SENSOR, 0x87, 0x00, 0x5c},
248 {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, 0x08},
249 {SENSOR, 0x60, 0x00, 0x80},
250 {SENSOR, 0x61, 0x00, 0x00},
251 {SENSOR, 0x62, 0x00, 0x00},
252 {SENSOR, 0x63, 0x00, 0x00},
253 {SENSOR, 0x64, 0x00, 0x00},
254
255 {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d},
256 {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x18},
257 {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x04},
258 {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x08},
259 {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x38},
260 {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11},
261 {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x38},
262 {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11},
263 {SENSOR, MT9M111_SC_R_MODE_CONTEXT_B, 0x01, 0x03},
264 {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x03},
265 {SENSOR, 0x30, 0x04, 0x00},
266
267 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
268 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
269 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
270 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
271 {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
272 {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
273 {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
274 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
275 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
276 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
277 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
278 {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
279 {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
280 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
281 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
282 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
283 {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
284 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
285 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
286 {BRIDGE, M5602_XB_HSYNC_PARA, 0x05, 0x00},
287 {BRIDGE, M5602_XB_HSYNC_PARA, 0x07, 0x00},
288 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
289 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
290 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00},
291 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
292 {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0xf4},
293 {SENSOR, MT9M111_SC_GLOBAL_GAIN, 0x00, 0xea},
294
295 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
296 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
297 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
298 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
299 {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
300 {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
301 {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
302 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
303 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
304 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
305 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
306 {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
307 {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
308 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
309 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
310 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
311 {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
312 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
313 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
314 {BRIDGE, M5602_XB_HSYNC_PARA, 0x05, 0x00},
315 {BRIDGE, M5602_XB_HSYNC_PARA, 0x07, 0x00},
316 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
317 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
318 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
319
320 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
321 {SENSOR, MT9M111_SC_RESET, 0x00, 0x09},
322 {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
323 {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
324 {SENSOR, MT9M111_SC_RESET, 0x00, 0x0c},
325 {SENSOR, MT9M111_SC_RESET, 0x00, 0x04},
326 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
327 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xb3, 0x00},
328 {SENSOR, MT9M111_CP_GLOBAL_CLK_CONTROL, 0x00, 0x03},
329 {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
330 {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00}, 179 {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
331 {BRIDGE, M5602_XB_GPIO_EN_H, 0x3e, 0x00},
332 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00}, 180 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
333 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
334 {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
335 {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00}, 181 {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
336 {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
337 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04, 0x00},
338 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
339 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
340 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
341 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
342 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
343 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
344 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
345 {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
346 {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
347 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
348 {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
349 {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00},
350
351 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
352 {SENSOR, MT9M111_SC_RESET, 0x00, 0x05},
353 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
354 {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
355 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
356 {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
357 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
358 {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00, 0x10},
359 {SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a},
360 {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00, 0x01},
361 {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00, 0x01},
362 {SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00},
363 {SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00},
364 {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00},
365 {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00},
366
367 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xcd, 0x00},
368 {SENSOR, 0xcd, 0x00, 0x0e},
369 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xd0, 0x00},
370 {SENSOR, 0xd0, 0x00, 0x40},
371 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02},
372 {SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00},
373 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
374 {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x07},
375 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
376 {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03},
377 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
378 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
379 {SENSOR, 0x33, 0x03, 0x49},
380 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
381
382 {SENSOR, 0x33, 0x03, 0x49},
383 {SENSOR, 0x34, 0xc0, 0x19},
384 {SENSOR, 0x3f, 0x20, 0x20},
385 {SENSOR, 0x40, 0x20, 0x20},
386 {SENSOR, 0x5a, 0xc0, 0x0a},
387 {SENSOR, 0x70, 0x7b, 0x0a},
388 {SENSOR, 0x71, 0xff, 0x00},
389 {SENSOR, 0x72, 0x19, 0x0e},
390 {SENSOR, 0x73, 0x18, 0x0f},
391 {SENSOR, 0x74, 0x57, 0x32},
392 {SENSOR, 0x75, 0x56, 0x34},
393 {SENSOR, 0x76, 0x73, 0x35},
394 {SENSOR, 0x77, 0x30, 0x12},
395 {SENSOR, 0x78, 0x79, 0x02},
396 {SENSOR, 0x79, 0x75, 0x06},
397 {SENSOR, 0x7a, 0x77, 0x0a},
398 {SENSOR, 0x7b, 0x78, 0x09},
399 {SENSOR, 0x7c, 0x7d, 0x06},
400 {SENSOR, 0x7d, 0x31, 0x10},
401 {SENSOR, 0x7e, 0x00, 0x7e},
402 {SENSOR, 0x80, 0x59, 0x04},
403 {SENSOR, 0x81, 0x59, 0x04},
404 {SENSOR, 0x82, 0x57, 0x0a},
405 {SENSOR, 0x83, 0x58, 0x0b},
406 {SENSOR, 0x84, 0x47, 0x0c},
407 {SENSOR, 0x85, 0x48, 0x0e},
408 {SENSOR, 0x86, 0x5b, 0x02},
409 {SENSOR, 0x87, 0x00, 0x5c},
410 {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, 0x08},
411 {SENSOR, 0x60, 0x00, 0x80},
412 {SENSOR, 0x61, 0x00, 0x00},
413 {SENSOR, 0x62, 0x00, 0x00},
414 {SENSOR, 0x63, 0x00, 0x00},
415 {SENSOR, 0x64, 0x00, 0x00},
416
417 {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d},
418 {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x18},
419 {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x04},
420 {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x08},
421 {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x38},
422 {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11},
423 {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x38},
424 {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11},
425 {SENSOR, MT9M111_SC_R_MODE_CONTEXT_B, 0x01, 0x03},
426 {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x03},
427 {SENSOR, 0x30, 0x04, 0x00},
428
429 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
430 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
431 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
432 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
433 {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
434 {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
435 {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
436 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
437 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
438 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
439 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
440 {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
441 {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
442 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
443 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
444 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
445 {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
446 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
447 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
448 {BRIDGE, M5602_XB_HSYNC_PARA, 0x05, 0x00},
449 {BRIDGE, M5602_XB_HSYNC_PARA, 0x07, 0x00},
450 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
451 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
452 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00},
453 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
454 {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0xf4},
455 {SENSOR, MT9M111_SC_GLOBAL_GAIN, 0x00, 0xea},
456 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
457 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
458
459 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
460 {SENSOR, MT9M111_SC_RESET, 0x00, 0x09},
461 {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
462 {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
463 {SENSOR, MT9M111_SC_RESET, 0x00, 0x0c},
464 {SENSOR, MT9M111_SC_RESET, 0x00, 0x04},
465 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
466
467 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xb3, 0x00},
468 {SENSOR, MT9M111_CP_GLOBAL_CLK_CONTROL, 0x00, 0x03},
469 {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
470 {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
471 {BRIDGE, M5602_XB_GPIO_EN_H, 0x3e, 0x00},
472 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
473 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00}, 182 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
474 {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
475 {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
476 {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00}, 183 {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
477 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04, 0x00},
478 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
479 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
480 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
481 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
482 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
483 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
484 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
485 {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00}, 184 {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
486 {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00}, 185 {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
487 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
488 {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
489 {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00}, 186 {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00},
490 187
491 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
492 {SENSOR, MT9M111_SC_RESET, 0x00, 0x05},
493 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
494 {SENSOR, MT9M111_SC_RESET, 0x00, 0x29}, 188 {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
495 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00}, 189 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
496 {SENSOR, MT9M111_SC_RESET, 0x00, 0x08}, 190 {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
497 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01}, 191 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
498 {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00, 0x10}, 192 {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00,
193 MT9M111_CP_OPERATING_MODE_CTL},
499 {SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a}, 194 {SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a},
500 {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00, 0x01}, 195 {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00,
501 {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00, 0x01}, 196 MT9M111_2D_DEFECT_CORRECTION_ENABLE},
197 {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00,
198 MT9M111_2D_DEFECT_CORRECTION_ENABLE},
502 {SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00}, 199 {SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00},
503 {SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00}, 200 {SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00},
504 {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00}, 201 {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00},
505 {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00}, 202 {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00},
506
507 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xcd, 0x00},
508 {SENSOR, 0xcd, 0x00, 0x0e}, 203 {SENSOR, 0xcd, 0x00, 0x0e},
509 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xd0, 0x00},
510 {SENSOR, 0xd0, 0x00, 0x40}, 204 {SENSOR, 0xd0, 0x00, 0x40},
511 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02},
512 {SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00},
513 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
514 {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x07},
515 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
516 {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03},
517 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
518 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
519 {SENSOR, 0x33, 0x03, 0x49},
520 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
521
522 {SENSOR, 0x33, 0x03, 0x49},
523 {SENSOR, 0x34, 0xc0, 0x19},
524 {SENSOR, 0x3f, 0x20, 0x20},
525 {SENSOR, 0x40, 0x20, 0x20},
526 {SENSOR, 0x5a, 0xc0, 0x0a},
527 {SENSOR, 0x70, 0x7b, 0x0a},
528 {SENSOR, 0x71, 0xff, 0x00},
529 {SENSOR, 0x72, 0x19, 0x0e},
530 {SENSOR, 0x73, 0x18, 0x0f},
531 {SENSOR, 0x74, 0x57, 0x32},
532 {SENSOR, 0x75, 0x56, 0x34},
533 {SENSOR, 0x76, 0x73, 0x35},
534 {SENSOR, 0x77, 0x30, 0x12},
535 {SENSOR, 0x78, 0x79, 0x02},
536 {SENSOR, 0x79, 0x75, 0x06},
537 {SENSOR, 0x7a, 0x77, 0x0a},
538 {SENSOR, 0x7b, 0x78, 0x09},
539 {SENSOR, 0x7c, 0x7d, 0x06},
540 {SENSOR, 0x7d, 0x31, 0x10},
541 {SENSOR, 0x7e, 0x00, 0x7e},
542 {SENSOR, 0x80, 0x59, 0x04},
543 {SENSOR, 0x81, 0x59, 0x04},
544 {SENSOR, 0x82, 0x57, 0x0a},
545 {SENSOR, 0x83, 0x58, 0x0b},
546 {SENSOR, 0x84, 0x47, 0x0c},
547 {SENSOR, 0x85, 0x48, 0x0e},
548 {SENSOR, 0x86, 0x5b, 0x02},
549 {SENSOR, 0x87, 0x00, 0x5c},
550 {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, 0x08},
551 {SENSOR, 0x60, 0x00, 0x80},
552 {SENSOR, 0x61, 0x00, 0x00},
553 {SENSOR, 0x62, 0x00, 0x00},
554 {SENSOR, 0x63, 0x00, 0x00},
555 {SENSOR, 0x64, 0x00, 0x00},
556 205
557 {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d},
558 {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x18},
559 {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x04},
560 {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x08},
561 {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x38},
562 {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11},
563 {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x38},
564 {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11},
565 {SENSOR, MT9M111_SC_R_MODE_CONTEXT_B, 0x01, 0x03},
566 {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x03},
567 {SENSOR, 0x30, 0x04, 0x00},
568
569 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
570 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
571 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
572 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
573 {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
574 {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
575 {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
576 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
577 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
578 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
579 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
580 {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
581 {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
582 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
583 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
584 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
585 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
586 {BRIDGE, M5602_XB_HSYNC_PARA, 0x05, 0x00},
587 {BRIDGE, M5602_XB_HSYNC_PARA, 0x07, 0x00},
588 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
589 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
590 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00},
591 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
592 {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0xf4},
593 {SENSOR, MT9M111_SC_GLOBAL_GAIN, 0x00, 0xea},
594 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
595 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
596 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
597 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
598 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
599 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
600 {SENSOR, MT9M111_SC_RESET, 0x00, 0x09},
601 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
602 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
603 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
604 {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
605 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
606 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
607 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
608 {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
609 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
610 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
611 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
612 {SENSOR, MT9M111_SC_RESET, 0x00, 0x0c},
613 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
614 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
615 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
616 {SENSOR, MT9M111_SC_RESET, 0x00, 0x04},
617 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
618 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
619 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xb3, 0x00},
620 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
621 {SENSOR, MT9M111_CP_GLOBAL_CLK_CONTROL, 0x00, 0x03},
622 {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
623 {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
624 {BRIDGE, M5602_XB_GPIO_EN_H, 0x3e, 0x00},
625 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
626 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
627 {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
628 {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
629 {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
630 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04, 0x00},
631 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
632 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
633 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
634 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
635 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
636 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
637 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
638 {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
639 {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
640 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
641 {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
642 {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00},
643 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
644 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
645 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
646 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
647 {SENSOR, MT9M111_SC_RESET, 0x00, 0x05},
648 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
649 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
650 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
651 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
652 {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
653 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
654 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
655 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
656 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
657
658 {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
659 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
660 {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00, 0x10},
661 {SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a},
662 {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00, 0x01},
663 {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00, 0x01},
664 {SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00},
665 {SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00},
666 {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00},
667 {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00},
668
669 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
670 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xcd, 0x00},
671 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
672 {SENSOR, 0xcd, 0x00, 0x0e},
673 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
674 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xd0, 0x00},
675 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
676 {SENSOR, 0xd0, 0x00, 0x40},
677 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02}, 206 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02},
678 {SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00}, 207 {SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00},
679 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
680 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
681 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
682 {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x07},
683 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
684 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
685 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
686 {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03}, 208 {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03},
687 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
688 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
689 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
690 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
691 {SENSOR, 0x33, 0x03, 0x49},
692 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
693 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
694 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
695
696 {SENSOR, 0x33, 0x03, 0x49},
697 {SENSOR, 0x34, 0xc0, 0x19},
698 {SENSOR, 0x3f, 0x20, 0x20},
699 {SENSOR, 0x40, 0x20, 0x20},
700 {SENSOR, 0x5a, 0xc0, 0x0a},
701 {SENSOR, 0x70, 0x7b, 0x0a},
702 {SENSOR, 0x71, 0xff, 0x00},
703 {SENSOR, 0x72, 0x19, 0x0e},
704 {SENSOR, 0x73, 0x18, 0x0f},
705 {SENSOR, 0x74, 0x57, 0x32},
706 {SENSOR, 0x75, 0x56, 0x34},
707 {SENSOR, 0x76, 0x73, 0x35},
708 {SENSOR, 0x77, 0x30, 0x12},
709 {SENSOR, 0x78, 0x79, 0x02},
710 {SENSOR, 0x79, 0x75, 0x06},
711 {SENSOR, 0x7a, 0x77, 0x0a},
712 {SENSOR, 0x7b, 0x78, 0x09},
713 {SENSOR, 0x7c, 0x7d, 0x06},
714 {SENSOR, 0x7d, 0x31, 0x10},
715 {SENSOR, 0x7e, 0x00, 0x7e},
716 {SENSOR, 0x80, 0x59, 0x04},
717 {SENSOR, 0x81, 0x59, 0x04},
718 {SENSOR, 0x82, 0x57, 0x0a},
719 {SENSOR, 0x83, 0x58, 0x0b},
720 {SENSOR, 0x84, 0x47, 0x0c},
721 {SENSOR, 0x85, 0x48, 0x0e},
722 {SENSOR, 0x86, 0x5b, 0x02},
723 {SENSOR, 0x87, 0x00, 0x5c},
724 {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, 0x08},
725 {SENSOR, 0x60, 0x00, 0x80},
726 {SENSOR, 0x61, 0x00, 0x00},
727 {SENSOR, 0x62, 0x00, 0x00},
728 {SENSOR, 0x63, 0x00, 0x00},
729 {SENSOR, 0x64, 0x00, 0x00},
730 {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d},
731 {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x12},
732 {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x00},
733 {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x10},
734 {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x60},
735 {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11},
736 {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x60},
737 {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11},
738 {SENSOR, MT9M111_SC_R_MODE_CONTEXT_B, 0x01, 0x0f},
739 {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x0f},
740 {SENSOR, 0x30, 0x04, 0x00},
741
742 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
743 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
744 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
745 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
746 {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
747 {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
748 {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
749 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
750 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
751 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
752 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
753 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
754 {BRIDGE, M5602_XB_VSYNC_PARA, 0xe3, 0x00},
755 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
756 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
757 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
758 {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
759 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
760 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
761 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
762 {BRIDGE, M5602_XB_HSYNC_PARA, 0x87, 0x00},
763 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
764 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
765 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
766
767 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
768 {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0x90},
769 {SENSOR, MT9M111_SC_GLOBAL_GAIN, 0x00, 0xe6},
770 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
771 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
772 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
773 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
774 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
775 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
776 {SENSOR, MT9M111_SC_RESET, 0x00, 0x09},
777 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
778 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
779 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
780 {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
781 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
782 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
783 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
784 {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
785 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
786 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
787 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
788 {SENSOR, MT9M111_SC_RESET, 0x00, 0x0c},
789 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
790 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
791 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
792 {SENSOR, MT9M111_SC_RESET, 0x00, 0x04},
793 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
794 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
795 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xb3, 0x00},
796 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
797 {SENSOR, MT9M111_CP_GLOBAL_CLK_CONTROL, 0x00, 0x03},
798 209
799 {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
800 {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
801 {BRIDGE, M5602_XB_GPIO_EN_H, 0x3e, 0x00},
802 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
803 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
804 {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
805 {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
806 {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
807 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04, 0x00},
808 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
809 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
810 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
811 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
812 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
813 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
814 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
815 {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
816 {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
817 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
818 {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
819 {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00},
820 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
821 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
822 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
823 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
824 {SENSOR, MT9M111_SC_RESET, 0x00, 0x05},
825 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00}, 210 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
826 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
827 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
828 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
829 {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
830 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
831 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
832 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x0d, 0x00},
833 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
834 {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
835 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
836 {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00, 0x10},
837 {SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a},
838 {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00, 0x01},
839 {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00, 0x01},
840 {SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00},
841 {SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00},
842 {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00},
843 {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00},
844
845 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
846 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xcd, 0x00},
847 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
848 {SENSOR, 0xcd, 0x00, 0x0e},
849 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
850 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0xd0, 0x00},
851 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
852 {SENSOR, 0xd0, 0x00, 0x40},
853 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02},
854 {SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00},
855 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
856 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
857 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
858 {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x07},
859 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
860 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x28, 0x00},
861 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
862 {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03},
863 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
864
865 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
866 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
867 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
868 {SENSOR, 0x33, 0x03, 0x49},
869 {BRIDGE, M5602_XB_I2C_DEV_ADDR, 0xba, 0x00},
870 {BRIDGE, M5602_XB_I2C_REG_ADDR, 0x33, 0x00},
871 {BRIDGE, M5602_XB_I2C_CTRL, 0x1a, 0x00},
872
873 {SENSOR, 0x33, 0x03, 0x49}, 211 {SENSOR, 0x33, 0x03, 0x49},
874 {SENSOR, 0x34, 0xc0, 0x19}, 212 {SENSOR, 0x34, 0xc0, 0x19},
875 {SENSOR, 0x3f, 0x20, 0x20}, 213 {SENSOR, 0x3f, 0x20, 0x20},
@@ -898,25 +236,29 @@ static const unsigned char init_mt9m111[][4] =
898 {SENSOR, 0x85, 0x48, 0x0e}, 236 {SENSOR, 0x85, 0x48, 0x0e},
899 {SENSOR, 0x86, 0x5b, 0x02}, 237 {SENSOR, 0x86, 0x5b, 0x02},
900 {SENSOR, 0x87, 0x00, 0x5c}, 238 {SENSOR, 0x87, 0x00, 0x5c},
901 {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, 0x08}, 239 {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, MT9M111_SEL_CONTEXT_B},
902 {SENSOR, 0x60, 0x00, 0x80}, 240 {SENSOR, 0x60, 0x00, 0x80},
903 {SENSOR, 0x61, 0x00, 0x00}, 241 {SENSOR, 0x61, 0x00, 0x00},
904 {SENSOR, 0x62, 0x00, 0x00}, 242 {SENSOR, 0x62, 0x00, 0x00},
905 {SENSOR, 0x63, 0x00, 0x00}, 243 {SENSOR, 0x63, 0x00, 0x00},
906 {SENSOR, 0x64, 0x00, 0x00}, 244 {SENSOR, 0x64, 0x00, 0x00},
907 245
908 {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d}, 246 {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d}, /* 13 */
909 {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x12}, 247 {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x12}, /* 18 */
910 {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x00}, 248 {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x00}, /* 1024 */
911 {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x10}, 249 {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x10}, /* 1296 */
912 {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x60}, 250 {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x60}, /* 352 */
913 {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11}, 251 {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11}, /* 17 */
914 {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x60}, 252 {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x60}, /* 352 */
915 {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11}, 253 {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11}, /* 17 */
916 {SENSOR, MT9M111_SC_R_MODE_CONTEXT_B, 0x01, 0x0f}, 254 {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x0f}, /* 271 */
917 {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x0f},
918 {SENSOR, 0x30, 0x04, 0x00}, 255 {SENSOR, 0x30, 0x04, 0x00},
256 /* Set number of blank rows chosen to 400 */
257 {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0x90},
258};
919 259
260static const unsigned char start_mt9m111[][4] =
261{
920 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00}, 262 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
921 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, 263 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
922 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00}, 264 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -928,25 +270,6 @@ static const unsigned char init_mt9m111[][4] =
928 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, 270 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
929 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, 271 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
930 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, 272 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
931 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
932 {BRIDGE, M5602_XB_VSYNC_PARA, 0xe0, 0x00},
933 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
934 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
935 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
936 {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
937 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
938 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
939 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00}, /* 639*/
940 {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00},
941 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
942 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
943 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
944
945 {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
946 /* Set number of blank rows chosen to 400 */
947 {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0x90},
948 /* Set the global gain to 283 (of 512) */
949 {SENSOR, MT9M111_SC_GLOBAL_GAIN, 0x03, 0x63}
950}; 273};
951 274
952#endif 275#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.c b/drivers/media/video/gspca/m5602/m5602_ov7660.c
new file mode 100644
index 000000000000..7aafeb7cfa07
--- /dev/null
+++ b/drivers/media/video/gspca/m5602/m5602_ov7660.c
@@ -0,0 +1,227 @@
1/*
2 * Driver for the ov7660 sensor
3 *
4 * Copyright (C) 2009 Erik Andrén
5 * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
6 * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
7 *
8 * Portions of code to USB interface and ALi driver software,
9 * Copyright (c) 2006 Willem Duinker
10 * v4l2 interface modeled after the V4L2 driver
11 * for SN9C10x PC Camera Controllers
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License as
15 * published by the Free Software Foundation, version 2.
16 *
17 */
18
19#include "m5602_ov7660.h"
20
21static int ov7660_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
22static int ov7660_set_gain(struct gspca_dev *gspca_dev, __s32 val);
23
24const static struct ctrl ov7660_ctrls[] = {
25#define GAIN_IDX 1
26 {
27 {
28 .id = V4L2_CID_GAIN,
29 .type = V4L2_CTRL_TYPE_INTEGER,
30 .name = "gain",
31 .minimum = 0x00,
32 .maximum = 0xff,
33 .step = 0x1,
34 .default_value = OV7660_DEFAULT_GAIN,
35 .flags = V4L2_CTRL_FLAG_SLIDER
36 },
37 .set = ov7660_set_gain,
38 .get = ov7660_get_gain
39 },
40};
41
42static struct v4l2_pix_format ov7660_modes[] = {
43 {
44 640,
45 480,
46 V4L2_PIX_FMT_SBGGR8,
47 V4L2_FIELD_NONE,
48 .sizeimage =
49 640 * 480,
50 .bytesperline = 640,
51 .colorspace = V4L2_COLORSPACE_SRGB,
52 .priv = 0
53 }
54};
55
56static void ov7660_dump_registers(struct sd *sd);
57
58int ov7660_probe(struct sd *sd)
59{
60 int err = 0, i;
61 u8 prod_id = 0, ver_id = 0;
62
63 s32 *sensor_settings;
64
65 if (force_sensor) {
66 if (force_sensor == OV7660_SENSOR) {
67 info("Forcing an %s sensor", ov7660.name);
68 goto sensor_found;
69 }
70 /* If we want to force another sensor,
71 don't try to probe this one */
72 return -ENODEV;
73 }
74
75 /* Do the preinit */
76 for (i = 0; i < ARRAY_SIZE(preinit_ov7660) && !err; i++) {
77 u8 data[2];
78
79 if (preinit_ov7660[i][0] == BRIDGE) {
80 err = m5602_write_bridge(sd,
81 preinit_ov7660[i][1],
82 preinit_ov7660[i][2]);
83 } else {
84 data[0] = preinit_ov7660[i][2];
85 err = m5602_write_sensor(sd,
86 preinit_ov7660[i][1], data, 1);
87 }
88 }
89 if (err < 0)
90 return err;
91
92 if (m5602_read_sensor(sd, OV7660_PID, &prod_id, 1))
93 return -ENODEV;
94
95 if (m5602_read_sensor(sd, OV7660_VER, &ver_id, 1))
96 return -ENODEV;
97
98 info("Sensor reported 0x%x%x", prod_id, ver_id);
99
100 if ((prod_id == 0x76) && (ver_id == 0x60)) {
101 info("Detected a ov7660 sensor");
102 goto sensor_found;
103 }
104 return -ENODEV;
105
106sensor_found:
107 sensor_settings = kmalloc(
108 ARRAY_SIZE(ov7660_ctrls) * sizeof(s32), GFP_KERNEL);
109 if (!sensor_settings)
110 return -ENOMEM;
111
112 sd->gspca_dev.cam.cam_mode = ov7660_modes;
113 sd->gspca_dev.cam.nmodes = ARRAY_SIZE(ov7660_modes);
114 sd->desc->ctrls = ov7660_ctrls;
115 sd->desc->nctrls = ARRAY_SIZE(ov7660_ctrls);
116
117 for (i = 0; i < ARRAY_SIZE(ov7660_ctrls); i++)
118 sensor_settings[i] = ov7660_ctrls[i].qctrl.default_value;
119 sd->sensor_priv = sensor_settings;
120
121 return 0;
122}
123
124int ov7660_init(struct sd *sd)
125{
126 int i, err = 0;
127 s32 *sensor_settings = sd->sensor_priv;
128
129 /* Init the sensor */
130 for (i = 0; i < ARRAY_SIZE(init_ov7660); i++) {
131 u8 data[2];
132
133 if (init_ov7660[i][0] == BRIDGE) {
134 err = m5602_write_bridge(sd,
135 init_ov7660[i][1],
136 init_ov7660[i][2]);
137 } else {
138 data[0] = init_ov7660[i][2];
139 err = m5602_write_sensor(sd,
140 init_ov7660[i][1], data, 1);
141 }
142 }
143
144 if (dump_sensor)
145 ov7660_dump_registers(sd);
146
147 err = ov7660_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
148 if (err < 0)
149 return err;
150
151 return err;
152}
153
154int ov7660_start(struct sd *sd)
155{
156 return 0;
157}
158
159int ov7660_stop(struct sd *sd)
160{
161 return 0;
162}
163
164void ov7660_disconnect(struct sd *sd)
165{
166 ov7660_stop(sd);
167
168 sd->sensor = NULL;
169 kfree(sd->sensor_priv);
170}
171
172static int ov7660_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
173{
174 struct sd *sd = (struct sd *) gspca_dev;
175 s32 *sensor_settings = sd->sensor_priv;
176
177 *val = sensor_settings[GAIN_IDX];
178 PDEBUG(D_V4L2, "Read gain %d", *val);
179 return 0;
180}
181
182static int ov7660_set_gain(struct gspca_dev *gspca_dev, __s32 val)
183{
184 int err;
185 u8 i2c_data;
186 struct sd *sd = (struct sd *) gspca_dev;
187 s32 *sensor_settings = sd->sensor_priv;
188
189 PDEBUG(D_V4L2, "Setting gain to %d", val);
190
191 sensor_settings[GAIN_IDX] = val;
192
193 err = m5602_write_sensor(sd, OV7660_GAIN, &i2c_data, 1);
194 return err;
195}
196
197static void ov7660_dump_registers(struct sd *sd)
198{
199 int address;
200 info("Dumping the ov7660 register state");
201 for (address = 0; address < 0xa9; address++) {
202 u8 value;
203 m5602_read_sensor(sd, address, &value, 1);
204 info("register 0x%x contains 0x%x",
205 address, value);
206 }
207
208 info("ov7660 register state dump complete");
209
210 info("Probing for which registers that are read/write");
211 for (address = 0; address < 0xff; address++) {
212 u8 old_value, ctrl_value;
213 u8 test_value[2] = {0xff, 0xff};
214
215 m5602_read_sensor(sd, address, &old_value, 1);
216 m5602_write_sensor(sd, address, test_value, 1);
217 m5602_read_sensor(sd, address, &ctrl_value, 1);
218
219 if (ctrl_value == test_value[0])
220 info("register 0x%x is writeable", address);
221 else
222 info("register 0x%x is read only", address);
223
224 /* Restore original value */
225 m5602_write_sensor(sd, address, &old_value, 1);
226 }
227}
diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.h b/drivers/media/video/gspca/m5602/m5602_ov7660.h
new file mode 100644
index 000000000000..3f2c169a93ea
--- /dev/null
+++ b/drivers/media/video/gspca/m5602/m5602_ov7660.h
@@ -0,0 +1,279 @@
1/*
2 * Driver for the ov7660 sensor
3 *
4 * Copyright (C) 2009 Erik Andrén
5 * Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
6 * Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
7 *
8 * Portions of code to USB interface and ALi driver software,
9 * Copyright (c) 2006 Willem Duinker
10 * v4l2 interface modeled after the V4L2 driver
11 * for SN9C10x PC Camera Controllers
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License as
15 * published by the Free Software Foundation, version 2.
16 *
17 */
18
19#ifndef M5602_OV7660_H_
20#define M5602_OV7660_H_
21
22#include "m5602_sensor.h"
23
24#define OV7660_GAIN 0x00
25#define OV7660_BLUE_GAIN 0x01
26#define OV7660_RED_GAIN 0x02
27#define OV7660_VREF 0x03
28#define OV7660_COM1 0x04
29#define OV7660_BAVE 0x05
30#define OV7660_GEAVE 0x06
31#define OV7660_AECHH 0x07
32#define OV7660_RAVE 0x08
33#define OV7660_COM2 0x09
34#define OV7660_PID 0x0a
35#define OV7660_VER 0x0b
36#define OV7660_COM3 0x0c
37#define OV7660_COM4 0x0d
38#define OV7660_COM5 0x0e
39#define OV7660_COM6 0x0f
40#define OV7660_AECH 0x10
41#define OV7660_CLKRC 0x11
42#define OV7660_COM7 0x12
43#define OV7660_COM8 0x13
44#define OV7660_COM9 0x14
45#define OV7660_COM10 0x15
46#define OV7660_RSVD16 0x16
47#define OV7660_HSTART 0x17
48#define OV7660_HSTOP 0x18
49#define OV7660_VSTART 0x19
50#define OV7660_VSTOP 0x1a
51#define OV7660_PSHFT 0x1b
52#define OV7660_MIDH 0x1c
53#define OV7660_MIDL 0x1d
54#define OV7660_MVFP 0x1e
55#define OV7660_LAEC 0x1f
56#define OV7660_BOS 0x20
57#define OV7660_GBOS 0x21
58#define OV7660_GROS 0x22
59#define OV7660_ROS 0x23
60#define OV7660_AEW 0x24
61#define OV7660_AEB 0x25
62#define OV7660_VPT 0x26
63#define OV7660_BBIAS 0x27
64#define OV7660_GbBIAS 0x28
65#define OV7660_RSVD29 0x29
66#define OV7660_RBIAS 0x2c
67#define OV7660_HREF 0x32
68#define OV7660_ADC 0x37
69#define OV7660_OFON 0x39
70#define OV7660_TSLB 0x3a
71#define OV7660_COM12 0x3c
72#define OV7660_COM13 0x3d
73#define OV7660_LCC1 0x62
74#define OV7660_LCC2 0x63
75#define OV7660_LCC3 0x64
76#define OV7660_LCC4 0x65
77#define OV7660_LCC5 0x66
78#define OV7660_HV 0x69
79#define OV7660_RSVDA1 0xa1
80
81#define OV7660_DEFAULT_GAIN 0x0e
82#define OV7660_DEFAULT_RED_GAIN 0x80
83#define OV7660_DEFAULT_BLUE_GAIN 0x80
84#define OV7660_DEFAULT_SATURATION 0x00
85#define OV7660_DEFAULT_EXPOSURE 0x20
86
87/* Kernel module parameters */
88extern int force_sensor;
89extern int dump_sensor;
90
91int ov7660_probe(struct sd *sd);
92int ov7660_init(struct sd *sd);
93int ov7660_start(struct sd *sd);
94int ov7660_stop(struct sd *sd);
95void ov7660_disconnect(struct sd *sd);
96
97const static struct m5602_sensor ov7660 = {
98 .name = "ov7660",
99 .i2c_slave_id = 0x42,
100 .i2c_regW = 1,
101 .probe = ov7660_probe,
102 .init = ov7660_init,
103 .start = ov7660_start,
104 .stop = ov7660_stop,
105 .disconnect = ov7660_disconnect,
106};
107
/* Register writes issued before sensor probing: each row is
   {target, register, value}, where target selects the m5602 bridge
   (BRIDGE) or the ov7660 itself over i2c (SENSOR). Rows are declared
   with 4 slots but only 3 are initialized; the 4th is zero.
   NOTE(review): values appear to set up bridge clocks/GPIOs and put the
   sensor in a known state — exact semantics come from the m5602 and
   ov7660 datasheets; not verifiable from this file alone. */
static const unsigned char preinit_ov7660[][4] =
{
	{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
	{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
	{BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d},
	{BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
	{BRIDGE, M5602_XB_GPIO_DIR, 0x03},
	{BRIDGE, M5602_XB_GPIO_DIR, 0x03},
	{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
	{BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},

	{SENSOR, OV7660_OFON, 0x0c},
	{SENSOR, OV7660_COM2, 0x11},
	{SENSOR, OV7660_COM7, 0x05},

	{BRIDGE, M5602_XB_GPIO_DIR, 0x01},
	{BRIDGE, M5602_XB_GPIO_DAT, 0x04},
	{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
	{BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
	{BRIDGE, M5602_XB_GPIO_DIR, 0x05},
	{BRIDGE, M5602_XB_GPIO_DAT, 0x00},
	{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_EN_L, 0x00}
};
142
/* Full initialization sequence replayed by ov7660_init(): rows are
   {target, register, value} writes to the bridge (BRIDGE) or the
   sensor over i2c (SENSOR); declared with 4 slots per row, 3 used.
   The sequence repeats the preinit bridge setup, then programs sensor
   exposure/gain/windowing defaults and the bridge sync parameters.
   NOTE(review): individual register meanings come from the m5602 and
   ov7660 datasheets; only the commented VSYNC values are self-evident. */
static const unsigned char init_ov7660[][4] =
{
	{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
	{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
	{BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d},
	{BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
	{BRIDGE, M5602_XB_GPIO_DIR, 0x03},
	{BRIDGE, M5602_XB_GPIO_DIR, 0x03},
	{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
	{BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},

	{SENSOR, OV7660_OFON, 0x0c},
	{SENSOR, OV7660_COM2, 0x11},
	{SENSOR, OV7660_COM7, 0x05},

	{BRIDGE, M5602_XB_GPIO_DIR, 0x01},
	{BRIDGE, M5602_XB_GPIO_DAT, 0x04},
	{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
	{BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
	{BRIDGE, M5602_XB_GPIO_DIR, 0x05},
	{BRIDGE, M5602_XB_GPIO_DAT, 0x00},
	{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_EN_L, 0x00},

	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x02},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},

	/* Default exposure, defined alongside the other defaults above */
	{SENSOR, OV7660_AECH, OV7660_DEFAULT_EXPOSURE},
	{SENSOR, OV7660_COM1, 0x00},

	{BRIDGE, M5602_XB_GPIO_DIR, 0x01},
	{BRIDGE, M5602_XB_GPIO_DAT, 0x04},
	{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},

	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
	{BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
	{BRIDGE, M5602_XB_GPIO_DIR, 0x05},
	{BRIDGE, M5602_XB_GPIO_DAT, 0x00},
	{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_EN_L, 0x00},

	/* Sensor register defaults: gains, windowing, biases, lens
	   correction (LCC1-5) */
	{SENSOR, OV7660_COM7, 0x80},
	{SENSOR, OV7660_CLKRC, 0x80},
	{SENSOR, OV7660_BLUE_GAIN, 0x80},
	{SENSOR, OV7660_RED_GAIN, 0x80},
	{SENSOR, OV7660_COM9, 0x4c},
	{SENSOR, OV7660_OFON, 0x43},
	{SENSOR, OV7660_COM12, 0x28},
	{SENSOR, OV7660_COM8, 0x00},
	{SENSOR, OV7660_COM10, 0x40},
	{SENSOR, OV7660_HSTART, 0x0c},
	{SENSOR, OV7660_HSTOP, 0x61},
	{SENSOR, OV7660_HREF, 0xa4},
	{SENSOR, OV7660_PSHFT, 0x0b},
	{SENSOR, OV7660_VSTART, 0x01},
	{SENSOR, OV7660_VSTOP, 0x7a},
	{SENSOR, OV7660_VREF, 0x00},
	{SENSOR, OV7660_COM7, 0x05},
	{SENSOR, OV7660_COM6, 0x4b},
	{SENSOR, OV7660_BBIAS, 0x98},
	{SENSOR, OV7660_GbBIAS, 0x98},
	{SENSOR, OV7660_RSVD29, 0x98},
	{SENSOR, OV7660_RBIAS, 0x98},
	{SENSOR, OV7660_COM1, 0x00},
	{SENSOR, OV7660_AECH, 0x00},
	{SENSOR, OV7660_AECHH, 0x00},
	{SENSOR, OV7660_ADC, 0x04},
	{SENSOR, OV7660_COM13, 0x00},
	{SENSOR, OV7660_RSVDA1, 0x23},
	{SENSOR, OV7660_TSLB, 0x0d},
	{SENSOR, OV7660_HV, 0x80},
	{SENSOR, OV7660_LCC1, 0x00},
	{SENSOR, OV7660_LCC2, 0x00},
	{SENSOR, OV7660_LCC3, 0x10},
	{SENSOR, OV7660_LCC4, 0x40},
	{SENSOR, OV7660_LCC5, 0x01},

	/* Bridge frame/line sync setup */
	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
	{BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
	{BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81},
	{BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
	{BRIDGE, M5602_XB_SIG_INI, 0x01},
	{BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
	{BRIDGE, M5602_XB_VSYNC_PARA, 0x08},
	{BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
	{BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
	{BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
	{BRIDGE, M5602_XB_VSYNC_PARA, 0xe0}, /* 480 */
	{BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
	{BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
	{BRIDGE, M5602_XB_SIG_INI, 0x00},
	{BRIDGE, M5602_XB_SIG_INI, 0x02},
	{BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
	{BRIDGE, M5602_XB_VSYNC_PARA, 0x27}, /* 39 */
	{BRIDGE, M5602_XB_VSYNC_PARA, 0x02},
	{BRIDGE, M5602_XB_VSYNC_PARA, 0xa7}, /* 679 */
	{BRIDGE, M5602_XB_SIG_INI, 0x00},

	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},

	{SENSOR, OV7660_AECH, 0x20},
	{SENSOR, OV7660_COM1, 0x00},
	{SENSOR, OV7660_OFON, 0x0c},
	{SENSOR, OV7660_COM2, 0x11},
	{SENSOR, OV7660_COM7, 0x05},
	{SENSOR, OV7660_BLUE_GAIN, 0x80},
	{SENSOR, OV7660_RED_GAIN, 0x80},

	{BRIDGE, M5602_XB_GPIO_DIR, 0x01},
	{BRIDGE, M5602_XB_GPIO_DAT, 0x04},
	{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}
};
278
279#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.c b/drivers/media/video/gspca/m5602/m5602_ov9650.c
index fc4548fd441d..c2739d6605a1 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.c
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.c
@@ -18,44 +18,87 @@
18 18
19#include "m5602_ov9650.h" 19#include "m5602_ov9650.h"
20 20
21static int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
22static int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
23static int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
24static int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val);
25static int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
26static int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
27static int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
28static int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
29static int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
30static int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
31static int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
32static int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
33static int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev,
34 __s32 *val);
35static int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev,
36 __s32 val);
37static int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val);
38static int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val);
39static int ov9650_get_auto_exposure(struct gspca_dev *gspca_dev, __s32 *val);
40static int ov9650_set_auto_exposure(struct gspca_dev *gspca_dev, __s32 val);
41
21/* Vertically and horizontally flips the image if matched, needed for machines 42/* Vertically and horizontally flips the image if matched, needed for machines
22 where the sensor is mounted upside down */ 43 where the sensor is mounted upside down */
23static 44static
24 const 45 const
25 struct dmi_system_id ov9650_flip_dmi_table[] = { 46 struct dmi_system_id ov9650_flip_dmi_table[] = {
26 { 47 {
27 .ident = "ASUS A6VC", 48 .ident = "ASUS A6Ja",
28 .matches = { 49 .matches = {
29 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 50 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
30 DMI_MATCH(DMI_PRODUCT_NAME, "A6VC") 51 DMI_MATCH(DMI_PRODUCT_NAME, "A6J")
31 } 52 }
32 }, 53 },
33 { 54 {
34 .ident = "ASUS A6VM", 55 .ident = "ASUS A6JC",
35 .matches = { 56 .matches = {
36 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 57 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
37 DMI_MATCH(DMI_PRODUCT_NAME, "A6VM") 58 DMI_MATCH(DMI_PRODUCT_NAME, "A6JC")
38 } 59 }
39 }, 60 },
40 { 61 {
41 .ident = "ASUS A6JC", 62 .ident = "ASUS A6K",
42 .matches = { 63 .matches = {
43 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 64 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
44 DMI_MATCH(DMI_PRODUCT_NAME, "A6JC") 65 DMI_MATCH(DMI_PRODUCT_NAME, "A6K")
45 } 66 }
46 }, 67 },
47 { 68 {
48 .ident = "ASUS A6Ja", 69 .ident = "ASUS A6Kt",
49 .matches = { 70 .matches = {
50 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 71 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
51 DMI_MATCH(DMI_PRODUCT_NAME, "A6J") 72 DMI_MATCH(DMI_PRODUCT_NAME, "A6Kt")
52 } 73 }
53 }, 74 },
54 { 75 {
55 .ident = "ASUS A6Kt", 76 .ident = "ASUS A6VA",
56 .matches = { 77 .matches = {
57 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), 78 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
58 DMI_MATCH(DMI_PRODUCT_NAME, "A6Kt") 79 DMI_MATCH(DMI_PRODUCT_NAME, "A6VA")
80 }
81 },
82 {
83
84 .ident = "ASUS A6VC",
85 .matches = {
86 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
87 DMI_MATCH(DMI_PRODUCT_NAME, "A6VC")
88 }
89 },
90 {
91 .ident = "ASUS A6VM",
92 .matches = {
93 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
94 DMI_MATCH(DMI_PRODUCT_NAME, "A6VM")
95 }
96 },
97 {
98 .ident = "ASUS A7V",
99 .matches = {
100 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
101 DMI_MATCH(DMI_PRODUCT_NAME, "A7V")
59 } 102 }
60 }, 103 },
61 { 104 {
@@ -68,7 +111,7 @@ static
68 {} 111 {}
69}; 112};
70 113
71const static struct ctrl ov9650_ctrls[] = { 114static const struct ctrl ov9650_ctrls[] = {
72#define EXPOSURE_IDX 0 115#define EXPOSURE_IDX 0
73 { 116 {
74 { 117 {
@@ -102,6 +145,7 @@ const static struct ctrl ov9650_ctrls[] = {
102#define RED_BALANCE_IDX 2 145#define RED_BALANCE_IDX 2
103 { 146 {
104 { 147 {
148 .id = V4L2_CID_RED_BALANCE,
105 .type = V4L2_CTRL_TYPE_INTEGER, 149 .type = V4L2_CTRL_TYPE_INTEGER,
106 .name = "red balance", 150 .name = "red balance",
107 .minimum = 0x00, 151 .minimum = 0x00,
@@ -116,6 +160,7 @@ const static struct ctrl ov9650_ctrls[] = {
116#define BLUE_BALANCE_IDX 3 160#define BLUE_BALANCE_IDX 3
117 { 161 {
118 { 162 {
163 .id = V4L2_CID_BLUE_BALANCE,
119 .type = V4L2_CTRL_TYPE_INTEGER, 164 .type = V4L2_CTRL_TYPE_INTEGER,
120 .name = "blue balance", 165 .name = "blue balance",
121 .minimum = 0x00, 166 .minimum = 0x00,
@@ -182,7 +227,22 @@ const static struct ctrl ov9650_ctrls[] = {
182 }, 227 },
183 .set = ov9650_set_auto_gain, 228 .set = ov9650_set_auto_gain,
184 .get = ov9650_get_auto_gain 229 .get = ov9650_get_auto_gain
230 },
231#define AUTO_EXPOSURE_IDX 8
232 {
233 {
234 .id = V4L2_CID_EXPOSURE_AUTO,
235 .type = V4L2_CTRL_TYPE_BOOLEAN,
236 .name = "auto exposure",
237 .minimum = 0,
238 .maximum = 1,
239 .step = 1,
240 .default_value = 1
241 },
242 .set = ov9650_set_auto_exposure,
243 .get = ov9650_get_auto_exposure
185 } 244 }
245
186}; 246};
187 247
188static struct v4l2_pix_format ov9650_modes[] = { 248static struct v4l2_pix_format ov9650_modes[] = {
@@ -289,12 +349,6 @@ sensor_found:
289 for (i = 0; i < ARRAY_SIZE(ov9650_ctrls); i++) 349 for (i = 0; i < ARRAY_SIZE(ov9650_ctrls); i++)
290 sensor_settings[i] = ov9650_ctrls[i].qctrl.default_value; 350 sensor_settings[i] = ov9650_ctrls[i].qctrl.default_value;
291 sd->sensor_priv = sensor_settings; 351 sd->sensor_priv = sensor_settings;
292
293 if (dmi_check_system(ov9650_flip_dmi_table) && !err) {
294 info("vflip quirk active");
295 sensor_settings[VFLIP_IDX] = 1;
296 }
297
298 return 0; 352 return 0;
299} 353}
300 354
@@ -316,7 +370,8 @@ int ov9650_init(struct sd *sd)
316 err = m5602_write_bridge(sd, init_ov9650[i][1], data); 370 err = m5602_write_bridge(sd, init_ov9650[i][1], data);
317 } 371 }
318 372
319 err = ov9650_set_exposure(&sd->gspca_dev, sensor_settings[EXPOSURE_IDX]); 373 err = ov9650_set_exposure(&sd->gspca_dev,
374 sensor_settings[EXPOSURE_IDX]);
320 if (err < 0) 375 if (err < 0)
321 return err; 376 return err;
322 377
@@ -324,11 +379,13 @@ int ov9650_init(struct sd *sd)
324 if (err < 0) 379 if (err < 0)
325 return err; 380 return err;
326 381
327 err = ov9650_set_red_balance(&sd->gspca_dev, sensor_settings[RED_BALANCE_IDX]); 382 err = ov9650_set_red_balance(&sd->gspca_dev,
383 sensor_settings[RED_BALANCE_IDX]);
328 if (err < 0) 384 if (err < 0)
329 return err; 385 return err;
330 386
331 err = ov9650_set_blue_balance(&sd->gspca_dev, sensor_settings[BLUE_BALANCE_IDX]); 387 err = ov9650_set_blue_balance(&sd->gspca_dev,
388 sensor_settings[BLUE_BALANCE_IDX]);
332 if (err < 0) 389 if (err < 0)
333 return err; 390 return err;
334 391
@@ -340,11 +397,18 @@ int ov9650_init(struct sd *sd)
340 if (err < 0) 397 if (err < 0)
341 return err; 398 return err;
342 399
343 err = ov9650_set_auto_white_balance(&sd->gspca_dev, sensor_settings[AUTO_WHITE_BALANCE_IDX]); 400 err = ov9650_set_auto_exposure(&sd->gspca_dev,
401 sensor_settings[AUTO_EXPOSURE_IDX]);
402 if (err < 0)
403 return err;
404
405 err = ov9650_set_auto_white_balance(&sd->gspca_dev,
406 sensor_settings[AUTO_WHITE_BALANCE_IDX]);
344 if (err < 0) 407 if (err < 0)
345 return err; 408 return err;
346 409
347 err = ov9650_set_auto_gain(&sd->gspca_dev, sensor_settings[AUTO_GAIN_CTRL_IDX]); 410 err = ov9650_set_auto_gain(&sd->gspca_dev,
411 sensor_settings[AUTO_GAIN_CTRL_IDX]);
348 return err; 412 return err;
349} 413}
350 414
@@ -360,7 +424,10 @@ int ov9650_start(struct sd *sd)
360 int ver_offs = cam->cam_mode[sd->gspca_dev.curr_mode].priv; 424 int ver_offs = cam->cam_mode[sd->gspca_dev.curr_mode].priv;
361 int hor_offs = OV9650_LEFT_OFFSET; 425 int hor_offs = OV9650_LEFT_OFFSET;
362 426
363 if (sensor_settings[VFLIP_IDX]) 427 if ((!dmi_check_system(ov9650_flip_dmi_table) &&
428 sensor_settings[VFLIP_IDX]) ||
429 (dmi_check_system(ov9650_flip_dmi_table) &&
430 !sensor_settings[VFLIP_IDX]))
364 ver_offs--; 431 ver_offs--;
365 432
366 if (width <= 320) 433 if (width <= 320)
@@ -406,6 +473,14 @@ int ov9650_start(struct sd *sd)
406 if (err < 0) 473 if (err < 0)
407 return err; 474 return err;
408 475
476 err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
477 if (err < 0)
478 return err;
479
480 err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 2);
481 if (err < 0)
482 return err;
483
409 err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, 484 err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA,
410 (hor_offs >> 8) & 0xff); 485 (hor_offs >> 8) & 0xff);
411 if (err < 0) 486 if (err < 0)
@@ -425,6 +500,10 @@ int ov9650_start(struct sd *sd)
425 if (err < 0) 500 if (err < 0)
426 return err; 501 return err;
427 502
503 err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
504 if (err < 0)
505 return err;
506
428 switch (width) { 507 switch (width) {
429 case 640: 508 case 640:
430 PDEBUG(D_V4L2, "Configuring camera for VGA mode"); 509 PDEBUG(D_V4L2, "Configuring camera for VGA mode");
@@ -467,32 +546,15 @@ int ov9650_stop(struct sd *sd)
467 return m5602_write_sensor(sd, OV9650_COM2, &data, 1); 546 return m5602_write_sensor(sd, OV9650_COM2, &data, 1);
468} 547}
469 548
470int ov9650_power_down(struct sd *sd)
471{
472 int i, err = 0;
473 for (i = 0; i < ARRAY_SIZE(power_down_ov9650) && !err; i++) {
474 u8 data = power_down_ov9650[i][2];
475 if (power_down_ov9650[i][0] == SENSOR)
476 err = m5602_write_sensor(sd,
477 power_down_ov9650[i][1], &data, 1);
478 else
479 err = m5602_write_bridge(sd, power_down_ov9650[i][1],
480 data);
481 }
482
483 return err;
484}
485
486void ov9650_disconnect(struct sd *sd) 549void ov9650_disconnect(struct sd *sd)
487{ 550{
488 ov9650_stop(sd); 551 ov9650_stop(sd);
489 ov9650_power_down(sd);
490 552
491 sd->sensor = NULL; 553 sd->sensor = NULL;
492 kfree(sd->sensor_priv); 554 kfree(sd->sensor_priv);
493} 555}
494 556
495int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val) 557static int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
496{ 558{
497 struct sd *sd = (struct sd *) gspca_dev; 559 struct sd *sd = (struct sd *) gspca_dev;
498 s32 *sensor_settings = sd->sensor_priv; 560 s32 *sensor_settings = sd->sensor_priv;
@@ -502,7 +564,7 @@ int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
502 return 0; 564 return 0;
503} 565}
504 566
505int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val) 567static int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
506{ 568{
507 struct sd *sd = (struct sd *) gspca_dev; 569 struct sd *sd = (struct sd *) gspca_dev;
508 s32 *sensor_settings = sd->sensor_priv; 570 s32 *sensor_settings = sd->sensor_priv;
@@ -532,7 +594,7 @@ int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
532 return err; 594 return err;
533} 595}
534 596
535int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val) 597static int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
536{ 598{
537 struct sd *sd = (struct sd *) gspca_dev; 599 struct sd *sd = (struct sd *) gspca_dev;
538 s32 *sensor_settings = sd->sensor_priv; 600 s32 *sensor_settings = sd->sensor_priv;
@@ -542,7 +604,7 @@ int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
542 return 0; 604 return 0;
543} 605}
544 606
545int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val) 607static int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val)
546{ 608{
547 int err; 609 int err;
548 u8 i2c_data; 610 u8 i2c_data;
@@ -573,7 +635,7 @@ int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val)
573 return err; 635 return err;
574} 636}
575 637
576int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val) 638static int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
577{ 639{
578 struct sd *sd = (struct sd *) gspca_dev; 640 struct sd *sd = (struct sd *) gspca_dev;
579 s32 *sensor_settings = sd->sensor_priv; 641 s32 *sensor_settings = sd->sensor_priv;
@@ -583,7 +645,7 @@ int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
583 return 0; 645 return 0;
584} 646}
585 647
586int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val) 648static int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
587{ 649{
588 int err; 650 int err;
589 u8 i2c_data; 651 u8 i2c_data;
@@ -599,7 +661,7 @@ int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
599 return err; 661 return err;
600} 662}
601 663
602int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val) 664static int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
603{ 665{
604 struct sd *sd = (struct sd *) gspca_dev; 666 struct sd *sd = (struct sd *) gspca_dev;
605 s32 *sensor_settings = sd->sensor_priv; 667 s32 *sensor_settings = sd->sensor_priv;
@@ -610,7 +672,7 @@ int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
610 return 0; 672 return 0;
611} 673}
612 674
613int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val) 675static int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
614{ 676{
615 int err; 677 int err;
616 u8 i2c_data; 678 u8 i2c_data;
@@ -626,7 +688,7 @@ int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
626 return err; 688 return err;
627} 689}
628 690
629int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val) 691static int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
630{ 692{
631 struct sd *sd = (struct sd *) gspca_dev; 693 struct sd *sd = (struct sd *) gspca_dev;
632 s32 *sensor_settings = sd->sensor_priv; 694 s32 *sensor_settings = sd->sensor_priv;
@@ -636,7 +698,7 @@ int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
636 return 0; 698 return 0;
637} 699}
638 700
639int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val) 701static int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
640{ 702{
641 int err; 703 int err;
642 u8 i2c_data; 704 u8 i2c_data;
@@ -646,13 +708,20 @@ int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
646 PDEBUG(D_V4L2, "Set horizontal flip to %d", val); 708 PDEBUG(D_V4L2, "Set horizontal flip to %d", val);
647 709
648 sensor_settings[HFLIP_IDX] = val; 710 sensor_settings[HFLIP_IDX] = val;
649 i2c_data = ((val & 0x01) << 5) | (sensor_settings[VFLIP_IDX] << 4); 711
712 if (!dmi_check_system(ov9650_flip_dmi_table))
713 i2c_data = ((val & 0x01) << 5) |
714 (sensor_settings[VFLIP_IDX] << 4);
715 else
716 i2c_data = ((val & 0x01) << 5) |
717 (!sensor_settings[VFLIP_IDX] << 4);
718
650 err = m5602_write_sensor(sd, OV9650_MVFP, &i2c_data, 1); 719 err = m5602_write_sensor(sd, OV9650_MVFP, &i2c_data, 1);
651 720
652 return err; 721 return err;
653} 722}
654 723
655int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val) 724static int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
656{ 725{
657 struct sd *sd = (struct sd *) gspca_dev; 726 struct sd *sd = (struct sd *) gspca_dev;
658 s32 *sensor_settings = sd->sensor_priv; 727 s32 *sensor_settings = sd->sensor_priv;
@@ -663,7 +732,7 @@ int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
663 return 0; 732 return 0;
664} 733}
665 734
666int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val) 735static int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
667{ 736{
668 int err; 737 int err;
669 u8 i2c_data; 738 u8 i2c_data;
@@ -673,6 +742,9 @@ int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
673 PDEBUG(D_V4L2, "Set vertical flip to %d", val); 742 PDEBUG(D_V4L2, "Set vertical flip to %d", val);
674 sensor_settings[VFLIP_IDX] = val; 743 sensor_settings[VFLIP_IDX] = val;
675 744
745 if (dmi_check_system(ov9650_flip_dmi_table))
746 val = !val;
747
676 i2c_data = ((val & 0x01) << 4) | (sensor_settings[VFLIP_IDX] << 5); 748 i2c_data = ((val & 0x01) << 4) | (sensor_settings[VFLIP_IDX] << 5);
677 err = m5602_write_sensor(sd, OV9650_MVFP, &i2c_data, 1); 749 err = m5602_write_sensor(sd, OV9650_MVFP, &i2c_data, 1);
678 if (err < 0) 750 if (err < 0)
@@ -685,48 +757,38 @@ int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
685 return err; 757 return err;
686} 758}
687 759
688int ov9650_get_brightness(struct gspca_dev *gspca_dev, __s32 *val) 760static int ov9650_get_auto_exposure(struct gspca_dev *gspca_dev, __s32 *val)
689{ 761{
690 struct sd *sd = (struct sd *) gspca_dev; 762 struct sd *sd = (struct sd *) gspca_dev;
691 s32 *sensor_settings = sd->sensor_priv; 763 s32 *sensor_settings = sd->sensor_priv;
692 764
693 *val = sensor_settings[GAIN_IDX]; 765 *val = sensor_settings[AUTO_EXPOSURE_IDX];
694 PDEBUG(D_V4L2, "Read gain %d", *val); 766 PDEBUG(D_V4L2, "Read auto exposure control %d", *val);
695
696 return 0; 767 return 0;
697} 768}
698 769
699int ov9650_set_brightness(struct gspca_dev *gspca_dev, __s32 val) 770static int ov9650_set_auto_exposure(struct gspca_dev *gspca_dev,
771 __s32 val)
700{ 772{
701 int err; 773 int err;
702 u8 i2c_data; 774 u8 i2c_data;
703 struct sd *sd = (struct sd *) gspca_dev; 775 struct sd *sd = (struct sd *) gspca_dev;
704 s32 *sensor_settings = sd->sensor_priv; 776 s32 *sensor_settings = sd->sensor_priv;
705 777
706 PDEBUG(D_V4L2, "Set gain to %d", val); 778 PDEBUG(D_V4L2, "Set auto exposure control to %d", val);
707
708 sensor_settings[GAIN_IDX] = val;
709 779
710 /* Read the OV9650_VREF register first to avoid 780 sensor_settings[AUTO_EXPOSURE_IDX] = val;
711 corrupting the VREF high and low bits */ 781 err = m5602_read_sensor(sd, OV9650_COM8, &i2c_data, 1);
712 err = m5602_read_sensor(sd, OV9650_VREF, &i2c_data, 1);
713 if (err < 0)
714 return err;
715
716 /* Mask away all uninteresting bits */
717 i2c_data = ((val & 0x0300) >> 2) | (i2c_data & 0x3F);
718 err = m5602_write_sensor(sd, OV9650_VREF, &i2c_data, 1);
719 if (err < 0) 782 if (err < 0)
720 return err; 783 return err;
721 784
722 /* The 8 LSBs */ 785 i2c_data = ((i2c_data & 0xfe) | ((val & 0x01) << 0));
723 i2c_data = val & 0xff;
724 err = m5602_write_sensor(sd, OV9650_GAIN, &i2c_data, 1);
725 786
726 return err; 787 return m5602_write_sensor(sd, OV9650_COM8, &i2c_data, 1);
727} 788}
728 789
729int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev, __s32 *val) 790static int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev,
791 __s32 *val)
730{ 792{
731 struct sd *sd = (struct sd *) gspca_dev; 793 struct sd *sd = (struct sd *) gspca_dev;
732 s32 *sensor_settings = sd->sensor_priv; 794 s32 *sensor_settings = sd->sensor_priv;
@@ -735,7 +797,8 @@ int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev, __s32 *val)
735 return 0; 797 return 0;
736} 798}
737 799
738int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val) 800static int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev,
801 __s32 val)
739{ 802{
740 int err; 803 int err;
741 u8 i2c_data; 804 u8 i2c_data;
@@ -755,7 +818,7 @@ int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val)
755 return err; 818 return err;
756} 819}
757 820
758int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val) 821static int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val)
759{ 822{
760 struct sd *sd = (struct sd *) gspca_dev; 823 struct sd *sd = (struct sd *) gspca_dev;
761 s32 *sensor_settings = sd->sensor_priv; 824 s32 *sensor_settings = sd->sensor_priv;
@@ -765,7 +828,7 @@ int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val)
765 return 0; 828 return 0;
766} 829}
767 830
768int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val) 831static int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val)
769{ 832{
770 int err; 833 int err;
771 u8 i2c_data; 834 u8 i2c_data;
@@ -780,9 +843,8 @@ int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val)
780 return err; 843 return err;
781 844
782 i2c_data = ((i2c_data & 0xfb) | ((val & 0x01) << 2)); 845 i2c_data = ((i2c_data & 0xfb) | ((val & 0x01) << 2));
783 err = m5602_write_sensor(sd, OV9650_COM8, &i2c_data, 1);
784 846
785 return err; 847 return m5602_write_sensor(sd, OV9650_COM8, &i2c_data, 1);
786} 848}
787 849
788static void ov9650_dump_registers(struct sd *sd) 850static void ov9650_dump_registers(struct sd *sd)
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.h b/drivers/media/video/gspca/m5602/m5602_ov9650.h
index fcc54e4c0f4f..c98c40d69e05 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.h
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.h
@@ -120,6 +120,10 @@
120#define OV9650_SOFT_SLEEP (1 << 4) 120#define OV9650_SOFT_SLEEP (1 << 4)
121#define OV9650_OUTPUT_DRIVE_2X (1 << 0) 121#define OV9650_OUTPUT_DRIVE_2X (1 << 0)
122 122
123#define OV9650_DENOISE_ENABLE (1 << 5)
124#define OV9650_WHITE_PIXEL_ENABLE (1 << 1)
125#define OV9650_WHITE_PIXEL_OPTION (1 << 0)
126
123#define OV9650_LEFT_OFFSET 0x62 127#define OV9650_LEFT_OFFSET 0x62
124 128
125#define GAIN_DEFAULT 0x14 129#define GAIN_DEFAULT 0x14
@@ -137,29 +141,9 @@ int ov9650_probe(struct sd *sd);
137int ov9650_init(struct sd *sd); 141int ov9650_init(struct sd *sd);
138int ov9650_start(struct sd *sd); 142int ov9650_start(struct sd *sd);
139int ov9650_stop(struct sd *sd); 143int ov9650_stop(struct sd *sd);
140int ov9650_power_down(struct sd *sd);
141void ov9650_disconnect(struct sd *sd); 144void ov9650_disconnect(struct sd *sd);
142 145
143int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val); 146static const struct m5602_sensor ov9650 = {
144int ov9650_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
145int ov9650_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
146int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val);
147int ov9650_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
148int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
149int ov9650_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
150int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
151int ov9650_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
152int ov9650_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
153int ov9650_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
154int ov9650_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
155int ov9650_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
156int ov9650_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
157int ov9650_get_auto_white_balance(struct gspca_dev *gspca_dev, __s32 *val);
158int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev, __s32 val);
159int ov9650_get_auto_gain(struct gspca_dev *gspca_dev, __s32 *val);
160int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val);
161
162const static struct m5602_sensor ov9650 = {
163 .name = "OV9650", 147 .name = "OV9650",
164 .i2c_slave_id = 0x60, 148 .i2c_slave_id = 0x60,
165 .i2c_regW = 1, 149 .i2c_regW = 1,
@@ -167,7 +151,6 @@ const static struct m5602_sensor ov9650 = {
167 .init = ov9650_init, 151 .init = ov9650_init,
168 .start = ov9650_start, 152 .start = ov9650_start,
169 .stop = ov9650_stop, 153 .stop = ov9650_stop,
170 .power_down = ov9650_power_down,
171 .disconnect = ov9650_disconnect, 154 .disconnect = ov9650_disconnect,
172}; 155};
173 156
@@ -219,7 +202,7 @@ static const unsigned char init_ov9650[][3] =
219 /* Reset chip */ 202 /* Reset chip */
220 {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET}, 203 {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
221 /* One extra reset is needed in order to make the sensor behave 204 /* One extra reset is needed in order to make the sensor behave
222 properly when resuming from ram */ 205 properly when resuming from ram, could be a timing issue */
223 {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET}, 206 {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
224 207
225 /* Enable double clock */ 208 /* Enable double clock */
@@ -229,8 +212,7 @@ static const unsigned char init_ov9650[][3] =
229 212
230 /* Set fast AGC/AEC algorithm with unlimited step size */ 213 /* Set fast AGC/AEC algorithm with unlimited step size */
231 {SENSOR, OV9650_COM8, OV9650_FAST_AGC_AEC | 214 {SENSOR, OV9650_COM8, OV9650_FAST_AGC_AEC |
232 OV9650_AEC_UNLIM_STEP_SIZE | 215 OV9650_AEC_UNLIM_STEP_SIZE},
233 OV9650_AWB_EN | OV9650_AGC_EN},
234 216
235 {SENSOR, OV9650_CHLF, 0x10}, 217 {SENSOR, OV9650_CHLF, 0x10},
236 {SENSOR, OV9650_ARBLM, 0xbf}, 218 {SENSOR, OV9650_ARBLM, 0xbf},
@@ -301,8 +283,11 @@ static const unsigned char init_ov9650[][3] =
301 {SENSOR, OV9650_VREF, 0x10}, 283 {SENSOR, OV9650_VREF, 0x10},
302 {SENSOR, OV9650_ADC, 0x04}, 284 {SENSOR, OV9650_ADC, 0x04},
303 {SENSOR, OV9650_HV, 0x40}, 285 {SENSOR, OV9650_HV, 0x40},
286
304 /* Enable denoise, and white-pixel erase */ 287 /* Enable denoise, and white-pixel erase */
305 {SENSOR, OV9650_COM22, 0x23}, 288 {SENSOR, OV9650_COM22, OV9650_DENOISE_ENABLE |
289 OV9650_WHITE_PIXEL_ENABLE |
290 OV9650_WHITE_PIXEL_OPTION},
306 291
307 /* Enable VARIOPIXEL */ 292 /* Enable VARIOPIXEL */
308 {SENSOR, OV9650_COM3, OV9650_VARIOPIXEL}, 293 {SENSOR, OV9650_COM3, OV9650_VARIOPIXEL},
@@ -312,26 +297,6 @@ static const unsigned char init_ov9650[][3] =
312 {SENSOR, OV9650_COM2, OV9650_SOFT_SLEEP | OV9650_OUTPUT_DRIVE_2X}, 297 {SENSOR, OV9650_COM2, OV9650_SOFT_SLEEP | OV9650_OUTPUT_DRIVE_2X},
313}; 298};
314 299
315static const unsigned char power_down_ov9650[][3] =
316{
317 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
318 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
319 {SENSOR, OV9650_COM7, 0x80},
320 {SENSOR, OV9650_OFON, 0xf4},
321 {SENSOR, OV9650_MVFP, 0x80},
322 {SENSOR, OV9650_DBLV, 0x3f},
323 {SENSOR, OV9650_RSVD36, 0x49},
324 {SENSOR, OV9650_COM7, 0x05},
325
326 {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
327 {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
328 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
329 {BRIDGE, M5602_XB_GPIO_EN_L, 0x06},
330 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
331 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
332 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
333};
334
335static const unsigned char res_init_ov9650[][3] = 300static const unsigned char res_init_ov9650[][3] =
336{ 301{
337 {SENSOR, OV9650_COM2, OV9650_OUTPUT_DRIVE_2X}, 302 {SENSOR, OV9650_COM2, OV9650_OUTPUT_DRIVE_2X},
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.c b/drivers/media/video/gspca/m5602/m5602_po1030.c
index eaddf488bad1..8d74d8065b79 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.c
@@ -18,6 +18,29 @@
18 18
19#include "m5602_po1030.h" 19#include "m5602_po1030.h"
20 20
21static int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
22static int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
23static int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
24static int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val);
25static int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
26static int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
27static int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
28static int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
29static int po1030_get_green_balance(struct gspca_dev *gspca_dev, __s32 *val);
30static int po1030_set_green_balance(struct gspca_dev *gspca_dev, __s32 val);
31static int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
32static int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
33static int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
34static int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
35static int po1030_set_auto_white_balance(struct gspca_dev *gspca_dev,
36 __s32 val);
37static int po1030_get_auto_white_balance(struct gspca_dev *gspca_dev,
38 __s32 *val);
39static int po1030_set_auto_exposure(struct gspca_dev *gspca_dev,
40 __s32 val);
41static int po1030_get_auto_exposure(struct gspca_dev *gspca_dev,
42 __s32 *val);
43
21static struct v4l2_pix_format po1030_modes[] = { 44static struct v4l2_pix_format po1030_modes[] = {
22 { 45 {
23 640, 46 640,
@@ -27,11 +50,12 @@ static struct v4l2_pix_format po1030_modes[] = {
27 .sizeimage = 640 * 480, 50 .sizeimage = 640 * 480,
28 .bytesperline = 640, 51 .bytesperline = 640,
29 .colorspace = V4L2_COLORSPACE_SRGB, 52 .colorspace = V4L2_COLORSPACE_SRGB,
30 .priv = 0 53 .priv = 2
31 } 54 }
32}; 55};
33 56
34const static struct ctrl po1030_ctrls[] = { 57static const struct ctrl po1030_ctrls[] = {
58#define GAIN_IDX 0
35 { 59 {
36 { 60 {
37 .id = V4L2_CID_GAIN, 61 .id = V4L2_CID_GAIN,
@@ -45,7 +69,9 @@ const static struct ctrl po1030_ctrls[] = {
45 }, 69 },
46 .set = po1030_set_gain, 70 .set = po1030_set_gain,
47 .get = po1030_get_gain 71 .get = po1030_get_gain
48 }, { 72 },
73#define EXPOSURE_IDX 1
74 {
49 { 75 {
50 .id = V4L2_CID_EXPOSURE, 76 .id = V4L2_CID_EXPOSURE,
51 .type = V4L2_CTRL_TYPE_INTEGER, 77 .type = V4L2_CTRL_TYPE_INTEGER,
@@ -58,7 +84,9 @@ const static struct ctrl po1030_ctrls[] = {
58 }, 84 },
59 .set = po1030_set_exposure, 85 .set = po1030_set_exposure,
60 .get = po1030_get_exposure 86 .get = po1030_get_exposure
61 }, { 87 },
88#define RED_BALANCE_IDX 2
89 {
62 { 90 {
63 .id = V4L2_CID_RED_BALANCE, 91 .id = V4L2_CID_RED_BALANCE,
64 .type = V4L2_CTRL_TYPE_INTEGER, 92 .type = V4L2_CTRL_TYPE_INTEGER,
@@ -71,7 +99,9 @@ const static struct ctrl po1030_ctrls[] = {
71 }, 99 },
72 .set = po1030_set_red_balance, 100 .set = po1030_set_red_balance,
73 .get = po1030_get_red_balance 101 .get = po1030_get_red_balance
74 }, { 102 },
103#define BLUE_BALANCE_IDX 3
104 {
75 { 105 {
76 .id = V4L2_CID_BLUE_BALANCE, 106 .id = V4L2_CID_BLUE_BALANCE,
77 .type = V4L2_CTRL_TYPE_INTEGER, 107 .type = V4L2_CTRL_TYPE_INTEGER,
@@ -84,7 +114,9 @@ const static struct ctrl po1030_ctrls[] = {
84 }, 114 },
85 .set = po1030_set_blue_balance, 115 .set = po1030_set_blue_balance,
86 .get = po1030_get_blue_balance 116 .get = po1030_get_blue_balance
87 }, { 117 },
118#define HFLIP_IDX 4
119 {
88 { 120 {
89 .id = V4L2_CID_HFLIP, 121 .id = V4L2_CID_HFLIP,
90 .type = V4L2_CTRL_TYPE_BOOLEAN, 122 .type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -96,7 +128,9 @@ const static struct ctrl po1030_ctrls[] = {
96 }, 128 },
97 .set = po1030_set_hflip, 129 .set = po1030_set_hflip,
98 .get = po1030_get_hflip 130 .get = po1030_get_hflip
99 }, { 131 },
132#define VFLIP_IDX 5
133 {
100 { 134 {
101 .id = V4L2_CID_VFLIP, 135 .id = V4L2_CID_VFLIP,
102 .type = V4L2_CTRL_TYPE_BOOLEAN, 136 .type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -108,14 +142,58 @@ const static struct ctrl po1030_ctrls[] = {
108 }, 142 },
109 .set = po1030_set_vflip, 143 .set = po1030_set_vflip,
110 .get = po1030_get_vflip 144 .get = po1030_get_vflip
111 } 145 },
146#define AUTO_WHITE_BALANCE_IDX 6
147 {
148 {
149 .id = V4L2_CID_AUTO_WHITE_BALANCE,
150 .type = V4L2_CTRL_TYPE_BOOLEAN,
151 .name = "auto white balance",
152 .minimum = 0,
153 .maximum = 1,
154 .step = 1,
155 .default_value = 0,
156 },
157 .set = po1030_set_auto_white_balance,
158 .get = po1030_get_auto_white_balance
159 },
160#define AUTO_EXPOSURE_IDX 7
161 {
162 {
163 .id = V4L2_CID_EXPOSURE_AUTO,
164 .type = V4L2_CTRL_TYPE_BOOLEAN,
165 .name = "auto exposure",
166 .minimum = 0,
167 .maximum = 1,
168 .step = 1,
169 .default_value = 0,
170 },
171 .set = po1030_set_auto_exposure,
172 .get = po1030_get_auto_exposure
173 },
174#define GREEN_BALANCE_IDX 8
175 {
176 {
177 .id = M5602_V4L2_CID_GREEN_BALANCE,
178 .type = V4L2_CTRL_TYPE_INTEGER,
179 .name = "green balance",
180 .minimum = 0x00,
181 .maximum = 0xff,
182 .step = 0x1,
183 .default_value = PO1030_GREEN_GAIN_DEFAULT,
184 .flags = V4L2_CTRL_FLAG_SLIDER
185 },
186 .set = po1030_set_green_balance,
187 .get = po1030_get_green_balance
188 },
112}; 189};
113 190
114static void po1030_dump_registers(struct sd *sd); 191static void po1030_dump_registers(struct sd *sd);
115 192
116int po1030_probe(struct sd *sd) 193int po1030_probe(struct sd *sd)
117{ 194{
118 u8 prod_id = 0, ver_id = 0, i; 195 u8 dev_id_h = 0, i;
196 s32 *sensor_settings;
119 197
120 if (force_sensor) { 198 if (force_sensor) {
121 if (force_sensor == PO1030_SENSOR) { 199 if (force_sensor == PO1030_SENSOR) {
@@ -139,28 +217,36 @@ int po1030_probe(struct sd *sd)
139 m5602_write_bridge(sd, preinit_po1030[i][1], data); 217 m5602_write_bridge(sd, preinit_po1030[i][1], data);
140 } 218 }
141 219
142 if (m5602_read_sensor(sd, 0x3, &prod_id, 1)) 220 if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1))
143 return -ENODEV; 221 return -ENODEV;
144 222
145 if (m5602_read_sensor(sd, 0x4, &ver_id, 1)) 223 if (dev_id_h == 0x30) {
146 return -ENODEV;
147
148 if ((prod_id == 0x02) && (ver_id == 0xef)) {
149 info("Detected a po1030 sensor"); 224 info("Detected a po1030 sensor");
150 goto sensor_found; 225 goto sensor_found;
151 } 226 }
152 return -ENODEV; 227 return -ENODEV;
153 228
154sensor_found: 229sensor_found:
230 sensor_settings = kmalloc(
231 ARRAY_SIZE(po1030_ctrls) * sizeof(s32), GFP_KERNEL);
232 if (!sensor_settings)
233 return -ENOMEM;
234
155 sd->gspca_dev.cam.cam_mode = po1030_modes; 235 sd->gspca_dev.cam.cam_mode = po1030_modes;
156 sd->gspca_dev.cam.nmodes = ARRAY_SIZE(po1030_modes); 236 sd->gspca_dev.cam.nmodes = ARRAY_SIZE(po1030_modes);
157 sd->desc->ctrls = po1030_ctrls; 237 sd->desc->ctrls = po1030_ctrls;
158 sd->desc->nctrls = ARRAY_SIZE(po1030_ctrls); 238 sd->desc->nctrls = ARRAY_SIZE(po1030_ctrls);
239
240 for (i = 0; i < ARRAY_SIZE(po1030_ctrls); i++)
241 sensor_settings[i] = po1030_ctrls[i].qctrl.default_value;
242 sd->sensor_priv = sensor_settings;
243
159 return 0; 244 return 0;
160} 245}
161 246
162int po1030_init(struct sd *sd) 247int po1030_init(struct sd *sd)
163{ 248{
249 s32 *sensor_settings = sd->sensor_priv;
164 int i, err = 0; 250 int i, err = 0;
165 251
166 /* Init the sensor */ 252 /* Init the sensor */
@@ -185,47 +271,206 @@ int po1030_init(struct sd *sd)
185 return -EINVAL; 271 return -EINVAL;
186 } 272 }
187 } 273 }
274 if (err < 0)
275 return err;
188 276
189 if (dump_sensor) 277 if (dump_sensor)
190 po1030_dump_registers(sd); 278 po1030_dump_registers(sd);
191 279
280 err = po1030_set_exposure(&sd->gspca_dev,
281 sensor_settings[EXPOSURE_IDX]);
282 if (err < 0)
283 return err;
284
285 err = po1030_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
286 if (err < 0)
287 return err;
288
289 err = po1030_set_hflip(&sd->gspca_dev, sensor_settings[HFLIP_IDX]);
290 if (err < 0)
291 return err;
292
293 err = po1030_set_vflip(&sd->gspca_dev, sensor_settings[VFLIP_IDX]);
294 if (err < 0)
295 return err;
296
297 err = po1030_set_red_balance(&sd->gspca_dev,
298 sensor_settings[RED_BALANCE_IDX]);
299 if (err < 0)
300 return err;
301
302 err = po1030_set_blue_balance(&sd->gspca_dev,
303 sensor_settings[BLUE_BALANCE_IDX]);
304 if (err < 0)
305 return err;
306
307 err = po1030_set_green_balance(&sd->gspca_dev,
308 sensor_settings[GREEN_BALANCE_IDX]);
309 if (err < 0)
310 return err;
311
312 err = po1030_set_auto_white_balance(&sd->gspca_dev,
313 sensor_settings[AUTO_WHITE_BALANCE_IDX]);
314 if (err < 0)
315 return err;
316
317 err = po1030_set_auto_exposure(&sd->gspca_dev,
318 sensor_settings[AUTO_EXPOSURE_IDX]);
192 return err; 319 return err;
193} 320}
194 321
195int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val) 322int po1030_start(struct sd *sd)
196{ 323{
197 struct sd *sd = (struct sd *) gspca_dev; 324 struct cam *cam = &sd->gspca_dev.cam;
198 u8 i2c_data; 325 int i, err = 0;
199 int err; 326 int width = cam->cam_mode[sd->gspca_dev.curr_mode].width;
327 int height = cam->cam_mode[sd->gspca_dev.curr_mode].height;
328 int ver_offs = cam->cam_mode[sd->gspca_dev.curr_mode].priv;
329 u8 data;
330
331 switch (width) {
332 case 320:
333 data = PO1030_SUBSAMPLING;
334 err = m5602_write_sensor(sd, PO1030_CONTROL3, &data, 1);
335 if (err < 0)
336 return err;
337
338 data = ((width + 3) >> 8) & 0xff;
339 err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_H, &data, 1);
340 if (err < 0)
341 return err;
342
343 data = (width + 3) & 0xff;
344 err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_L, &data, 1);
345 if (err < 0)
346 return err;
347
348 data = ((height + 1) >> 8) & 0xff;
349 err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_H, &data, 1);
350 if (err < 0)
351 return err;
352
353 data = (height + 1) & 0xff;
354 err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_L, &data, 1);
355
356 height += 6;
357 width -= 1;
358 break;
359
360 case 640:
361 data = 0;
362 err = m5602_write_sensor(sd, PO1030_CONTROL3, &data, 1);
363 if (err < 0)
364 return err;
365
366 data = ((width + 7) >> 8) & 0xff;
367 err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_H, &data, 1);
368 if (err < 0)
369 return err;
370
371 data = (width + 7) & 0xff;
372 err = m5602_write_sensor(sd, PO1030_WINDOWWIDTH_L, &data, 1);
373 if (err < 0)
374 return err;
375
376 data = ((height + 3) >> 8) & 0xff;
377 err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_H, &data, 1);
378 if (err < 0)
379 return err;
380
381 data = (height + 3) & 0xff;
382 err = m5602_write_sensor(sd, PO1030_WINDOWHEIGHT_L, &data, 1);
383
384 height += 12;
385 width -= 2;
386 break;
387 }
388 err = m5602_write_bridge(sd, M5602_XB_SENSOR_TYPE, 0x0c);
389 if (err < 0)
390 return err;
200 391
201 err = m5602_read_sensor(sd, PO1030_REG_INTEGLINES_H, 392 err = m5602_write_bridge(sd, M5602_XB_LINE_OF_FRAME_H, 0x81);
202 &i2c_data, 1);
203 if (err < 0) 393 if (err < 0)
204 return err; 394 return err;
205 *val = (i2c_data << 8);
206 395
207 err = m5602_read_sensor(sd, PO1030_REG_INTEGLINES_M, 396 err = m5602_write_bridge(sd, M5602_XB_PIX_OF_LINE_H, 0x82);
208 &i2c_data, 1); 397 if (err < 0)
209 *val |= i2c_data; 398 return err;
210 399
211 PDEBUG(D_V4L2, "Exposure read as %d", *val); 400 err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0x01);
401 if (err < 0)
402 return err;
403
404 err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA,
405 ((ver_offs >> 8) & 0xff));
406 if (err < 0)
407 return err;
408
409 err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (ver_offs & 0xff));
410 if (err < 0)
411 return err;
412
413 for (i = 0; i < 2 && !err; i++)
414 err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0);
415 if (err < 0)
416 return err;
417
418 err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height >> 8) & 0xff);
419 if (err < 0)
420 return err;
421
422 err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height & 0xff));
423 if (err < 0)
424 return err;
425
426 for (i = 0; i < 2 && !err; i++)
427 err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0);
428
429 for (i = 0; i < 2 && !err; i++)
430 err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
431
432 for (i = 0; i < 2 && !err; i++)
433 err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, 0);
434 if (err < 0)
435 return err;
436
437 err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, (width >> 8) & 0xff);
438 if (err < 0)
439 return err;
212 440
441 err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, (width & 0xff));
442 if (err < 0)
443 return err;
444
445 err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
213 return err; 446 return err;
214} 447}
215 448
216int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val) 449static int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
217{ 450{
218 struct sd *sd = (struct sd *) gspca_dev; 451 struct sd *sd = (struct sd *) gspca_dev;
452 s32 *sensor_settings = sd->sensor_priv;
453
454 *val = sensor_settings[EXPOSURE_IDX];
455 PDEBUG(D_V4L2, "Exposure read as %d", *val);
456 return 0;
457}
458
459static int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
460{
461 struct sd *sd = (struct sd *) gspca_dev;
462 s32 *sensor_settings = sd->sensor_priv;
219 u8 i2c_data; 463 u8 i2c_data;
220 int err; 464 int err;
221 465
466 sensor_settings[EXPOSURE_IDX] = val;
222 PDEBUG(D_V4L2, "Set exposure to %d", val & 0xffff); 467 PDEBUG(D_V4L2, "Set exposure to %d", val & 0xffff);
223 468
224 i2c_data = ((val & 0xff00) >> 8); 469 i2c_data = ((val & 0xff00) >> 8);
225 PDEBUG(D_V4L2, "Set exposure to high byte to 0x%x", 470 PDEBUG(D_V4L2, "Set exposure to high byte to 0x%x",
226 i2c_data); 471 i2c_data);
227 472
228 err = m5602_write_sensor(sd, PO1030_REG_INTEGLINES_H, 473 err = m5602_write_sensor(sd, PO1030_INTEGLINES_H,
229 &i2c_data, 1); 474 &i2c_data, 1);
230 if (err < 0) 475 if (err < 0)
231 return err; 476 return err;
@@ -233,167 +478,256 @@ int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
233 i2c_data = (val & 0xff); 478 i2c_data = (val & 0xff);
234 PDEBUG(D_V4L2, "Set exposure to low byte to 0x%x", 479 PDEBUG(D_V4L2, "Set exposure to low byte to 0x%x",
235 i2c_data); 480 i2c_data);
236 err = m5602_write_sensor(sd, PO1030_REG_INTEGLINES_M, 481 err = m5602_write_sensor(sd, PO1030_INTEGLINES_M,
237 &i2c_data, 1); 482 &i2c_data, 1);
238 483
239 return err; 484 return err;
240} 485}
241 486
242int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val) 487static int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
243{ 488{
244 struct sd *sd = (struct sd *) gspca_dev; 489 struct sd *sd = (struct sd *) gspca_dev;
245 u8 i2c_data; 490 s32 *sensor_settings = sd->sensor_priv;
246 int err;
247 491
248 err = m5602_read_sensor(sd, PO1030_REG_GLOBALGAIN, 492 *val = sensor_settings[GAIN_IDX];
249 &i2c_data, 1);
250 *val = i2c_data;
251 PDEBUG(D_V4L2, "Read global gain %d", *val); 493 PDEBUG(D_V4L2, "Read global gain %d", *val);
252 494 return 0;
253 return err;
254} 495}
255 496
256int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val) 497static int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val)
257{ 498{
258 struct sd *sd = (struct sd *) gspca_dev; 499 struct sd *sd = (struct sd *) gspca_dev;
500 s32 *sensor_settings = sd->sensor_priv;
259 u8 i2c_data; 501 u8 i2c_data;
260 int err; 502 int err;
261 503
262 err = m5602_read_sensor(sd, PO1030_REG_CONTROL2, 504 sensor_settings[GAIN_IDX] = val;
505
506 i2c_data = val & 0xff;
507 PDEBUG(D_V4L2, "Set global gain to %d", i2c_data);
508 err = m5602_write_sensor(sd, PO1030_GLOBALGAIN,
263 &i2c_data, 1); 509 &i2c_data, 1);
510 return err;
511}
264 512
265 *val = (i2c_data >> 7) & 0x01 ; 513static int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
514{
515 struct sd *sd = (struct sd *) gspca_dev;
516 s32 *sensor_settings = sd->sensor_priv;
266 517
518 *val = sensor_settings[HFLIP_IDX];
267 PDEBUG(D_V4L2, "Read hflip %d", *val); 519 PDEBUG(D_V4L2, "Read hflip %d", *val);
268 520
269 return err; 521 return 0;
270} 522}
271 523
272int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val) 524static int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
273{ 525{
274 struct sd *sd = (struct sd *) gspca_dev; 526 struct sd *sd = (struct sd *) gspca_dev;
527 s32 *sensor_settings = sd->sensor_priv;
275 u8 i2c_data; 528 u8 i2c_data;
276 int err; 529 int err;
277 530
531 sensor_settings[HFLIP_IDX] = val;
532
278 PDEBUG(D_V4L2, "Set hflip %d", val); 533 PDEBUG(D_V4L2, "Set hflip %d", val);
279 err = m5602_read_sensor(sd, PO1030_REG_CONTROL2, &i2c_data, 1); 534 err = m5602_read_sensor(sd, PO1030_CONTROL2, &i2c_data, 1);
280 if (err < 0) 535 if (err < 0)
281 return err; 536 return err;
282 537
283 i2c_data = (0x7f & i2c_data) | ((val & 0x01) << 7); 538 i2c_data = (0x7f & i2c_data) | ((val & 0x01) << 7);
284 539
285 err = m5602_write_sensor(sd, PO1030_REG_CONTROL2, 540 err = m5602_write_sensor(sd, PO1030_CONTROL2,
286 &i2c_data, 1); 541 &i2c_data, 1);
287 542
288 return err; 543 return err;
289} 544}
290 545
291int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val) 546static int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
292{ 547{
293 struct sd *sd = (struct sd *) gspca_dev; 548 struct sd *sd = (struct sd *) gspca_dev;
294 u8 i2c_data; 549 s32 *sensor_settings = sd->sensor_priv;
295 int err;
296
297 err = m5602_read_sensor(sd, PO1030_REG_GLOBALGAIN,
298 &i2c_data, 1);
299
300 *val = (i2c_data >> 6) & 0x01;
301 550
551 *val = sensor_settings[VFLIP_IDX];
302 PDEBUG(D_V4L2, "Read vflip %d", *val); 552 PDEBUG(D_V4L2, "Read vflip %d", *val);
303 553
304 return err; 554 return 0;
305} 555}
306 556
307int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val) 557static int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
308{ 558{
309 struct sd *sd = (struct sd *) gspca_dev; 559 struct sd *sd = (struct sd *) gspca_dev;
560 s32 *sensor_settings = sd->sensor_priv;
310 u8 i2c_data; 561 u8 i2c_data;
311 int err; 562 int err;
312 563
564 sensor_settings[VFLIP_IDX] = val;
565
313 PDEBUG(D_V4L2, "Set vflip %d", val); 566 PDEBUG(D_V4L2, "Set vflip %d", val);
314 err = m5602_read_sensor(sd, PO1030_REG_CONTROL2, &i2c_data, 1); 567 err = m5602_read_sensor(sd, PO1030_CONTROL2, &i2c_data, 1);
315 if (err < 0) 568 if (err < 0)
316 return err; 569 return err;
317 570
318 i2c_data = (i2c_data & 0xbf) | ((val & 0x01) << 6); 571 i2c_data = (i2c_data & 0xbf) | ((val & 0x01) << 6);
319 572
320 err = m5602_write_sensor(sd, PO1030_REG_CONTROL2, 573 err = m5602_write_sensor(sd, PO1030_CONTROL2,
321 &i2c_data, 1); 574 &i2c_data, 1);
322 575
323 return err; 576 return err;
324} 577}
325 578
326int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val) 579static int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val)
580{
581 struct sd *sd = (struct sd *) gspca_dev;
582 s32 *sensor_settings = sd->sensor_priv;
583
584 *val = sensor_settings[RED_BALANCE_IDX];
585 PDEBUG(D_V4L2, "Read red gain %d", *val);
586 return 0;
587}
588
589static int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
327{ 590{
328 struct sd *sd = (struct sd *) gspca_dev; 591 struct sd *sd = (struct sd *) gspca_dev;
592 s32 *sensor_settings = sd->sensor_priv;
329 u8 i2c_data; 593 u8 i2c_data;
330 int err; 594 int err;
331 595
596 sensor_settings[RED_BALANCE_IDX] = val;
597
332 i2c_data = val & 0xff; 598 i2c_data = val & 0xff;
333 PDEBUG(D_V4L2, "Set global gain to %d", i2c_data); 599 PDEBUG(D_V4L2, "Set red gain to %d", i2c_data);
334 err = m5602_write_sensor(sd, PO1030_REG_GLOBALGAIN, 600 err = m5602_write_sensor(sd, PO1030_RED_GAIN,
335 &i2c_data, 1); 601 &i2c_data, 1);
336 return err; 602 return err;
337} 603}
338 604
339int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val) 605static int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val)
340{ 606{
341 struct sd *sd = (struct sd *) gspca_dev; 607 struct sd *sd = (struct sd *) gspca_dev;
342 u8 i2c_data; 608 s32 *sensor_settings = sd->sensor_priv;
343 int err;
344 609
345 err = m5602_read_sensor(sd, PO1030_REG_RED_GAIN, 610 *val = sensor_settings[BLUE_BALANCE_IDX];
346 &i2c_data, 1); 611 PDEBUG(D_V4L2, "Read blue gain %d", *val);
347 *val = i2c_data; 612
348 PDEBUG(D_V4L2, "Read red gain %d", *val); 613 return 0;
349 return err;
350} 614}
351 615
352int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val) 616static int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
353{ 617{
354 struct sd *sd = (struct sd *) gspca_dev; 618 struct sd *sd = (struct sd *) gspca_dev;
619 s32 *sensor_settings = sd->sensor_priv;
355 u8 i2c_data; 620 u8 i2c_data;
356 int err; 621 int err;
357 622
623 sensor_settings[BLUE_BALANCE_IDX] = val;
624
358 i2c_data = val & 0xff; 625 i2c_data = val & 0xff;
359 PDEBUG(D_V4L2, "Set red gain to %d", i2c_data); 626 PDEBUG(D_V4L2, "Set blue gain to %d", i2c_data);
360 err = m5602_write_sensor(sd, PO1030_REG_RED_GAIN, 627 err = m5602_write_sensor(sd, PO1030_BLUE_GAIN,
361 &i2c_data, 1); 628 &i2c_data, 1);
629
362 return err; 630 return err;
363} 631}
364 632
365int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val) 633static int po1030_get_green_balance(struct gspca_dev *gspca_dev, __s32 *val)
366{ 634{
367 struct sd *sd = (struct sd *) gspca_dev; 635 struct sd *sd = (struct sd *) gspca_dev;
636 s32 *sensor_settings = sd->sensor_priv;
637
638 *val = sensor_settings[GREEN_BALANCE_IDX];
639 PDEBUG(D_V4L2, "Read green gain %d", *val);
640
641 return 0;
642}
643
644static int po1030_set_green_balance(struct gspca_dev *gspca_dev, __s32 val)
645{
646 struct sd *sd = (struct sd *) gspca_dev;
647 s32 *sensor_settings = sd->sensor_priv;
368 u8 i2c_data; 648 u8 i2c_data;
369 int err; 649 int err;
370 650
371 err = m5602_read_sensor(sd, PO1030_REG_BLUE_GAIN, 651 sensor_settings[GREEN_BALANCE_IDX] = val;
652 i2c_data = val & 0xff;
653 PDEBUG(D_V4L2, "Set green gain to %d", i2c_data);
654
655 err = m5602_write_sensor(sd, PO1030_GREEN_1_GAIN,
656 &i2c_data, 1);
657 if (err < 0)
658 return err;
659
660 return m5602_write_sensor(sd, PO1030_GREEN_2_GAIN,
372 &i2c_data, 1); 661 &i2c_data, 1);
373 *val = i2c_data; 662}
374 PDEBUG(D_V4L2, "Read blue gain %d", *val);
375 663
376 return err; 664static int po1030_get_auto_white_balance(struct gspca_dev *gspca_dev,
665 __s32 *val)
666{
667 struct sd *sd = (struct sd *) gspca_dev;
668 s32 *sensor_settings = sd->sensor_priv;
669
670 *val = sensor_settings[AUTO_WHITE_BALANCE_IDX];
671 PDEBUG(D_V4L2, "Auto white balancing is %d", *val);
672
673 return 0;
377} 674}
378 675
379int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val) 676static int po1030_set_auto_white_balance(struct gspca_dev *gspca_dev,
677 __s32 val)
380{ 678{
381 struct sd *sd = (struct sd *) gspca_dev; 679 struct sd *sd = (struct sd *) gspca_dev;
680 s32 *sensor_settings = sd->sensor_priv;
382 u8 i2c_data; 681 u8 i2c_data;
383 int err; 682 int err;
384 i2c_data = val & 0xff;
385 PDEBUG(D_V4L2, "Set blue gain to %d", i2c_data);
386 err = m5602_write_sensor(sd, PO1030_REG_BLUE_GAIN,
387 &i2c_data, 1);
388 683
684 sensor_settings[AUTO_WHITE_BALANCE_IDX] = val;
685
686 err = m5602_read_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1);
687 if (err < 0)
688 return err;
689
690 PDEBUG(D_V4L2, "Set auto white balance to %d", val);
691 i2c_data = (i2c_data & 0xfe) | (val & 0x01);
692 err = m5602_write_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1);
389 return err; 693 return err;
390} 694}
391 695
392int po1030_power_down(struct sd *sd) 696static int po1030_get_auto_exposure(struct gspca_dev *gspca_dev,
697 __s32 *val)
393{ 698{
699 struct sd *sd = (struct sd *) gspca_dev;
700 s32 *sensor_settings = sd->sensor_priv;
701
702 *val = sensor_settings[AUTO_EXPOSURE_IDX];
703 PDEBUG(D_V4L2, "Auto exposure is %d", *val);
394 return 0; 704 return 0;
395} 705}
396 706
707static int po1030_set_auto_exposure(struct gspca_dev *gspca_dev,
708 __s32 val)
709{
710 struct sd *sd = (struct sd *) gspca_dev;
711 s32 *sensor_settings = sd->sensor_priv;
712 u8 i2c_data;
713 int err;
714
715 sensor_settings[AUTO_EXPOSURE_IDX] = val;
716 err = m5602_read_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1);
717 if (err < 0)
718 return err;
719
720 PDEBUG(D_V4L2, "Set auto exposure to %d", val);
721 i2c_data = (i2c_data & 0xfd) | ((val & 0x01) << 1);
722 return m5602_write_sensor(sd, PO1030_AUTOCTRL1, &i2c_data, 1);
723}
724
725void po1030_disconnect(struct sd *sd)
726{
727 sd->sensor = NULL;
728 kfree(sd->sensor_priv);
729}
730
397static void po1030_dump_registers(struct sd *sd) 731static void po1030_dump_registers(struct sd *sd)
398{ 732{
399 int address; 733 int address;
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.h b/drivers/media/video/gspca/m5602/m5602_po1030.h
index c10b12335818..1ea380b2bbe7 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.h
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.h
@@ -25,98 +25,123 @@
25 25
26/*****************************************************************************/ 26/*****************************************************************************/
27 27
28#define PO1030_REG_DEVID_H 0x00 28#define PO1030_DEVID_H 0x00
29#define PO1030_REG_DEVID_L 0x01 29#define PO1030_DEVID_L 0x01
30#define PO1030_REG_FRAMEWIDTH_H 0x04 30#define PO1030_FRAMEWIDTH_H 0x04
31#define PO1030_REG_FRAMEWIDTH_L 0x05 31#define PO1030_FRAMEWIDTH_L 0x05
32#define PO1030_REG_FRAMEHEIGHT_H 0x06 32#define PO1030_FRAMEHEIGHT_H 0x06
33#define PO1030_REG_FRAMEHEIGHT_L 0x07 33#define PO1030_FRAMEHEIGHT_L 0x07
34#define PO1030_REG_WINDOWX_H 0x08 34#define PO1030_WINDOWX_H 0x08
35#define PO1030_REG_WINDOWX_L 0x09 35#define PO1030_WINDOWX_L 0x09
36#define PO1030_REG_WINDOWY_H 0x0a 36#define PO1030_WINDOWY_H 0x0a
37#define PO1030_REG_WINDOWY_L 0x0b 37#define PO1030_WINDOWY_L 0x0b
38#define PO1030_REG_WINDOWWIDTH_H 0x0c 38#define PO1030_WINDOWWIDTH_H 0x0c
39#define PO1030_REG_WINDOWWIDTH_L 0x0d 39#define PO1030_WINDOWWIDTH_L 0x0d
40#define PO1030_REG_WINDOWHEIGHT_H 0x0e 40#define PO1030_WINDOWHEIGHT_H 0x0e
41#define PO1030_REG_WINDOWHEIGHT_L 0x0f 41#define PO1030_WINDOWHEIGHT_L 0x0f
42 42
43#define PO1030_REG_GLOBALIBIAS 0x12 43#define PO1030_GLOBALIBIAS 0x12
44#define PO1030_REG_PIXELIBIAS 0x13 44#define PO1030_PIXELIBIAS 0x13
45 45
46#define PO1030_REG_GLOBALGAIN 0x15 46#define PO1030_GLOBALGAIN 0x15
47#define PO1030_REG_RED_GAIN 0x16 47#define PO1030_RED_GAIN 0x16
48#define PO1030_REG_GREEN_1_GAIN 0x17 48#define PO1030_GREEN_1_GAIN 0x17
49#define PO1030_REG_BLUE_GAIN 0x18 49#define PO1030_BLUE_GAIN 0x18
50#define PO1030_REG_GREEN_2_GAIN 0x19 50#define PO1030_GREEN_2_GAIN 0x19
51 51
52#define PO1030_REG_INTEGLINES_H 0x1a 52#define PO1030_INTEGLINES_H 0x1a
53#define PO1030_REG_INTEGLINES_M 0x1b 53#define PO1030_INTEGLINES_M 0x1b
54#define PO1030_REG_INTEGLINES_L 0x1c 54#define PO1030_INTEGLINES_L 0x1c
55 55
56#define PO1030_REG_CONTROL1 0x1d 56#define PO1030_CONTROL1 0x1d
57#define PO1030_REG_CONTROL2 0x1e 57#define PO1030_CONTROL2 0x1e
58#define PO1030_REG_CONTROL3 0x1f 58#define PO1030_CONTROL3 0x1f
59#define PO1030_REG_CONTROL4 0x20 59#define PO1030_CONTROL4 0x20
60 60
61#define PO1030_REG_PERIOD50_H 0x23 61#define PO1030_PERIOD50_H 0x23
62#define PO1030_REG_PERIOD50_L 0x24 62#define PO1030_PERIOD50_L 0x24
63#define PO1030_REG_PERIOD60_H 0x25 63#define PO1030_PERIOD60_H 0x25
64#define PO1030_REG_PERIOD60_L 0x26 64#define PO1030_PERIOD60_L 0x26
65#define PO1030_REG_REGCLK167 0x27 65#define PO1030_REGCLK167 0x27
66#define PO1030_REG_DELTA50 0x28 66#define PO1030_FLICKER_DELTA50 0x28
67#define PO1030_REG_DELTA60 0x29 67#define PO1030_FLICKERDELTA60 0x29
68 68
69#define PO1030_REG_ADCOFFSET 0x2c 69#define PO1030_ADCOFFSET 0x2c
70 70
71/* Gamma Correction Coeffs */ 71/* Gamma Correction Coeffs */
72#define PO1030_REG_GC0 0x2d 72#define PO1030_GC0 0x2d
73#define PO1030_REG_GC1 0x2e 73#define PO1030_GC1 0x2e
74#define PO1030_REG_GC2 0x2f 74#define PO1030_GC2 0x2f
75#define PO1030_REG_GC3 0x30 75#define PO1030_GC3 0x30
76#define PO1030_REG_GC4 0x31 76#define PO1030_GC4 0x31
77#define PO1030_REG_GC5 0x32 77#define PO1030_GC5 0x32
78#define PO1030_REG_GC6 0x33 78#define PO1030_GC6 0x33
79#define PO1030_REG_GC7 0x34 79#define PO1030_GC7 0x34
80 80
81/* Color Transform Matrix */ 81/* Color Transform Matrix */
82#define PO1030_REG_CT0 0x35 82#define PO1030_CT0 0x35
83#define PO1030_REG_CT1 0x36 83#define PO1030_CT1 0x36
84#define PO1030_REG_CT2 0x37 84#define PO1030_CT2 0x37
85#define PO1030_REG_CT3 0x38 85#define PO1030_CT3 0x38
86#define PO1030_REG_CT4 0x39 86#define PO1030_CT4 0x39
87#define PO1030_REG_CT5 0x3a 87#define PO1030_CT5 0x3a
88#define PO1030_REG_CT6 0x3b 88#define PO1030_CT6 0x3b
89#define PO1030_REG_CT7 0x3c 89#define PO1030_CT7 0x3c
90#define PO1030_REG_CT8 0x3d 90#define PO1030_CT8 0x3d
91 91
92#define PO1030_REG_AUTOCTRL1 0x3e 92#define PO1030_AUTOCTRL1 0x3e
93#define PO1030_REG_AUTOCTRL2 0x3f 93#define PO1030_AUTOCTRL2 0x3f
94 94
95#define PO1030_REG_YTARGET 0x40 95#define PO1030_YTARGET 0x40
96#define PO1030_REG_GLOBALGAINMIN 0x41 96#define PO1030_GLOBALGAINMIN 0x41
97#define PO1030_REG_GLOBALGAINMAX 0x42 97#define PO1030_GLOBALGAINMAX 0x42
98
99#define PO1030_AWB_RED_TUNING 0x47
100#define PO1030_AWB_BLUE_TUNING 0x48
98 101
99/* Output format control */ 102/* Output format control */
100#define PO1030_REG_OUTFORMCTRL1 0x5a 103#define PO1030_OUTFORMCTRL1 0x5a
101#define PO1030_REG_OUTFORMCTRL2 0x5b 104#define PO1030_OUTFORMCTRL2 0x5b
102#define PO1030_REG_OUTFORMCTRL3 0x5c 105#define PO1030_OUTFORMCTRL3 0x5c
103#define PO1030_REG_OUTFORMCTRL4 0x5d 106#define PO1030_OUTFORMCTRL4 0x5d
104#define PO1030_REG_OUTFORMCTRL5 0x5e 107#define PO1030_OUTFORMCTRL5 0x5e
105 108
106/* Imaging coefficients */ 109#define PO1030_EDGE_ENH_OFF 0x5f
107#define PO1030_REG_YBRIGHT 0x73 110#define PO1030_EGA 0x60
108#define PO1030_REG_YCONTRAST 0x74
109#define PO1030_REG_YSATURATION 0x75
110 111
111#define PO1030_HFLIP (1 << 7) 112#define PO1030_Cb_U_GAIN 0x63
112#define PO1030_VFLIP (1 << 6) 113#define PO1030_Cr_V_GAIN 0x64
114
115#define PO1030_YCONTRAST 0x74
116#define PO1030_YSATURATION 0x75
117
118#define PO1030_HFLIP (1 << 7)
119#define PO1030_VFLIP (1 << 6)
120
121#define PO1030_HREF_ENABLE (1 << 6)
122
123#define PO1030_RAW_RGB_BAYER 0x4
124
125#define PO1030_FRAME_EQUAL (1 << 3)
126#define PO1030_AUTO_SUBSAMPLING (1 << 4)
127
128#define PO1030_WEIGHT_WIN_2X (1 << 3)
129
130#define PO1030_SHUTTER_MODE (1 << 6)
131#define PO1030_AUTO_SUBSAMPLING (1 << 4)
132#define PO1030_FRAME_EQUAL (1 << 3)
133
134#define PO1030_SENSOR_RESET (1 << 5)
135
136#define PO1030_SUBSAMPLING (1 << 6)
113 137
114/*****************************************************************************/ 138/*****************************************************************************/
115 139
116#define PO1030_GLOBAL_GAIN_DEFAULT 0x12 140#define PO1030_GLOBAL_GAIN_DEFAULT 0x12
117#define PO1030_EXPOSURE_DEFAULT 0x0085 141#define PO1030_EXPOSURE_DEFAULT 0x0085
118#define PO1030_BLUE_GAIN_DEFAULT 0x40 142#define PO1030_BLUE_GAIN_DEFAULT 0x36
119#define PO1030_RED_GAIN_DEFAULT 0x40 143#define PO1030_RED_GAIN_DEFAULT 0x36
144#define PO1030_GREEN_GAIN_DEFAULT 0x40
120 145
121/*****************************************************************************/ 146/*****************************************************************************/
122 147
@@ -126,20 +151,8 @@ extern int dump_sensor;
126 151
127int po1030_probe(struct sd *sd); 152int po1030_probe(struct sd *sd);
128int po1030_init(struct sd *sd); 153int po1030_init(struct sd *sd);
129int po1030_power_down(struct sd *sd); 154int po1030_start(struct sd *sd);
130 155void po1030_disconnect(struct sd *sd);
131int po1030_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
132int po1030_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
133int po1030_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
134int po1030_set_gain(struct gspca_dev *gspca_dev, __s32 val);
135int po1030_get_red_balance(struct gspca_dev *gspca_dev, __s32 *val);
136int po1030_set_red_balance(struct gspca_dev *gspca_dev, __s32 val);
137int po1030_get_blue_balance(struct gspca_dev *gspca_dev, __s32 *val);
138int po1030_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val);
139int po1030_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
140int po1030_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
141int po1030_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
142int po1030_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
143 156
144static const struct m5602_sensor po1030 = { 157static const struct m5602_sensor po1030 = {
145 .name = "PO1030", 158 .name = "PO1030",
@@ -149,7 +162,8 @@ static const struct m5602_sensor po1030 = {
149 162
150 .probe = po1030_probe, 163 .probe = po1030_probe,
151 .init = po1030_init, 164 .init = po1030_init,
152 .power_down = po1030_power_down, 165 .start = po1030_start,
166 .disconnect = po1030_disconnect,
153}; 167};
154 168
155static const unsigned char preinit_po1030[][3] = 169static const unsigned char preinit_po1030[][3] =
@@ -159,248 +173,103 @@ static const unsigned char preinit_po1030[][3] =
159 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00}, 173 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
160 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, 174 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
161 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0}, 175 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
162 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d},
163 {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00}, 176 {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
164 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
165 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c}, 177 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
166
167 {SENSOR, PO1030_REG_AUTOCTRL2, 0x24},
168
169 {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
170 {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
171 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
172 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
173 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
174 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
175 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
176 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06},
177 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
178 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0}, 178 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
179 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
180 {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81},
181 {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
182 {BRIDGE, M5602_XB_SIG_INI, 0x01},
183 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
184 {BRIDGE, M5602_XB_VSYNC_PARA, 0x02},
185 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
186 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
187 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
188 {BRIDGE, M5602_XB_VSYNC_PARA, 0xec},
189 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
190 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
191 {BRIDGE, M5602_XB_SIG_INI, 0x00},
192 {BRIDGE, M5602_XB_SIG_INI, 0x02},
193 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
194 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
195 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02},
196 {BRIDGE, M5602_XB_HSYNC_PARA, 0x87},
197 {BRIDGE, M5602_XB_SIG_INI, 0x00},
198
199 {SENSOR, PO1030_REG_AUTOCTRL2, 0x24},
200
201 {BRIDGE, M5602_XB_GPIO_DIR, 0x05}, 179 {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
202 {BRIDGE, M5602_XB_GPIO_DAT, 0x04}, 180 {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
203 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06}, 181 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
204 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06}, 182 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
205 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02}, 183 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
184
185 {SENSOR, PO1030_AUTOCTRL2, PO1030_SENSOR_RESET | (1 << 2)},
186
206 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04}, 187 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
207 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, 188 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
208 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00}, 189 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
209 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, 190 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
210 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
211 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c}, 191 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
212 {BRIDGE, M5602_XB_GPIO_DIR, 0x05}, 192 {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
213 {BRIDGE, M5602_XB_GPIO_DAT, 0x00} 193 {BRIDGE, M5602_XB_GPIO_DAT, 0x00}
214}; 194};
215 195
216static const unsigned char init_po1030[][4] = 196static const unsigned char init_po1030[][3] =
217{ 197{
218 {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02}, 198 {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
219 {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0}, 199 {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
220 /*sequence 1*/
221 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00}, 200 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
222 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, 201 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
223 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0}, 202 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
224 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d},
225
226 {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00}, 203 {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
227 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
228 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c}, 204 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
229 /*end of sequence 1*/
230
231 /*sequence 2 (same as stop sequence)*/
232 {SENSOR, PO1030_REG_AUTOCTRL2, 0x24},
233 {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
234 {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
235 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
236 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
237 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
238
239 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
240 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
241 /*end of sequence 2*/
242 205
243 /*sequence 5*/ 206 {SENSOR, PO1030_AUTOCTRL2, PO1030_SENSOR_RESET | (1 << 2)},
244 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06},
245 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
246 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
247 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
248 {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81},
249 {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
250 {BRIDGE, M5602_XB_SIG_INI, 0x01},
251 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
252 {BRIDGE, M5602_XB_VSYNC_PARA, 0x02},
253 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
254 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
255 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
256 {BRIDGE, M5602_XB_VSYNC_PARA, 0xec},
257 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
258 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
259 {BRIDGE, M5602_XB_SIG_INI, 0x00},
260 {BRIDGE, M5602_XB_SIG_INI, 0x02},
261 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
262 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
263 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02},
264 {BRIDGE, M5602_XB_HSYNC_PARA, 0x87},
265 {BRIDGE, M5602_XB_SIG_INI, 0x00},
266 /*end of sequence 5*/
267
268 /*sequence 2 stop */
269 {SENSOR, PO1030_REG_AUTOCTRL2, 0x24},
270 207
271 {BRIDGE, M5602_XB_GPIO_DIR, 0x05}, 208 {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
272 {BRIDGE, M5602_XB_GPIO_DAT, 0x04}, 209 {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
273 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06}, 210 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
211 {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
274 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06}, 212 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
275 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02}, 213 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
276 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04}, 214 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
277 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, 215 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
278 /*end of sequence 2 stop */
279
280/* ---------------------------------
281 * end of init - begin of start
282 * --------------------------------- */
283
284 /*sequence 3*/
285 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
286 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
287 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
288 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
289 /*end of sequence 3*/
290 /*sequence 4*/
291 {BRIDGE, M5602_XB_GPIO_DIR, 0x05}, 216 {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
292 {BRIDGE, M5602_XB_GPIO_DAT, 0x00}, 217 {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
293 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
294 {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
295 218
296 {SENSOR, PO1030_REG_AUTOCTRL2, 0x04}, 219 {SENSOR, PO1030_AUTOCTRL2, 0x04},
220
221 {SENSOR, PO1030_OUTFORMCTRL2, PO1030_RAW_RGB_BAYER},
222 {SENSOR, PO1030_AUTOCTRL1, PO1030_WEIGHT_WIN_2X},
223
224 {SENSOR, PO1030_CONTROL2, 0x03},
225 {SENSOR, 0x21, 0x90},
226 {SENSOR, PO1030_YTARGET, 0x60},
227 {SENSOR, 0x59, 0x13},
228 {SENSOR, PO1030_OUTFORMCTRL1, PO1030_HREF_ENABLE},
229 {SENSOR, PO1030_EDGE_ENH_OFF, 0x00},
230 {SENSOR, PO1030_EGA, 0x80},
231 {SENSOR, 0x78, 0x14},
232 {SENSOR, 0x6f, 0x01},
233 {SENSOR, PO1030_GLOBALGAINMAX, 0x14},
234 {SENSOR, PO1030_Cb_U_GAIN, 0x38},
235 {SENSOR, PO1030_Cr_V_GAIN, 0x38},
236 {SENSOR, PO1030_CONTROL1, PO1030_SHUTTER_MODE |
237 PO1030_AUTO_SUBSAMPLING |
238 PO1030_FRAME_EQUAL},
239 {SENSOR, PO1030_GC0, 0x10},
240 {SENSOR, PO1030_GC1, 0x20},
241 {SENSOR, PO1030_GC2, 0x40},
242 {SENSOR, PO1030_GC3, 0x60},
243 {SENSOR, PO1030_GC4, 0x80},
244 {SENSOR, PO1030_GC5, 0xa0},
245 {SENSOR, PO1030_GC6, 0xc0},
246 {SENSOR, PO1030_GC7, 0xff},
297 247
298 /* Set the width to 751 */ 248 /* Set the width to 751 */
299 {SENSOR, PO1030_REG_FRAMEWIDTH_H, 0x02}, 249 {SENSOR, PO1030_FRAMEWIDTH_H, 0x02},
300 {SENSOR, PO1030_REG_FRAMEWIDTH_L, 0xef}, 250 {SENSOR, PO1030_FRAMEWIDTH_L, 0xef},
301 251
302 /* Set the height to 540 */ 252 /* Set the height to 540 */
303 {SENSOR, PO1030_REG_FRAMEHEIGHT_H, 0x02}, 253 {SENSOR, PO1030_FRAMEHEIGHT_H, 0x02},
304 {SENSOR, PO1030_REG_FRAMEHEIGHT_L, 0x1c}, 254 {SENSOR, PO1030_FRAMEHEIGHT_L, 0x1c},
305 255
306 /* Set the x window to 1 */ 256 /* Set the x window to 1 */
307 {SENSOR, PO1030_REG_WINDOWX_H, 0x00}, 257 {SENSOR, PO1030_WINDOWX_H, 0x00},
308 {SENSOR, PO1030_REG_WINDOWX_L, 0x01}, 258 {SENSOR, PO1030_WINDOWX_L, 0x01},
309 259
310 /* Set the y window to 1 */ 260 /* Set the y window to 1 */
311 {SENSOR, PO1030_REG_WINDOWY_H, 0x00}, 261 {SENSOR, PO1030_WINDOWY_H, 0x00},
312 {SENSOR, PO1030_REG_WINDOWY_L, 0x01}, 262 {SENSOR, PO1030_WINDOWY_L, 0x01},
313
314 {SENSOR, PO1030_REG_WINDOWWIDTH_H, 0x02},
315 {SENSOR, PO1030_REG_WINDOWWIDTH_L, 0x87},
316 {SENSOR, PO1030_REG_WINDOWHEIGHT_H, 0x01},
317 {SENSOR, PO1030_REG_WINDOWHEIGHT_L, 0xe3},
318
319 {SENSOR, PO1030_REG_OUTFORMCTRL2, 0x04},
320 {SENSOR, PO1030_REG_OUTFORMCTRL2, 0x04},
321 {SENSOR, PO1030_REG_AUTOCTRL1, 0x08},
322 {SENSOR, PO1030_REG_CONTROL2, 0x03},
323 {SENSOR, 0x21, 0x90},
324 {SENSOR, PO1030_REG_YTARGET, 0x60},
325 {SENSOR, 0x59, 0x13},
326 {SENSOR, PO1030_REG_OUTFORMCTRL1, 0x40},
327 {SENSOR, 0x5f, 0x00},
328 {SENSOR, 0x60, 0x80},
329 {SENSOR, 0x78, 0x14},
330 {SENSOR, 0x6f, 0x01},
331 {SENSOR, PO1030_REG_CONTROL1, 0x18},
332 {SENSOR, PO1030_REG_GLOBALGAINMAX, 0x14},
333 {SENSOR, 0x63, 0x38},
334 {SENSOR, 0x64, 0x38},
335 {SENSOR, PO1030_REG_CONTROL1, 0x58},
336 {SENSOR, PO1030_REG_RED_GAIN, 0x30},
337 {SENSOR, PO1030_REG_GREEN_1_GAIN, 0x30},
338 {SENSOR, PO1030_REG_BLUE_GAIN, 0x30},
339 {SENSOR, PO1030_REG_GREEN_2_GAIN, 0x30},
340 {SENSOR, PO1030_REG_GC0, 0x10},
341 {SENSOR, PO1030_REG_GC1, 0x20},
342 {SENSOR, PO1030_REG_GC2, 0x40},
343 {SENSOR, PO1030_REG_GC3, 0x60},
344 {SENSOR, PO1030_REG_GC4, 0x80},
345 {SENSOR, PO1030_REG_GC5, 0xa0},
346 {SENSOR, PO1030_REG_GC6, 0xc0},
347 {SENSOR, PO1030_REG_GC7, 0xff},
348 /*end of sequence 4*/
349 /*sequence 5*/
350 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06},
351 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
352 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
353 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
354 {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81},
355 {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
356 {BRIDGE, M5602_XB_SIG_INI, 0x01},
357 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
358 {BRIDGE, M5602_XB_VSYNC_PARA, 0x02},
359 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
360 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
361 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
362 {BRIDGE, M5602_XB_VSYNC_PARA, 0xec},
363 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
364 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
365 {BRIDGE, M5602_XB_SIG_INI, 0x00},
366 {BRIDGE, M5602_XB_SIG_INI, 0x00},
367 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
368 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
369 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02},
370 {BRIDGE, M5602_XB_HSYNC_PARA, 0x7e},
371 {BRIDGE, M5602_XB_SIG_INI, 0x00},
372 /*end of sequence 5*/
373
374 /*sequence 6*/
375 /* Changing 40 in f0 the image becomes green in bayer mode and red in
376 * rgb mode */
377 {SENSOR, PO1030_REG_RED_GAIN, PO1030_RED_GAIN_DEFAULT},
378 /* in changing 40 in f0 the image becomes green in bayer mode and red in
379 * rgb mode */
380 {SENSOR, PO1030_REG_BLUE_GAIN, PO1030_BLUE_GAIN_DEFAULT},
381 263
382 /* with a very low lighted environment increase the exposure but 264 /* with a very low lighted environment increase the exposure but
383 * decrease the FPS (Frame Per Second) */ 265 * decrease the FPS (Frame Per Second) */
384 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00}, 266 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
385 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0}, 267 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
386 268
387 /* Controls high exposure more than SENSOR_LOW_EXPOSURE, use only in 269 {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
388 * low lighted environment (f0 is more than ff ?)*/ 270 {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
389 {SENSOR, PO1030_REG_INTEGLINES_H, ((PO1030_EXPOSURE_DEFAULT >> 2) 271 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
390 & 0xff)}, 272 {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
391
392 /* Controls middle exposure, use only in high lighted environment */
393 {SENSOR, PO1030_REG_INTEGLINES_M, PO1030_EXPOSURE_DEFAULT & 0xff},
394
395 /* Controls clarity (not sure) */
396 {SENSOR, PO1030_REG_INTEGLINES_L, 0x00},
397 /* Controls gain (the image is more lighted) */
398 {SENSOR, PO1030_REG_GLOBALGAIN, PO1030_GLOBAL_GAIN_DEFAULT},
399
400 /* Sets the width */
401 {SENSOR, PO1030_REG_FRAMEWIDTH_H, 0x02},
402 {SENSOR, PO1030_REG_FRAMEWIDTH_L, 0xef}
403 /*end of sequence 6*/
404}; 273};
405 274
406#endif 275#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
index 4306d596056d..191bcd718979 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.c
@@ -18,6 +18,19 @@
18 18
19#include "m5602_s5k4aa.h" 19#include "m5602_s5k4aa.h"
20 20
21static int s5k4aa_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
22static int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
23static int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
24static int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
25static int s5k4aa_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
26static int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
27static int s5k4aa_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
28static int s5k4aa_set_gain(struct gspca_dev *gspca_dev, __s32 val);
29static int s5k4aa_get_noise(struct gspca_dev *gspca_dev, __s32 *val);
30static int s5k4aa_set_noise(struct gspca_dev *gspca_dev, __s32 val);
31static int s5k4aa_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
32static int s5k4aa_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
33
21static 34static
22 const 35 const
23 struct dmi_system_id s5k4aa_vflip_dmi_table[] = { 36 struct dmi_system_id s5k4aa_vflip_dmi_table[] = {
@@ -46,6 +59,18 @@ static
46 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"), 59 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
47 DMI_MATCH(DMI_PRODUCT_NAME, "GX700/GX705/EX700") 60 DMI_MATCH(DMI_PRODUCT_NAME, "GX700/GX705/EX700")
48 } 61 }
62 }, {
63 .ident = "MSI L735",
64 .matches = {
65 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
66 DMI_MATCH(DMI_PRODUCT_NAME, "MS-1717X")
67 }
68 }, {
69 .ident = "Lenovo Y300",
70 .matches = {
71 DMI_MATCH(DMI_SYS_VENDOR, "L3000 Y300"),
72 DMI_MATCH(DMI_PRODUCT_NAME, "Y300")
73 }
49 }, 74 },
50 { } 75 { }
51}; 76};
@@ -61,10 +86,22 @@ static struct v4l2_pix_format s5k4aa_modes[] = {
61 .bytesperline = 640, 86 .bytesperline = 640,
62 .colorspace = V4L2_COLORSPACE_SRGB, 87 .colorspace = V4L2_COLORSPACE_SRGB,
63 .priv = 0 88 .priv = 0
89 },
90 {
91 1280,
92 1024,
93 V4L2_PIX_FMT_SBGGR8,
94 V4L2_FIELD_NONE,
95 .sizeimage =
96 1280 * 1024,
97 .bytesperline = 1280,
98 .colorspace = V4L2_COLORSPACE_SRGB,
99 .priv = 0
64 } 100 }
65}; 101};
66 102
67const static struct ctrl s5k4aa_ctrls[] = { 103static const struct ctrl s5k4aa_ctrls[] = {
104#define VFLIP_IDX 0
68 { 105 {
69 { 106 {
70 .id = V4L2_CID_VFLIP, 107 .id = V4L2_CID_VFLIP,
@@ -77,8 +114,9 @@ const static struct ctrl s5k4aa_ctrls[] = {
77 }, 114 },
78 .set = s5k4aa_set_vflip, 115 .set = s5k4aa_set_vflip,
79 .get = s5k4aa_get_vflip 116 .get = s5k4aa_get_vflip
80 117 },
81 }, { 118#define HFLIP_IDX 1
119 {
82 { 120 {
83 .id = V4L2_CID_HFLIP, 121 .id = V4L2_CID_HFLIP,
84 .type = V4L2_CTRL_TYPE_BOOLEAN, 122 .type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -90,8 +128,9 @@ const static struct ctrl s5k4aa_ctrls[] = {
90 }, 128 },
91 .set = s5k4aa_set_hflip, 129 .set = s5k4aa_set_hflip,
92 .get = s5k4aa_get_hflip 130 .get = s5k4aa_get_hflip
93 131 },
94 }, { 132#define GAIN_IDX 2
133 {
95 { 134 {
96 .id = V4L2_CID_GAIN, 135 .id = V4L2_CID_GAIN,
97 .type = V4L2_CTRL_TYPE_INTEGER, 136 .type = V4L2_CTRL_TYPE_INTEGER,
@@ -99,12 +138,14 @@ const static struct ctrl s5k4aa_ctrls[] = {
99 .minimum = 0, 138 .minimum = 0,
100 .maximum = 127, 139 .maximum = 127,
101 .step = 1, 140 .step = 1,
102 .default_value = 0xa0, 141 .default_value = S5K4AA_DEFAULT_GAIN,
103 .flags = V4L2_CTRL_FLAG_SLIDER 142 .flags = V4L2_CTRL_FLAG_SLIDER
104 }, 143 },
105 .set = s5k4aa_set_gain, 144 .set = s5k4aa_set_gain,
106 .get = s5k4aa_get_gain 145 .get = s5k4aa_get_gain
107 }, { 146 },
147#define EXPOSURE_IDX 3
148 {
108 { 149 {
109 .id = V4L2_CID_EXPOSURE, 150 .id = V4L2_CID_EXPOSURE,
110 .type = V4L2_CTRL_TYPE_INTEGER, 151 .type = V4L2_CTRL_TYPE_INTEGER,
@@ -117,7 +158,36 @@ const static struct ctrl s5k4aa_ctrls[] = {
117 }, 158 },
118 .set = s5k4aa_set_exposure, 159 .set = s5k4aa_set_exposure,
119 .get = s5k4aa_get_exposure 160 .get = s5k4aa_get_exposure
120 } 161 },
162#define NOISE_SUPP_IDX 4
163 {
164 {
165 .id = V4L2_CID_PRIVATE_BASE,
166 .type = V4L2_CTRL_TYPE_BOOLEAN,
167 .name = "Noise suppression (smoothing)",
168 .minimum = 0,
169 .maximum = 1,
170 .step = 1,
171 .default_value = 1,
172 },
173 .set = s5k4aa_set_noise,
174 .get = s5k4aa_get_noise
175 },
176#define BRIGHTNESS_IDX 5
177 {
178 {
179 .id = V4L2_CID_BRIGHTNESS,
180 .type = V4L2_CTRL_TYPE_INTEGER,
181 .name = "Brightness",
182 .minimum = 0,
183 .maximum = 0x1f,
184 .step = 1,
185 .default_value = S5K4AA_DEFAULT_BRIGHTNESS,
186 },
187 .set = s5k4aa_set_brightness,
188 .get = s5k4aa_get_brightness
189 },
190
121}; 191};
122 192
123static void s5k4aa_dump_registers(struct sd *sd); 193static void s5k4aa_dump_registers(struct sd *sd);
@@ -127,6 +197,7 @@ int s5k4aa_probe(struct sd *sd)
127 u8 prod_id[6] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 197 u8 prod_id[6] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
128 const u8 expected_prod_id[6] = {0x00, 0x10, 0x00, 0x4b, 0x33, 0x75}; 198 const u8 expected_prod_id[6] = {0x00, 0x10, 0x00, 0x4b, 0x33, 0x75};
129 int i, err = 0; 199 int i, err = 0;
200 s32 *sensor_settings;
130 201
131 if (force_sensor) { 202 if (force_sensor) {
132 if (force_sensor == S5K4AA_SENSOR) { 203 if (force_sensor == S5K4AA_SENSOR) {
@@ -185,10 +256,20 @@ int s5k4aa_probe(struct sd *sd)
185 info("Detected a s5k4aa sensor"); 256 info("Detected a s5k4aa sensor");
186 257
187sensor_found: 258sensor_found:
259 sensor_settings = kmalloc(
260 ARRAY_SIZE(s5k4aa_ctrls) * sizeof(s32), GFP_KERNEL);
261 if (!sensor_settings)
262 return -ENOMEM;
263
188 sd->gspca_dev.cam.cam_mode = s5k4aa_modes; 264 sd->gspca_dev.cam.cam_mode = s5k4aa_modes;
189 sd->gspca_dev.cam.nmodes = ARRAY_SIZE(s5k4aa_modes); 265 sd->gspca_dev.cam.nmodes = ARRAY_SIZE(s5k4aa_modes);
190 sd->desc->ctrls = s5k4aa_ctrls; 266 sd->desc->ctrls = s5k4aa_ctrls;
191 sd->desc->nctrls = ARRAY_SIZE(s5k4aa_ctrls); 267 sd->desc->nctrls = ARRAY_SIZE(s5k4aa_ctrls);
268
269 for (i = 0; i < ARRAY_SIZE(s5k4aa_ctrls); i++)
270 sensor_settings[i] = s5k4aa_ctrls[i].qctrl.default_value;
271 sd->sensor_priv = sensor_settings;
272
192 return 0; 273 return 0;
193} 274}
194 275
@@ -197,9 +278,45 @@ int s5k4aa_start(struct sd *sd)
197 int i, err = 0; 278 int i, err = 0;
198 u8 data[2]; 279 u8 data[2];
199 struct cam *cam = &sd->gspca_dev.cam; 280 struct cam *cam = &sd->gspca_dev.cam;
281 s32 *sensor_settings = sd->sensor_priv;
282
283 switch (cam->cam_mode[sd->gspca_dev.curr_mode].width) {
284 case 1280:
285 PDEBUG(D_V4L2, "Configuring camera for SXGA mode");
286
287 for (i = 0; i < ARRAY_SIZE(SXGA_s5k4aa); i++) {
288 switch (SXGA_s5k4aa[i][0]) {
289 case BRIDGE:
290 err = m5602_write_bridge(sd,
291 SXGA_s5k4aa[i][1],
292 SXGA_s5k4aa[i][2]);
293 break;
294
295 case SENSOR:
296 data[0] = SXGA_s5k4aa[i][2];
297 err = m5602_write_sensor(sd,
298 SXGA_s5k4aa[i][1],
299 data, 1);
300 break;
301
302 case SENSOR_LONG:
303 data[0] = SXGA_s5k4aa[i][2];
304 data[1] = SXGA_s5k4aa[i][3];
305 err = m5602_write_sensor(sd,
306 SXGA_s5k4aa[i][1],
307 data, 2);
308 break;
309
310 default:
311 err("Invalid stream command, exiting init");
312 return -EINVAL;
313 }
314 }
315 err = s5k4aa_set_noise(&sd->gspca_dev, 0);
316 if (err < 0)
317 return err;
318 break;
200 319
201 switch (cam->cam_mode[sd->gspca_dev.curr_mode].width)
202 {
203 case 640: 320 case 640:
204 PDEBUG(D_V4L2, "Configuring camera for VGA mode"); 321 PDEBUG(D_V4L2, "Configuring camera for VGA mode");
205 322
@@ -231,8 +348,37 @@ int s5k4aa_start(struct sd *sd)
231 return -EINVAL; 348 return -EINVAL;
232 } 349 }
233 } 350 }
351 err = s5k4aa_set_noise(&sd->gspca_dev, 1);
352 if (err < 0)
353 return err;
354 break;
234 } 355 }
235 return err; 356 if (err < 0)
357 return err;
358
359 err = s5k4aa_set_exposure(&sd->gspca_dev,
360 sensor_settings[EXPOSURE_IDX]);
361 if (err < 0)
362 return err;
363
364 err = s5k4aa_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
365 if (err < 0)
366 return err;
367
368 err = s5k4aa_set_brightness(&sd->gspca_dev,
369 sensor_settings[BRIGHTNESS_IDX]);
370 if (err < 0)
371 return err;
372
373 err = s5k4aa_set_noise(&sd->gspca_dev, sensor_settings[NOISE_SUPP_IDX]);
374 if (err < 0)
375 return err;
376
377 err = s5k4aa_set_vflip(&sd->gspca_dev, sensor_settings[VFLIP_IDX]);
378 if (err < 0)
379 return err;
380
381 return s5k4aa_set_hflip(&sd->gspca_dev, sensor_settings[HFLIP_IDX]);
236} 382}
237 383
238int s5k4aa_init(struct sd *sd) 384int s5k4aa_init(struct sd *sd)
@@ -270,62 +416,28 @@ int s5k4aa_init(struct sd *sd)
270 if (dump_sensor) 416 if (dump_sensor)
271 s5k4aa_dump_registers(sd); 417 s5k4aa_dump_registers(sd);
272 418
273 if (!err && dmi_check_system(s5k4aa_vflip_dmi_table)) { 419 return err;
274 u8 data = 0x02;
275 info("vertical flip quirk active");
276 m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
277 m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1);
278 data |= S5K4AA_RM_V_FLIP;
279 data &= ~S5K4AA_RM_H_FLIP;
280 m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
281
282 /* Decrement COLSTART to preserve color order (BGGR) */
283 m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
284 data--;
285 m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
286
287 /* Increment ROWSTART to preserve color order (BGGR) */
288 m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
289 data++;
290 m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
291 }
292
293 return (err < 0) ? err : 0;
294}
295
296int s5k4aa_power_down(struct sd *sd)
297{
298 return 0;
299} 420}
300 421
301int s5k4aa_get_exposure(struct gspca_dev *gspca_dev, __s32 *val) 422static int s5k4aa_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
302{ 423{
303 struct sd *sd = (struct sd *) gspca_dev; 424 struct sd *sd = (struct sd *) gspca_dev;
304 u8 data = S5K4AA_PAGE_MAP_2; 425 s32 *sensor_settings = sd->sensor_priv;
305 int err;
306
307 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
308 if (err < 0)
309 return err;
310 426
311 err = m5602_read_sensor(sd, S5K4AA_EXPOSURE_HI, &data, 1); 427 *val = sensor_settings[EXPOSURE_IDX];
312 if (err < 0)
313 return err;
314
315 *val = data << 8;
316 err = m5602_read_sensor(sd, S5K4AA_EXPOSURE_LO, &data, 1);
317 *val |= data;
318 PDEBUG(D_V4L2, "Read exposure %d", *val); 428 PDEBUG(D_V4L2, "Read exposure %d", *val);
319 429
320 return err; 430 return 0;
321} 431}
322 432
323int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val) 433static int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
324{ 434{
325 struct sd *sd = (struct sd *) gspca_dev; 435 struct sd *sd = (struct sd *) gspca_dev;
436 s32 *sensor_settings = sd->sensor_priv;
326 u8 data = S5K4AA_PAGE_MAP_2; 437 u8 data = S5K4AA_PAGE_MAP_2;
327 int err; 438 int err;
328 439
440 sensor_settings[EXPOSURE_IDX] = val;
329 PDEBUG(D_V4L2, "Set exposure to %d", val); 441 PDEBUG(D_V4L2, "Set exposure to %d", val);
330 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 442 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
331 if (err < 0) 443 if (err < 0)
@@ -340,29 +452,26 @@ int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
340 return err; 452 return err;
341} 453}
342 454
343int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val) 455static int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
344{ 456{
345 struct sd *sd = (struct sd *) gspca_dev; 457 struct sd *sd = (struct sd *) gspca_dev;
346 u8 data = S5K4AA_PAGE_MAP_2; 458 s32 *sensor_settings = sd->sensor_priv;
347 int err;
348
349 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
350 if (err < 0)
351 return err;
352 459
353 err = m5602_read_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 460 *val = sensor_settings[VFLIP_IDX];
354 *val = (data & S5K4AA_RM_V_FLIP) >> 7;
355 PDEBUG(D_V4L2, "Read vertical flip %d", *val); 461 PDEBUG(D_V4L2, "Read vertical flip %d", *val);
356 462
357 return err; 463 return 0;
358} 464}
359 465
360int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val) 466static int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
361{ 467{
362 struct sd *sd = (struct sd *) gspca_dev; 468 struct sd *sd = (struct sd *) gspca_dev;
469 s32 *sensor_settings = sd->sensor_priv;
363 u8 data = S5K4AA_PAGE_MAP_2; 470 u8 data = S5K4AA_PAGE_MAP_2;
364 int err; 471 int err;
365 472
473 sensor_settings[VFLIP_IDX] = val;
474
366 PDEBUG(D_V4L2, "Set vertical flip to %d", val); 475 PDEBUG(D_V4L2, "Set vertical flip to %d", val);
367 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 476 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
368 if (err < 0) 477 if (err < 0)
@@ -370,56 +479,48 @@ int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
370 err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); 479 err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
371 if (err < 0) 480 if (err < 0)
372 return err; 481 return err;
373 data = ((data & ~S5K4AA_RM_V_FLIP) 482
374 | ((val & 0x01) << 7)); 483 err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1);
375 err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
376 if (err < 0) 484 if (err < 0)
377 return err; 485 return err;
378 486
379 if (val) { 487 if (dmi_check_system(s5k4aa_vflip_dmi_table))
380 err = m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1); 488 val = !val;
381 if (err < 0)
382 return err;
383
384 data++;
385 err = m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
386 } else {
387 err = m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
388 if (err < 0)
389 return err;
390 489
391 data--; 490 data = ((data & ~S5K4AA_RM_V_FLIP) | ((val & 0x01) << 7));
392 err = m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1); 491 err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
393 } 492 if (err < 0)
493 return err;
394 494
495 err = m5602_read_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
496 if (err < 0)
497 return err;
498 data = (data & 0xfe) | !val;
499 err = m5602_write_sensor(sd, S5K4AA_ROWSTART_LO, &data, 1);
395 return err; 500 return err;
396} 501}
397 502
398int s5k4aa_get_hflip(struct gspca_dev *gspca_dev, __s32 *val) 503static int s5k4aa_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
399{ 504{
400 struct sd *sd = (struct sd *) gspca_dev; 505 struct sd *sd = (struct sd *) gspca_dev;
401 u8 data = S5K4AA_PAGE_MAP_2; 506 s32 *sensor_settings = sd->sensor_priv;
402 int err;
403
404 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
405 if (err < 0)
406 return err;
407 507
408 err = m5602_read_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 508 *val = sensor_settings[HFLIP_IDX];
409 *val = (data & S5K4AA_RM_H_FLIP) >> 6;
410 PDEBUG(D_V4L2, "Read horizontal flip %d", *val); 509 PDEBUG(D_V4L2, "Read horizontal flip %d", *val);
411 510
412 return err; 511 return 0;
413} 512}
414 513
415int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val) 514static int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
416{ 515{
417 struct sd *sd = (struct sd *) gspca_dev; 516 struct sd *sd = (struct sd *) gspca_dev;
517 s32 *sensor_settings = sd->sensor_priv;
418 u8 data = S5K4AA_PAGE_MAP_2; 518 u8 data = S5K4AA_PAGE_MAP_2;
419 int err; 519 int err;
420 520
421 PDEBUG(D_V4L2, "Set horizontal flip to %d", 521 sensor_settings[HFLIP_IDX] = val;
422 val); 522
523 PDEBUG(D_V4L2, "Set horizontal flip to %d", val);
423 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 524 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
424 if (err < 0) 525 if (err < 0)
425 return err; 526 return err;
@@ -427,62 +528,116 @@ int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
427 if (err < 0) 528 if (err < 0)
428 return err; 529 return err;
429 530
531 err = m5602_read_sensor(sd, S5K4AA_READ_MODE, &data, 1);
532 if (err < 0)
533 return err;
534
535 if (dmi_check_system(s5k4aa_vflip_dmi_table))
536 val = !val;
537
430 data = ((data & ~S5K4AA_RM_H_FLIP) | ((val & 0x01) << 6)); 538 data = ((data & ~S5K4AA_RM_H_FLIP) | ((val & 0x01) << 6));
431 err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1); 539 err = m5602_write_sensor(sd, S5K4AA_READ_MODE, &data, 1);
432 if (err < 0) 540 if (err < 0)
433 return err; 541 return err;
434 542
435 if (val) { 543 err = m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
436 err = m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1); 544 if (err < 0)
437 if (err < 0) 545 return err;
438 return err; 546 data = (data & 0xfe) | !val;
439 data++; 547 err = m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
440 err = m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
441 if (err < 0)
442 return err;
443 } else {
444 err = m5602_read_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
445 if (err < 0)
446 return err;
447 data--;
448 err = m5602_write_sensor(sd, S5K4AA_COLSTART_LO, &data, 1);
449 }
450
451 return err; 548 return err;
452} 549}
453 550
454int s5k4aa_get_gain(struct gspca_dev *gspca_dev, __s32 *val) 551static int s5k4aa_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
552{
553 struct sd *sd = (struct sd *) gspca_dev;
554 s32 *sensor_settings = sd->sensor_priv;
555
556 *val = sensor_settings[GAIN_IDX];
557 PDEBUG(D_V4L2, "Read gain %d", *val);
558 return 0;
559}
560
561static int s5k4aa_set_gain(struct gspca_dev *gspca_dev, __s32 val)
455{ 562{
456 struct sd *sd = (struct sd *) gspca_dev; 563 struct sd *sd = (struct sd *) gspca_dev;
564 s32 *sensor_settings = sd->sensor_priv;
457 u8 data = S5K4AA_PAGE_MAP_2; 565 u8 data = S5K4AA_PAGE_MAP_2;
458 int err; 566 int err;
459 567
568 sensor_settings[GAIN_IDX] = val;
569
570 PDEBUG(D_V4L2, "Set gain to %d", val);
460 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 571 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
461 if (err < 0) 572 if (err < 0)
462 return err; 573 return err;
463 574
464 err = m5602_read_sensor(sd, S5K4AA_GAIN_2, &data, 1); 575 data = val & 0xff;
465 *val = data; 576 err = m5602_write_sensor(sd, S5K4AA_GAIN, &data, 1);
466 PDEBUG(D_V4L2, "Read gain %d", *val);
467 577
468 return err; 578 return err;
469} 579}
470 580
471int s5k4aa_set_gain(struct gspca_dev *gspca_dev, __s32 val) 581static int s5k4aa_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
582{
583 struct sd *sd = (struct sd *) gspca_dev;
584 s32 *sensor_settings = sd->sensor_priv;
585
586 *val = sensor_settings[BRIGHTNESS_IDX];
587 PDEBUG(D_V4L2, "Read brightness %d", *val);
588 return 0;
589}
590
591static int s5k4aa_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
472{ 592{
473 struct sd *sd = (struct sd *) gspca_dev; 593 struct sd *sd = (struct sd *) gspca_dev;
594 s32 *sensor_settings = sd->sensor_priv;
474 u8 data = S5K4AA_PAGE_MAP_2; 595 u8 data = S5K4AA_PAGE_MAP_2;
475 int err; 596 int err;
476 597
477 PDEBUG(D_V4L2, "Set gain to %d", val); 598 sensor_settings[BRIGHTNESS_IDX] = val;
599
600 PDEBUG(D_V4L2, "Set brightness to %d", val);
478 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1); 601 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
479 if (err < 0) 602 if (err < 0)
480 return err; 603 return err;
481 604
482 data = val & 0xff; 605 data = val & 0xff;
483 err = m5602_write_sensor(sd, S5K4AA_GAIN_2, &data, 1); 606 return m5602_write_sensor(sd, S5K4AA_BRIGHTNESS, &data, 1);
607}
484 608
485 return err; 609static int s5k4aa_get_noise(struct gspca_dev *gspca_dev, __s32 *val)
610{
611 struct sd *sd = (struct sd *) gspca_dev;
612 s32 *sensor_settings = sd->sensor_priv;
613
614 *val = sensor_settings[NOISE_SUPP_IDX];
615 PDEBUG(D_V4L2, "Read noise %d", *val);
616 return 0;
617}
618
619static int s5k4aa_set_noise(struct gspca_dev *gspca_dev, __s32 val)
620{
621 struct sd *sd = (struct sd *) gspca_dev;
622 s32 *sensor_settings = sd->sensor_priv;
623 u8 data = S5K4AA_PAGE_MAP_2;
624 int err;
625
626 sensor_settings[NOISE_SUPP_IDX] = val;
627
628 PDEBUG(D_V4L2, "Set noise to %d", val);
629 err = m5602_write_sensor(sd, S5K4AA_PAGE_MAP, &data, 1);
630 if (err < 0)
631 return err;
632
633 data = val & 0x01;
634 return m5602_write_sensor(sd, S5K4AA_NOISE_SUPP, &data, 1);
635}
636
637void s5k4aa_disconnect(struct sd *sd)
638{
639 sd->sensor = NULL;
640 kfree(sd->sensor_priv);
486} 641}
487 642
488static void s5k4aa_dump_registers(struct sd *sd) 643static void s5k4aa_dump_registers(struct sd *sd)
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
index ca854d4f9475..4440da4e7f0f 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
@@ -47,8 +47,9 @@
47#define S5K4AA_H_BLANK_LO__ 0x1e 47#define S5K4AA_H_BLANK_LO__ 0x1e
48#define S5K4AA_EXPOSURE_HI 0x17 48#define S5K4AA_EXPOSURE_HI 0x17
49#define S5K4AA_EXPOSURE_LO 0x18 49#define S5K4AA_EXPOSURE_LO 0x18
50#define S5K4AA_GAIN_1 0x1f /* (digital?) gain : 5 bits */ 50#define S5K4AA_BRIGHTNESS 0x1f /* (digital?) gain : 5 bits */
51#define S5K4AA_GAIN_2 0x20 /* (analogue?) gain : 7 bits */ 51#define S5K4AA_GAIN 0x20 /* (analogue?) gain : 7 bits */
52#define S5K4AA_NOISE_SUPP 0x37
52 53
53#define S5K4AA_RM_ROW_SKIP_4X 0x08 54#define S5K4AA_RM_ROW_SKIP_4X 0x08
54#define S5K4AA_RM_ROW_SKIP_2X 0x04 55#define S5K4AA_RM_ROW_SKIP_2X 0x04
@@ -57,6 +58,9 @@
57#define S5K4AA_RM_H_FLIP 0x40 58#define S5K4AA_RM_H_FLIP 0x40
58#define S5K4AA_RM_V_FLIP 0x80 59#define S5K4AA_RM_V_FLIP 0x80
59 60
61#define S5K4AA_DEFAULT_GAIN 0x5f
62#define S5K4AA_DEFAULT_BRIGHTNESS 0x10
63
60/*****************************************************************************/ 64/*****************************************************************************/
61 65
62/* Kernel module parameters */ 66/* Kernel module parameters */
@@ -66,25 +70,17 @@ extern int dump_sensor;
66int s5k4aa_probe(struct sd *sd); 70int s5k4aa_probe(struct sd *sd);
67int s5k4aa_init(struct sd *sd); 71int s5k4aa_init(struct sd *sd);
68int s5k4aa_start(struct sd *sd); 72int s5k4aa_start(struct sd *sd);
69int s5k4aa_power_down(struct sd *sd); 73void s5k4aa_disconnect(struct sd *sd);
70
71int s5k4aa_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
72int s5k4aa_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
73int s5k4aa_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
74int s5k4aa_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
75int s5k4aa_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
76int s5k4aa_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
77int s5k4aa_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
78int s5k4aa_set_gain(struct gspca_dev *gspca_dev, __s32 val);
79 74
80static const struct m5602_sensor s5k4aa = { 75static const struct m5602_sensor s5k4aa = {
81 .name = "S5K4AA", 76 .name = "S5K4AA",
77 .i2c_slave_id = 0x5a,
78 .i2c_regW = 2,
79
82 .probe = s5k4aa_probe, 80 .probe = s5k4aa_probe,
83 .init = s5k4aa_init, 81 .init = s5k4aa_init,
84 .start = s5k4aa_start, 82 .start = s5k4aa_start,
85 .power_down = s5k4aa_power_down, 83 .disconnect = s5k4aa_disconnect,
86 .i2c_slave_id = 0x5a,
87 .i2c_regW = 2,
88}; 84};
89 85
90static const unsigned char preinit_s5k4aa[][4] = 86static const unsigned char preinit_s5k4aa[][4] =
@@ -179,30 +175,12 @@ static const unsigned char init_s5k4aa[][4] =
179 {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00}, 175 {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
180 {SENSOR, 0x0c, 0x05, 0x00}, 176 {SENSOR, 0x0c, 0x05, 0x00},
181 {SENSOR, 0x02, 0x0e, 0x00}, 177 {SENSOR, 0x02, 0x0e, 0x00},
182 {SENSOR, S5K4AA_GAIN_1, 0x0f, 0x00},
183 {SENSOR, S5K4AA_GAIN_2, 0x00, 0x00},
184 {SENSOR, S5K4AA_GLOBAL_GAIN__, 0x01, 0x00},
185 {SENSOR, 0x11, 0x00, 0x00},
186 {SENSOR, 0x12, 0x00, 0x00},
187 {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
188 {SENSOR, S5K4AA_READ_MODE, 0xa0, 0x00}, 178 {SENSOR, S5K4AA_READ_MODE, 0xa0, 0x00},
189 {SENSOR, 0x37, 0x00, 0x00}, 179 {SENSOR, 0x37, 0x00, 0x00},
190 {SENSOR, S5K4AA_ROWSTART_HI, 0x00, 0x00}, 180};
191 {SENSOR, S5K4AA_ROWSTART_LO, 0x2a, 0x00},
192 {SENSOR, S5K4AA_COLSTART_HI, 0x00, 0x00},
193 {SENSOR, S5K4AA_COLSTART_LO, 0x0b, 0x00},
194 {SENSOR, S5K4AA_WINDOW_HEIGHT_HI, 0x03, 0x00},
195 {SENSOR, S5K4AA_WINDOW_HEIGHT_LO, 0xc4, 0x00},
196 {SENSOR, S5K4AA_WINDOW_WIDTH_HI, 0x05, 0x00},
197 {SENSOR, S5K4AA_WINDOW_WIDTH_LO, 0x08, 0x00},
198 {SENSOR, S5K4AA_H_BLANK_HI__, 0x00, 0x00},
199 {SENSOR, S5K4AA_H_BLANK_LO__, 0x48, 0x00},
200 {SENSOR, S5K4AA_EXPOSURE_HI, 0x00, 0x00},
201 {SENSOR, S5K4AA_EXPOSURE_LO, 0x43, 0x00},
202 {SENSOR, 0x11, 0x04, 0x00},
203 {SENSOR, 0x12, 0xc3, 0x00},
204 {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
205 181
182static const unsigned char VGA_s5k4aa[][4] =
183{
206 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00}, 184 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
207 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, 185 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
208 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00}, 186 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -238,7 +216,7 @@ static const unsigned char init_s5k4aa[][4] =
238 {SENSOR, 0x37, 0x01, 0x00}, 216 {SENSOR, 0x37, 0x01, 0x00},
239 /* ROWSTART_HI, ROWSTART_LO : 10 + (1024-960)/2 = 42 = 0x002a */ 217 /* ROWSTART_HI, ROWSTART_LO : 10 + (1024-960)/2 = 42 = 0x002a */
240 {SENSOR, S5K4AA_ROWSTART_HI, 0x00, 0x00}, 218 {SENSOR, S5K4AA_ROWSTART_HI, 0x00, 0x00},
241 {SENSOR, S5K4AA_ROWSTART_LO, 0x2a, 0x00}, 219 {SENSOR, S5K4AA_ROWSTART_LO, 0x29, 0x00},
242 {SENSOR, S5K4AA_COLSTART_HI, 0x00, 0x00}, 220 {SENSOR, S5K4AA_COLSTART_HI, 0x00, 0x00},
243 {SENSOR, S5K4AA_COLSTART_LO, 0x0c, 0x00}, 221 {SENSOR, S5K4AA_COLSTART_LO, 0x0c, 0x00},
244 /* window_height_hi, window_height_lo : 960 = 0x03c0 */ 222 /* window_height_hi, window_height_lo : 960 = 0x03c0 */
@@ -255,12 +233,9 @@ static const unsigned char init_s5k4aa[][4] =
255 {SENSOR, 0x12, 0xc3, 0x00}, 233 {SENSOR, 0x12, 0xc3, 0x00},
256 {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00}, 234 {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
257 {SENSOR, 0x02, 0x0e, 0x00}, 235 {SENSOR, 0x02, 0x0e, 0x00},
258 {SENSOR_LONG, S5K4AA_GLOBAL_GAIN__, 0x0f, 0x00},
259 {SENSOR, S5K4AA_GAIN_1, 0x0b, 0x00},
260 {SENSOR, S5K4AA_GAIN_2, 0xa0, 0x00}
261}; 236};
262 237
263static const unsigned char VGA_s5k4aa[][4] = 238static const unsigned char SXGA_s5k4aa[][4] =
264{ 239{
265 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00}, 240 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
266 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, 241 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
@@ -273,50 +248,42 @@ static const unsigned char VGA_s5k4aa[][4] =
273 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, 248 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
274 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, 249 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
275 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, 250 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
276 /* VSYNC_PARA, VSYNC_PARA : img height 480 = 0x01e0 */ 251 /* VSYNC_PARA, VSYNC_PARA : img height 1024 = 0x0400 */
277 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00}, 252 {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
278 {BRIDGE, M5602_XB_VSYNC_PARA, 0xe0, 0x00}, 253 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
279 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, 254 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
280 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, 255 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
281 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00}, 256 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
282 {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00}, 257 {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
283 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00}, 258 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
284 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00}, 259 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
285 /* HSYNC_PARA, HSYNC_PARA : img width 640 = 0x0280 */ 260 /* HSYNC_PARA, HSYNC_PARA : img width 1280 = 0x0500 */
286 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00}, 261 {BRIDGE, M5602_XB_HSYNC_PARA, 0x05, 0x00},
287 {BRIDGE, M5602_XB_HSYNC_PARA, 0x80, 0x00}, 262 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
288 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00}, 263 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
289 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, 264 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
290 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00}, /* 48 MHz */ 265 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00}, /* 48 MHz */
291 266
292 {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00}, 267 {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
293 {SENSOR, S5K4AA_READ_MODE, S5K4AA_RM_H_FLIP | S5K4AA_RM_ROW_SKIP_2X 268 {SENSOR, S5K4AA_READ_MODE, S5K4AA_RM_H_FLIP, 0x00},
294 | S5K4AA_RM_COL_SKIP_2X, 0x00},
295 /* 0x37 : Fix image stability when light is too bright and improves
296 * image quality in 640x480, but worsens it in 1280x1024 */
297 {SENSOR, 0x37, 0x01, 0x00}, 269 {SENSOR, 0x37, 0x01, 0x00},
298 /* ROWSTART_HI, ROWSTART_LO : 10 + (1024-960)/2 = 42 = 0x002a */
299 {SENSOR, S5K4AA_ROWSTART_HI, 0x00, 0x00}, 270 {SENSOR, S5K4AA_ROWSTART_HI, 0x00, 0x00},
300 {SENSOR, S5K4AA_ROWSTART_LO, 0x2a, 0x00}, 271 {SENSOR, S5K4AA_ROWSTART_LO, 0x09, 0x00},
301 {SENSOR, S5K4AA_COLSTART_HI, 0x00, 0x00}, 272 {SENSOR, S5K4AA_COLSTART_HI, 0x00, 0x00},
302 {SENSOR, S5K4AA_COLSTART_LO, 0x0c, 0x00}, 273 {SENSOR, S5K4AA_COLSTART_LO, 0x0a, 0x00},
303 /* window_height_hi, window_height_lo : 960 = 0x03c0 */ 274 {SENSOR, S5K4AA_WINDOW_HEIGHT_HI, 0x04, 0x00},
304 {SENSOR, S5K4AA_WINDOW_HEIGHT_HI, 0x03, 0x00}, 275 {SENSOR, S5K4AA_WINDOW_HEIGHT_LO, 0x00, 0x00},
305 {SENSOR, S5K4AA_WINDOW_HEIGHT_LO, 0xc0, 0x00},
306 /* window_width_hi, window_width_lo : 1280 = 0x0500 */
307 {SENSOR, S5K4AA_WINDOW_WIDTH_HI, 0x05, 0x00}, 276 {SENSOR, S5K4AA_WINDOW_WIDTH_HI, 0x05, 0x00},
308 {SENSOR, S5K4AA_WINDOW_WIDTH_LO, 0x00, 0x00}, 277 {SENSOR, S5K4AA_WINDOW_WIDTH_LO, 0x00, 0x00},
309 {SENSOR, S5K4AA_H_BLANK_HI__, 0x00, 0x00}, 278 {SENSOR, S5K4AA_H_BLANK_HI__, 0x01, 0x00},
310 {SENSOR, S5K4AA_H_BLANK_LO__, 0xa8, 0x00}, /* helps to sync... */ 279 {SENSOR, S5K4AA_H_BLANK_LO__, 0xa8, 0x00},
311 {SENSOR, S5K4AA_EXPOSURE_HI, 0x01, 0x00}, 280 {SENSOR, S5K4AA_EXPOSURE_HI, 0x01, 0x00},
312 {SENSOR, S5K4AA_EXPOSURE_LO, 0x00, 0x00}, 281 {SENSOR, S5K4AA_EXPOSURE_LO, 0x00, 0x00},
313 {SENSOR, 0x11, 0x04, 0x00}, 282 {SENSOR, 0x11, 0x04, 0x00},
314 {SENSOR, 0x12, 0xc3, 0x00}, 283 {SENSOR, 0x12, 0xc3, 0x00},
315 {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00}, 284 {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
316 {SENSOR, 0x02, 0x0e, 0x00}, 285 {SENSOR, 0x02, 0x0e, 0x00},
317 {SENSOR_LONG, S5K4AA_GLOBAL_GAIN__, 0x0f, 0x00},
318 {SENSOR, S5K4AA_GAIN_1, 0x0b, 0x00},
319 {SENSOR, S5K4AA_GAIN_2, 0xa0, 0x00}
320}; 286};
321 287
288
322#endif 289#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.c b/drivers/media/video/gspca/m5602/m5602_s5k83a.c
index 42c86aa4dc8d..7127321ace8c 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.c
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.c
@@ -16,8 +16,20 @@
16 * 16 *
17 */ 17 */
18 18
19#include <linux/kthread.h>
19#include "m5602_s5k83a.h" 20#include "m5602_s5k83a.h"
20 21
22static int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val);
23static int s5k83a_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
24static int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
25static int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
26static int s5k83a_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
27static int s5k83a_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
28static int s5k83a_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
29static int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
30static int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
31static int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
32
21static struct v4l2_pix_format s5k83a_modes[] = { 33static struct v4l2_pix_format s5k83a_modes[] = {
22 { 34 {
23 640, 35 640,
@@ -32,68 +44,77 @@ static struct v4l2_pix_format s5k83a_modes[] = {
32 } 44 }
33}; 45};
34 46
35const static struct ctrl s5k83a_ctrls[] = { 47static const struct ctrl s5k83a_ctrls[] = {
48#define GAIN_IDX 0
36 { 49 {
37 { 50 {
38 .id = V4L2_CID_BRIGHTNESS, 51 .id = V4L2_CID_GAIN,
39 .type = V4L2_CTRL_TYPE_INTEGER, 52 .type = V4L2_CTRL_TYPE_INTEGER,
40 .name = "brightness", 53 .name = "gain",
41 .minimum = 0x00, 54 .minimum = 0x00,
42 .maximum = 0xff, 55 .maximum = 0xff,
43 .step = 0x01, 56 .step = 0x01,
44 .default_value = S5K83A_DEFAULT_BRIGHTNESS, 57 .default_value = S5K83A_DEFAULT_GAIN,
45 .flags = V4L2_CTRL_FLAG_SLIDER 58 .flags = V4L2_CTRL_FLAG_SLIDER
46 }, 59 },
47 .set = s5k83a_set_brightness, 60 .set = s5k83a_set_gain,
48 .get = s5k83a_get_brightness 61 .get = s5k83a_get_gain
49 62
50 }, { 63 },
64#define BRIGHTNESS_IDX 1
65 {
51 { 66 {
52 .id = V4L2_CID_WHITENESS, 67 .id = V4L2_CID_BRIGHTNESS,
53 .type = V4L2_CTRL_TYPE_INTEGER, 68 .type = V4L2_CTRL_TYPE_INTEGER,
54 .name = "whiteness", 69 .name = "brightness",
55 .minimum = 0x00, 70 .minimum = 0x00,
56 .maximum = 0xff, 71 .maximum = 0xff,
57 .step = 0x01, 72 .step = 0x01,
58 .default_value = S5K83A_DEFAULT_WHITENESS, 73 .default_value = S5K83A_DEFAULT_BRIGHTNESS,
59 .flags = V4L2_CTRL_FLAG_SLIDER 74 .flags = V4L2_CTRL_FLAG_SLIDER
60 }, 75 },
61 .set = s5k83a_set_whiteness, 76 .set = s5k83a_set_brightness,
62 .get = s5k83a_get_whiteness, 77 .get = s5k83a_get_brightness,
63 }, { 78 },
79#define EXPOSURE_IDX 2
80 {
64 { 81 {
65 .id = V4L2_CID_GAIN, 82 .id = V4L2_CID_EXPOSURE,
66 .type = V4L2_CTRL_TYPE_INTEGER, 83 .type = V4L2_CTRL_TYPE_INTEGER,
67 .name = "gain", 84 .name = "exposure",
68 .minimum = 0x00, 85 .minimum = 0x00,
69 .maximum = S5K83A_MAXIMUM_GAIN, 86 .maximum = S5K83A_MAXIMUM_EXPOSURE,
70 .step = 0x01, 87 .step = 0x01,
71 .default_value = S5K83A_DEFAULT_GAIN, 88 .default_value = S5K83A_DEFAULT_EXPOSURE,
72 .flags = V4L2_CTRL_FLAG_SLIDER 89 .flags = V4L2_CTRL_FLAG_SLIDER
73 }, 90 },
74 .set = s5k83a_set_gain, 91 .set = s5k83a_set_exposure,
75 .get = s5k83a_get_gain 92 .get = s5k83a_get_exposure
76 }, { 93 },
94#define HFLIP_IDX 3
95 {
77 { 96 {
78 .id = V4L2_CID_HFLIP, 97 .id = V4L2_CID_HFLIP,
79 .type = V4L2_CTRL_TYPE_BOOLEAN, 98 .type = V4L2_CTRL_TYPE_BOOLEAN,
80 .name = "horizontal flip", 99 .name = "horizontal flip",
81 .minimum = 0, 100 .minimum = 0,
82 .maximum = 1, 101 .maximum = 1,
83 .step = 1, 102 .step = 1,
84 .default_value = 0 103 .default_value = 0
85 }, 104 },
86 .set = s5k83a_set_hflip, 105 .set = s5k83a_set_hflip,
87 .get = s5k83a_get_hflip 106 .get = s5k83a_get_hflip
88 }, { 107 },
108#define VFLIP_IDX 4
109 {
89 { 110 {
90 .id = V4L2_CID_VFLIP, 111 .id = V4L2_CID_VFLIP,
91 .type = V4L2_CTRL_TYPE_BOOLEAN, 112 .type = V4L2_CTRL_TYPE_BOOLEAN,
92 .name = "vertical flip", 113 .name = "vertical flip",
93 .minimum = 0, 114 .minimum = 0,
94 .maximum = 1, 115 .maximum = 1,
95 .step = 1, 116 .step = 1,
96 .default_value = 0 117 .default_value = 0
97 }, 118 },
98 .set = s5k83a_set_vflip, 119 .set = s5k83a_set_vflip,
99 .get = s5k83a_get_vflip 120 .get = s5k83a_get_vflip
@@ -101,9 +122,14 @@ const static struct ctrl s5k83a_ctrls[] = {
101}; 122};
102 123
103static void s5k83a_dump_registers(struct sd *sd); 124static void s5k83a_dump_registers(struct sd *sd);
125static int s5k83a_get_rotation(struct sd *sd, u8 *reg_data);
126static int s5k83a_set_led_indication(struct sd *sd, u8 val);
127static int s5k83a_set_flip_real(struct gspca_dev *gspca_dev,
128 __s32 vflip, __s32 hflip);
104 129
105int s5k83a_probe(struct sd *sd) 130int s5k83a_probe(struct sd *sd)
106{ 131{
132 struct s5k83a_priv *sens_priv;
107 u8 prod_id = 0, ver_id = 0; 133 u8 prod_id = 0, ver_id = 0;
108 int i, err = 0; 134 int i, err = 0;
109 135
@@ -145,16 +171,36 @@ int s5k83a_probe(struct sd *sd)
145 info("Detected a s5k83a sensor"); 171 info("Detected a s5k83a sensor");
146 172
147sensor_found: 173sensor_found:
174 sens_priv = kmalloc(
175 sizeof(struct s5k83a_priv), GFP_KERNEL);
176 if (!sens_priv)
177 return -ENOMEM;
178
179 sens_priv->settings =
180 kmalloc(sizeof(s32)*ARRAY_SIZE(s5k83a_ctrls), GFP_KERNEL);
181 if (!sens_priv->settings)
182 return -ENOMEM;
183
148 sd->gspca_dev.cam.cam_mode = s5k83a_modes; 184 sd->gspca_dev.cam.cam_mode = s5k83a_modes;
149 sd->gspca_dev.cam.nmodes = ARRAY_SIZE(s5k83a_modes); 185 sd->gspca_dev.cam.nmodes = ARRAY_SIZE(s5k83a_modes);
150 sd->desc->ctrls = s5k83a_ctrls; 186 sd->desc->ctrls = s5k83a_ctrls;
151 sd->desc->nctrls = ARRAY_SIZE(s5k83a_ctrls); 187 sd->desc->nctrls = ARRAY_SIZE(s5k83a_ctrls);
188
189 /* null the pointer! thread is't running now */
190 sens_priv->rotation_thread = NULL;
191
192 for (i = 0; i < ARRAY_SIZE(s5k83a_ctrls); i++)
193 sens_priv->settings[i] = s5k83a_ctrls[i].qctrl.default_value;
194
195 sd->sensor_priv = sens_priv;
152 return 0; 196 return 0;
153} 197}
154 198
155int s5k83a_init(struct sd *sd) 199int s5k83a_init(struct sd *sd)
156{ 200{
157 int i, err = 0; 201 int i, err = 0;
202 s32 *sensor_settings =
203 ((struct s5k83a_priv *) sd->sensor_priv)->settings;
158 204
159 for (i = 0; i < ARRAY_SIZE(init_s5k83a) && !err; i++) { 205 for (i = 0; i < ARRAY_SIZE(init_s5k83a) && !err; i++) {
160 u8 data[2] = {0x00, 0x00}; 206 u8 data[2] = {0x00, 0x00};
@@ -187,87 +233,138 @@ int s5k83a_init(struct sd *sd)
187 if (dump_sensor) 233 if (dump_sensor)
188 s5k83a_dump_registers(sd); 234 s5k83a_dump_registers(sd);
189 235
190 return (err < 0) ? err : 0; 236 err = s5k83a_set_gain(&sd->gspca_dev, sensor_settings[GAIN_IDX]);
191} 237 if (err < 0)
238 return err;
192 239
193int s5k83a_start(struct sd *sd) 240 err = s5k83a_set_brightness(&sd->gspca_dev,
194{ 241 sensor_settings[BRIGHTNESS_IDX]);
195 return s5k83a_set_led_indication(sd, 1); 242 if (err < 0)
196} 243 return err;
197 244
198int s5k83a_stop(struct sd *sd) 245 err = s5k83a_set_exposure(&sd->gspca_dev,
199{ 246 sensor_settings[EXPOSURE_IDX]);
200 return s5k83a_set_led_indication(sd, 0); 247 if (err < 0)
248 return err;
249
250 err = s5k83a_set_hflip(&sd->gspca_dev, sensor_settings[HFLIP_IDX]);
251 if (err < 0)
252 return err;
253
254 err = s5k83a_set_vflip(&sd->gspca_dev, sensor_settings[VFLIP_IDX]);
255
256 return err;
201} 257}
202 258
203int s5k83a_power_down(struct sd *sd) 259static int rotation_thread_function(void *data)
204{ 260{
261 struct sd *sd = (struct sd *) data;
262 struct s5k83a_priv *sens_priv = sd->sensor_priv;
263 u8 reg, previous_rotation = 0;
264 __s32 vflip, hflip;
265
266 set_current_state(TASK_INTERRUPTIBLE);
267 while (!schedule_timeout(100)) {
268 if (mutex_lock_interruptible(&sd->gspca_dev.usb_lock))
269 break;
270
271 s5k83a_get_rotation(sd, &reg);
272 if (previous_rotation != reg) {
273 previous_rotation = reg;
274 info("Camera was flipped");
275
276 s5k83a_get_vflip((struct gspca_dev *) sd, &vflip);
277 s5k83a_get_hflip((struct gspca_dev *) sd, &hflip);
278
279 if (reg) {
280 vflip = !vflip;
281 hflip = !hflip;
282 }
283 s5k83a_set_flip_real((struct gspca_dev *) sd,
284 vflip, hflip);
285 }
286
287 mutex_unlock(&sd->gspca_dev.usb_lock);
288 set_current_state(TASK_INTERRUPTIBLE);
289 }
290
291 /* return to "front" flip */
292 if (previous_rotation) {
293 s5k83a_get_vflip((struct gspca_dev *) sd, &vflip);
294 s5k83a_get_hflip((struct gspca_dev *) sd, &hflip);
295 s5k83a_set_flip_real((struct gspca_dev *) sd, vflip, hflip);
296 }
297
298 sens_priv->rotation_thread = NULL;
205 return 0; 299 return 0;
206} 300}
207 301
208static void s5k83a_dump_registers(struct sd *sd) 302int s5k83a_start(struct sd *sd)
209{ 303{
210 int address; 304 int i, err = 0;
211 u8 page, old_page; 305 struct s5k83a_priv *sens_priv = sd->sensor_priv;
212 m5602_read_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1);
213 306
214 for (page = 0; page < 16; page++) { 307 /* Create another thread, polling the GPIO ports of the camera to check
215 m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1); 308 if it got rotated. This is how the windows driver does it so we have
216 info("Dumping the s5k83a register state for page 0x%x", page); 309 to assume that there is no better way of accomplishing this */
217 for (address = 0; address <= 0xff; address++) { 310 sens_priv->rotation_thread = kthread_create(rotation_thread_function,
218 u8 val = 0; 311 sd, "rotation thread");
219 m5602_read_sensor(sd, address, &val, 1); 312 wake_up_process(sens_priv->rotation_thread);
220 info("register 0x%x contains 0x%x", 313
221 address, val); 314 /* Preinit the sensor */
222 } 315 for (i = 0; i < ARRAY_SIZE(start_s5k83a) && !err; i++) {
316 u8 data[2] = {start_s5k83a[i][2], start_s5k83a[i][3]};
317 if (start_s5k83a[i][0] == SENSOR)
318 err = m5602_write_sensor(sd, start_s5k83a[i][1],
319 data, 2);
320 else
321 err = m5602_write_bridge(sd, start_s5k83a[i][1],
322 data[0]);
223 } 323 }
224 info("s5k83a register state dump complete"); 324 if (err < 0)
325 return err;
225 326
226 for (page = 0; page < 16; page++) { 327 return s5k83a_set_led_indication(sd, 1);
227 m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1); 328}
228 info("Probing for which registers that are read/write "
229 "for page 0x%x", page);
230 for (address = 0; address <= 0xff; address++) {
231 u8 old_val, ctrl_val, test_val = 0xff;
232 329
233 m5602_read_sensor(sd, address, &old_val, 1); 330int s5k83a_stop(struct sd *sd)
234 m5602_write_sensor(sd, address, &test_val, 1); 331{
235 m5602_read_sensor(sd, address, &ctrl_val, 1); 332 struct s5k83a_priv *sens_priv = sd->sensor_priv;
236 333
237 if (ctrl_val == test_val) 334 if (sens_priv->rotation_thread)
238 info("register 0x%x is writeable", address); 335 kthread_stop(sens_priv->rotation_thread);
239 else
240 info("register 0x%x is read only", address);
241 336
242 /* Restore original val */ 337 return s5k83a_set_led_indication(sd, 0);
243 m5602_write_sensor(sd, address, &old_val, 1);
244 }
245 }
246 info("Read/write register probing complete");
247 m5602_write_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1);
248} 338}
249 339
250int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val) 340void s5k83a_disconnect(struct sd *sd)
251{ 341{
252 int err; 342 struct s5k83a_priv *sens_priv = sd->sensor_priv;
253 u8 data[2];
254 struct sd *sd = (struct sd *) gspca_dev;
255 343
256 err = m5602_read_sensor(sd, S5K83A_BRIGHTNESS, data, 2); 344 s5k83a_stop(sd);
257 if (err < 0) 345
258 return err; 346 sd->sensor = NULL;
347 kfree(sens_priv->settings);
348 kfree(sens_priv);
349}
259 350
260 data[1] = data[1] << 1; 351static int s5k83a_get_gain(struct gspca_dev *gspca_dev, __s32 *val)
261 *val = data[1]; 352{
353 struct sd *sd = (struct sd *) gspca_dev;
354 struct s5k83a_priv *sens_priv = sd->sensor_priv;
262 355
263 return err; 356 *val = sens_priv->settings[GAIN_IDX];
357 return 0;
264} 358}
265 359
266int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val) 360static int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val)
267{ 361{
268 int err; 362 int err;
269 u8 data[2]; 363 u8 data[2];
270 struct sd *sd = (struct sd *) gspca_dev; 364 struct sd *sd = (struct sd *) gspca_dev;
365 struct s5k83a_priv *sens_priv = sd->sensor_priv;
366
367 sens_priv->settings[GAIN_IDX] = val;
271 368
272 data[0] = 0x00; 369 data[0] = 0x00;
273 data[1] = 0x20; 370 data[1] = 0x20;
@@ -283,89 +380,69 @@ int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
283 380
284 /* FIXME: This is not sane, we need to figure out the composition 381 /* FIXME: This is not sane, we need to figure out the composition
285 of these registers */ 382 of these registers */
286 data[0] = val >> 3; /* brightness, high 5 bits */ 383 data[0] = val >> 3; /* gain, high 5 bits */
287 data[1] = val >> 1; /* brightness, high 7 bits */ 384 data[1] = val >> 1; /* gain, high 7 bits */
288 err = m5602_write_sensor(sd, S5K83A_BRIGHTNESS, data, 2); 385 err = m5602_write_sensor(sd, S5K83A_GAIN, data, 2);
289 386
290 return err; 387 return err;
291} 388}
292 389
293int s5k83a_get_whiteness(struct gspca_dev *gspca_dev, __s32 *val) 390static int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val)
294{ 391{
295 int err;
296 u8 data;
297 struct sd *sd = (struct sd *) gspca_dev; 392 struct sd *sd = (struct sd *) gspca_dev;
393 struct s5k83a_priv *sens_priv = sd->sensor_priv;
298 394
299 err = m5602_read_sensor(sd, S5K83A_WHITENESS, &data, 1); 395 *val = sens_priv->settings[BRIGHTNESS_IDX];
300 if (err < 0) 396 return 0;
301 return err;
302
303 *val = data;
304
305 return err;
306} 397}
307 398
308int s5k83a_set_whiteness(struct gspca_dev *gspca_dev, __s32 val) 399static int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val)
309{ 400{
310 int err; 401 int err;
311 u8 data[1]; 402 u8 data[1];
312 struct sd *sd = (struct sd *) gspca_dev; 403 struct sd *sd = (struct sd *) gspca_dev;
404 struct s5k83a_priv *sens_priv = sd->sensor_priv;
313 405
406 sens_priv->settings[BRIGHTNESS_IDX] = val;
314 data[0] = val; 407 data[0] = val;
315 err = m5602_write_sensor(sd, S5K83A_WHITENESS, data, 1); 408 err = m5602_write_sensor(sd, S5K83A_BRIGHTNESS, data, 1);
316
317 return err; 409 return err;
318} 410}
319 411
320int s5k83a_get_gain(struct gspca_dev *gspca_dev, __s32 *val) 412static int s5k83a_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
321{ 413{
322 int err;
323 u8 data[2];
324 struct sd *sd = (struct sd *) gspca_dev; 414 struct sd *sd = (struct sd *) gspca_dev;
415 struct s5k83a_priv *sens_priv = sd->sensor_priv;
325 416
326 err = m5602_read_sensor(sd, S5K83A_GAIN, data, 2); 417 *val = sens_priv->settings[EXPOSURE_IDX];
327 if (err < 0) 418 return 0;
328 return err;
329
330 data[1] = data[1] & 0x3f;
331 if (data[1] > S5K83A_MAXIMUM_GAIN)
332 data[1] = S5K83A_MAXIMUM_GAIN;
333
334 *val = data[1];
335
336 return err;
337} 419}
338 420
339int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val) 421static int s5k83a_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
340{ 422{
341 int err; 423 int err;
342 u8 data[2]; 424 u8 data[2];
343 struct sd *sd = (struct sd *) gspca_dev; 425 struct sd *sd = (struct sd *) gspca_dev;
426 struct s5k83a_priv *sens_priv = sd->sensor_priv;
344 427
428 sens_priv->settings[EXPOSURE_IDX] = val;
345 data[0] = 0; 429 data[0] = 0;
346 data[1] = val; 430 data[1] = val;
347 err = m5602_write_sensor(sd, S5K83A_GAIN, data, 2); 431 err = m5602_write_sensor(sd, S5K83A_EXPOSURE, data, 2);
348 return err; 432 return err;
349} 433}
350 434
351int s5k83a_get_vflip(struct gspca_dev *gspca_dev, __s32 *val) 435static int s5k83a_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
352{ 436{
353 int err;
354 u8 data[1];
355 struct sd *sd = (struct sd *) gspca_dev; 437 struct sd *sd = (struct sd *) gspca_dev;
438 struct s5k83a_priv *sens_priv = sd->sensor_priv;
356 439
357 data[0] = 0x05; 440 *val = sens_priv->settings[VFLIP_IDX];
358 err = m5602_write_sensor(sd, S5K83A_PAGE_MAP, data, 1); 441 return 0;
359 if (err < 0)
360 return err;
361
362 err = m5602_read_sensor(sd, S5K83A_FLIP, data, 1);
363 *val = (data[0] | 0x40) ? 1 : 0;
364
365 return err;
366} 442}
367 443
368int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val) 444static int s5k83a_set_flip_real(struct gspca_dev *gspca_dev,
445 __s32 vflip, __s32 hflip)
369{ 446{
370 int err; 447 int err;
371 u8 data[1]; 448 u8 data[1];
@@ -376,69 +453,83 @@ int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
376 if (err < 0) 453 if (err < 0)
377 return err; 454 return err;
378 455
379 err = m5602_read_sensor(sd, S5K83A_FLIP, data, 1); 456 /* six bit is vflip, seven is hflip */
380 if (err < 0) 457 data[0] = S5K83A_FLIP_MASK;
381 return err; 458 data[0] = (vflip) ? data[0] | 0x40 : data[0];
459 data[0] = (hflip) ? data[0] | 0x80 : data[0];
382 460
383 /* set or zero six bit, seven is hflip */
384 data[0] = (val) ? (data[0] & 0x80) | 0x40 | S5K83A_FLIP_MASK
385 : (data[0] & 0x80) | S5K83A_FLIP_MASK;
386 err = m5602_write_sensor(sd, S5K83A_FLIP, data, 1); 461 err = m5602_write_sensor(sd, S5K83A_FLIP, data, 1);
387 if (err < 0) 462 if (err < 0)
388 return err; 463 return err;
389 464
390 data[0] = (val) ? 0x0b : 0x0a; 465 data[0] = (vflip) ? 0x0b : 0x0a;
391 err = m5602_write_sensor(sd, S5K83A_VFLIP_TUNE, data, 1); 466 err = m5602_write_sensor(sd, S5K83A_VFLIP_TUNE, data, 1);
467 if (err < 0)
468 return err;
392 469
470 data[0] = (hflip) ? 0x0a : 0x0b;
471 err = m5602_write_sensor(sd, S5K83A_HFLIP_TUNE, data, 1);
393 return err; 472 return err;
394} 473}
395 474
396int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val) 475static int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
397{ 476{
398 int err; 477 int err;
399 u8 data[1]; 478 u8 reg;
479 __s32 hflip;
400 struct sd *sd = (struct sd *) gspca_dev; 480 struct sd *sd = (struct sd *) gspca_dev;
481 struct s5k83a_priv *sens_priv = sd->sensor_priv;
401 482
402 data[0] = 0x05; 483 sens_priv->settings[VFLIP_IDX] = val;
403 err = m5602_write_sensor(sd, S5K83A_PAGE_MAP, data, 1); 484
485 s5k83a_get_hflip(gspca_dev, &hflip);
486
487 err = s5k83a_get_rotation(sd, &reg);
404 if (err < 0) 488 if (err < 0)
405 return err; 489 return err;
490 if (reg) {
491 val = !val;
492 hflip = !hflip;
493 }
406 494
407 err = m5602_read_sensor(sd, S5K83A_FLIP, data, 1); 495 err = s5k83a_set_flip_real(gspca_dev, val, hflip);
408 *val = (data[0] | 0x80) ? 1 : 0;
409
410 return err; 496 return err;
411} 497}
412 498
413int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val) 499static int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
500{
501 struct sd *sd = (struct sd *) gspca_dev;
502 struct s5k83a_priv *sens_priv = sd->sensor_priv;
503
504 *val = sens_priv->settings[HFLIP_IDX];
505 return 0;
506}
507
508static int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
414{ 509{
415 int err; 510 int err;
416 u8 data[1]; 511 u8 reg;
512 __s32 vflip;
417 struct sd *sd = (struct sd *) gspca_dev; 513 struct sd *sd = (struct sd *) gspca_dev;
514 struct s5k83a_priv *sens_priv = sd->sensor_priv;
418 515
419 data[0] = 0x05; 516 sens_priv->settings[HFLIP_IDX] = val;
420 err = m5602_write_sensor(sd, S5K83A_PAGE_MAP, data, 1);
421 if (err < 0)
422 return err;
423 517
424 err = m5602_read_sensor(sd, S5K83A_FLIP, data, 1); 518 s5k83a_get_vflip(gspca_dev, &vflip);
425 if (err < 0)
426 return err;
427 519
428 /* set or zero seven bit, six is vflip */ 520 err = s5k83a_get_rotation(sd, &reg);
429 data[0] = (val) ? (data[0] & 0x40) | 0x80 | S5K83A_FLIP_MASK
430 : (data[0] & 0x40) | S5K83A_FLIP_MASK;
431 err = m5602_write_sensor(sd, S5K83A_FLIP, data, 1);
432 if (err < 0) 521 if (err < 0)
433 return err; 522 return err;
523 if (reg) {
524 val = !val;
525 vflip = !vflip;
526 }
434 527
435 data[0] = (val) ? 0x0a : 0x0b; 528 err = s5k83a_set_flip_real(gspca_dev, vflip, val);
436 err = m5602_write_sensor(sd, S5K83A_HFLIP_TUNE, data, 1);
437
438 return err; 529 return err;
439} 530}
440 531
441int s5k83a_set_led_indication(struct sd *sd, u8 val) 532static int s5k83a_set_led_indication(struct sd *sd, u8 val)
442{ 533{
443 int err = 0; 534 int err = 0;
444 u8 data[1]; 535 u8 data[1];
@@ -454,5 +545,55 @@ int s5k83a_set_led_indication(struct sd *sd, u8 val)
454 545
455 err = m5602_write_bridge(sd, M5602_XB_GPIO_DAT, data[0]); 546 err = m5602_write_bridge(sd, M5602_XB_GPIO_DAT, data[0]);
456 547
457 return (err < 0) ? err : 0; 548 return err;
549}
550
551/* Get camera rotation on Acer notebooks */
552static int s5k83a_get_rotation(struct sd *sd, u8 *reg_data)
553{
554 int err = m5602_read_bridge(sd, M5602_XB_GPIO_DAT, reg_data);
555 *reg_data = (*reg_data & S5K83A_GPIO_ROTATION_MASK) ? 0 : 1;
556 return err;
557}
558
559static void s5k83a_dump_registers(struct sd *sd)
560{
561 int address;
562 u8 page, old_page;
563 m5602_read_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1);
564
565 for (page = 0; page < 16; page++) {
566 m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1);
567 info("Dumping the s5k83a register state for page 0x%x", page);
568 for (address = 0; address <= 0xff; address++) {
569 u8 val = 0;
570 m5602_read_sensor(sd, address, &val, 1);
571 info("register 0x%x contains 0x%x",
572 address, val);
573 }
574 }
575 info("s5k83a register state dump complete");
576
577 for (page = 0; page < 16; page++) {
578 m5602_write_sensor(sd, S5K83A_PAGE_MAP, &page, 1);
579 info("Probing for which registers that are read/write "
580 "for page 0x%x", page);
581 for (address = 0; address <= 0xff; address++) {
582 u8 old_val, ctrl_val, test_val = 0xff;
583
584 m5602_read_sensor(sd, address, &old_val, 1);
585 m5602_write_sensor(sd, address, &test_val, 1);
586 m5602_read_sensor(sd, address, &ctrl_val, 1);
587
588 if (ctrl_val == test_val)
589 info("register 0x%x is writeable", address);
590 else
591 info("register 0x%x is read only", address);
592
593 /* Restore original val */
594 m5602_write_sensor(sd, address, &old_val, 1);
595 }
596 }
597 info("Read/write register probing complete");
598 m5602_write_sensor(sd, S5K83A_PAGE_MAP, &old_page, 1);
458} 599}
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.h b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
index 819ab25272be..7814b078acde 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
@@ -21,20 +21,21 @@
21 21
22#include "m5602_sensor.h" 22#include "m5602_sensor.h"
23 23
24#define S5K83A_FLIP 0x01 24#define S5K83A_FLIP 0x01
25#define S5K83A_HFLIP_TUNE 0x03 25#define S5K83A_HFLIP_TUNE 0x03
26#define S5K83A_VFLIP_TUNE 0x05 26#define S5K83A_VFLIP_TUNE 0x05
27#define S5K83A_WHITENESS 0x0a 27#define S5K83A_BRIGHTNESS 0x0a
28#define S5K83A_GAIN 0x18 28#define S5K83A_EXPOSURE 0x18
29#define S5K83A_BRIGHTNESS 0x1b 29#define S5K83A_GAIN 0x1b
30#define S5K83A_PAGE_MAP 0xec 30#define S5K83A_PAGE_MAP 0xec
31 31
32#define S5K83A_DEFAULT_BRIGHTNESS 0x71 32#define S5K83A_DEFAULT_GAIN 0x71
33#define S5K83A_DEFAULT_WHITENESS 0x7e 33#define S5K83A_DEFAULT_BRIGHTNESS 0x7e
34#define S5K83A_DEFAULT_GAIN 0x00 34#define S5K83A_DEFAULT_EXPOSURE 0x00
35#define S5K83A_MAXIMUM_GAIN 0x3c 35#define S5K83A_MAXIMUM_EXPOSURE 0x3c
36#define S5K83A_FLIP_MASK 0x10 36#define S5K83A_FLIP_MASK 0x10
37#define S5K83A_GPIO_LED_MASK 0x10 37#define S5K83A_GPIO_LED_MASK 0x10
38#define S5K83A_GPIO_ROTATION_MASK 0x40
38 39
39/*****************************************************************************/ 40/*****************************************************************************/
40 41
@@ -46,20 +47,7 @@ int s5k83a_probe(struct sd *sd);
46int s5k83a_init(struct sd *sd); 47int s5k83a_init(struct sd *sd);
47int s5k83a_start(struct sd *sd); 48int s5k83a_start(struct sd *sd);
48int s5k83a_stop(struct sd *sd); 49int s5k83a_stop(struct sd *sd);
49int s5k83a_power_down(struct sd *sd); 50void s5k83a_disconnect(struct sd *sd);
50
51int s5k83a_set_led_indication(struct sd *sd, u8 val);
52
53int s5k83a_set_brightness(struct gspca_dev *gspca_dev, __s32 val);
54int s5k83a_get_brightness(struct gspca_dev *gspca_dev, __s32 *val);
55int s5k83a_set_whiteness(struct gspca_dev *gspca_dev, __s32 val);
56int s5k83a_get_whiteness(struct gspca_dev *gspca_dev, __s32 *val);
57int s5k83a_set_gain(struct gspca_dev *gspca_dev, __s32 val);
58int s5k83a_get_gain(struct gspca_dev *gspca_dev, __s32 *val);
59int s5k83a_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
60int s5k83a_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
61int s5k83a_get_hflip(struct gspca_dev *gspca_dev, __s32 *val);
62int s5k83a_set_hflip(struct gspca_dev *gspca_dev, __s32 val);
63 51
64static const struct m5602_sensor s5k83a = { 52static const struct m5602_sensor s5k83a = {
65 .name = "S5K83A", 53 .name = "S5K83A",
@@ -67,11 +55,18 @@ static const struct m5602_sensor s5k83a = {
67 .init = s5k83a_init, 55 .init = s5k83a_init,
68 .start = s5k83a_start, 56 .start = s5k83a_start,
69 .stop = s5k83a_stop, 57 .stop = s5k83a_stop,
70 .power_down = s5k83a_power_down, 58 .disconnect = s5k83a_disconnect,
71 .i2c_slave_id = 0x5a, 59 .i2c_slave_id = 0x5a,
72 .i2c_regW = 2, 60 .i2c_regW = 2,
73}; 61};
74 62
63struct s5k83a_priv {
64 /* We use another thread periodically
65 probing the orientation of the camera */
66 struct task_struct *rotation_thread;
67 s32 *settings;
68};
69
75static const unsigned char preinit_s5k83a[][4] = 70static const unsigned char preinit_s5k83a[][4] =
76{ 71{
77 {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00}, 72 {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
@@ -108,8 +103,6 @@ static const unsigned char preinit_s5k83a[][4] =
108 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00}, 103 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
109 {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00}, 104 {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
110 {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00}, 105 {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00},
111
112 {SENSOR, S5K83A_PAGE_MAP, 0x00, 0x00}
113}; 106};
114 107
115/* This could probably be considerably shortened. 108/* This could probably be considerably shortened.
@@ -117,86 +110,8 @@ static const unsigned char preinit_s5k83a[][4] =
117*/ 110*/
118static const unsigned char init_s5k83a[][4] = 111static const unsigned char init_s5k83a[][4] =
119{ 112{
120 {SENSOR, S5K83A_PAGE_MAP, 0x04, 0x00}, 113 /* The following sequence is useless after a clean boot
121 {SENSOR, 0xaf, 0x01, 0x00}, 114 but is necessary after resume from suspend */
122 {SENSOR, S5K83A_PAGE_MAP, 0x00, 0x00},
123 {SENSOR, 0x7b, 0xff, 0x00},
124 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
125 {SENSOR, 0x01, 0x50, 0x00},
126 {SENSOR, 0x12, 0x20, 0x00},
127 {SENSOR, 0x17, 0x40, 0x00},
128 {SENSOR, S5K83A_BRIGHTNESS, 0x0f, 0x00},
129 {SENSOR, 0x1c, 0x00, 0x00},
130 {SENSOR, 0x02, 0x70, 0x00},
131 {SENSOR, 0x03, 0x0b, 0x00},
132 {SENSOR, 0x04, 0xf0, 0x00},
133 {SENSOR, 0x05, 0x0b, 0x00},
134 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
135
136 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
137 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
138 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
139 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
140 {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
141 {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
142 {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
143 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
144 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
145 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
146 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
147 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
148 {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00},
149 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
150 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
151 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
152 {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
153 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
154 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
155 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
156 {BRIDGE, M5602_XB_HSYNC_PARA, 0x87, 0x00},
157 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
158 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
159 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
160
161 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
162 {SENSOR, 0x06, 0x71, 0x00},
163 {SENSOR, 0x07, 0xe8, 0x00},
164 {SENSOR, 0x08, 0x02, 0x00},
165 {SENSOR, 0x09, 0x88, 0x00},
166 {SENSOR, 0x14, 0x00, 0x00},
167 {SENSOR, 0x15, 0x20, 0x00},
168 {SENSOR, 0x19, 0x00, 0x00},
169 {SENSOR, 0x1a, 0x98, 0x00},
170 {SENSOR, 0x0f, 0x02, 0x00},
171 {SENSOR, 0x10, 0xe5, 0x00},
172 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
173 {SENSOR_LONG, 0x14, 0x00, 0x20},
174 {SENSOR_LONG, 0x0d, 0x00, 0x7d},
175 {SENSOR_LONG, 0x1b, 0x0d, 0x05},
176
177 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
178 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
179 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
180 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
181 {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
182 {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
183 {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
184 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
185 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
186 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
187 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
188 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
189 {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00},
190 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
191 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
192 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
193 {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
194 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
195 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
196 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
197 {BRIDGE, M5602_XB_HSYNC_PARA, 0x87, 0x00},
198
199 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
200 {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00}, 115 {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
201 {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00}, 116 {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
202 {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00}, 117 {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00},
@@ -216,7 +131,7 @@ static const unsigned char init_s5k83a[][4] =
216 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, 131 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
217 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00}, 132 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00},
218 {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00}, 133 {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
219 {BRIDGE, M5602_XB_GPIO_DAT, 0x1c, 0x00}, 134 {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
220 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00}, 135 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
221 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00}, 136 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00},
222 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00}, 137 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
@@ -225,109 +140,34 @@ static const unsigned char init_s5k83a[][4] =
225 140
226 {SENSOR, S5K83A_PAGE_MAP, 0x04, 0x00}, 141 {SENSOR, S5K83A_PAGE_MAP, 0x04, 0x00},
227 {SENSOR, 0xaf, 0x01, 0x00}, 142 {SENSOR, 0xaf, 0x01, 0x00},
228 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00}, 143 {SENSOR, S5K83A_PAGE_MAP, 0x00, 0x00},
229 /* ff ( init value )is very dark) || 71 and f0 better */
230 {SENSOR, 0x7b, 0xff, 0x00}, 144 {SENSOR, 0x7b, 0xff, 0x00},
231 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00}, 145 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
232 {SENSOR, 0x01, 0x50, 0x00}, 146 {SENSOR, 0x01, 0x50, 0x00},
233 {SENSOR, 0x12, 0x20, 0x00}, 147 {SENSOR, 0x12, 0x20, 0x00},
234 {SENSOR, 0x17, 0x40, 0x00}, 148 {SENSOR, 0x17, 0x40, 0x00},
235 {SENSOR, S5K83A_BRIGHTNESS, 0x0f, 0x00},
236 {SENSOR, 0x1c, 0x00, 0x00}, 149 {SENSOR, 0x1c, 0x00, 0x00},
237 {SENSOR, 0x02, 0x70, 0x00}, 150 {SENSOR, 0x02, 0x70, 0x00},
238 /* some values like 0x10 give a blue-purple image */
239 {SENSOR, 0x03, 0x0b, 0x00}, 151 {SENSOR, 0x03, 0x0b, 0x00},
240 {SENSOR, 0x04, 0xf0, 0x00}, 152 {SENSOR, 0x04, 0xf0, 0x00},
241 {SENSOR, 0x05, 0x0b, 0x00}, 153 {SENSOR, 0x05, 0x0b, 0x00},
242 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
243
244 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
245 /* under 80 don't work, highter depend on value */
246 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
247
248 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
249 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
250 {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
251 {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
252 {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
253 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
254 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
255 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
256 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
257 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
258 {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00},
259 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
260 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
261 {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
262 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
263 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
264 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
265 {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00},
266
267 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
268 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
269 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
270
271 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
272 {SENSOR, 0x06, 0x71, 0x00}, 154 {SENSOR, 0x06, 0x71, 0x00},
273 {SENSOR, 0x07, 0xe8, 0x00}, 155 {SENSOR, 0x07, 0xe8, 0x00}, /* 488 */
274 {SENSOR, 0x08, 0x02, 0x00}, 156 {SENSOR, 0x08, 0x02, 0x00},
275 {SENSOR, 0x09, 0x88, 0x00}, 157 {SENSOR, 0x09, 0x88, 0x00}, /* 648 */
276 {SENSOR, 0x14, 0x00, 0x00}, 158 {SENSOR, 0x14, 0x00, 0x00},
277 {SENSOR, 0x15, 0x20, 0x00}, 159 {SENSOR, 0x15, 0x20, 0x00}, /* 32 */
278 {SENSOR, 0x19, 0x00, 0x00}, 160 {SENSOR, 0x19, 0x00, 0x00},
279 {SENSOR, 0x1a, 0x98, 0x00}, 161 {SENSOR, 0x1a, 0x98, 0x00}, /* 152 */
280 {SENSOR, 0x0f, 0x02, 0x00}, 162 {SENSOR, 0x0f, 0x02, 0x00},
281 {SENSOR, 0x10, 0xe5, 0x00}, 163 {SENSOR, 0x10, 0xe5, 0x00}, /* 741 */
282 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00}, 164 /* normal colors
283 {SENSOR_LONG, 0x14, 0x00, 0x20}, 165 (this is value after boot, but after tries can be different) */
284 {SENSOR_LONG, 0x0d, 0x00, 0x7d}, 166 {SENSOR, 0x00, 0x06, 0x00},
285 {SENSOR_LONG, 0x1b, 0x0d, 0x05}, 167};
286
287 /* The following sequence is useless after a clean boot
288 but is necessary after resume from suspend */
289 {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
290 {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
291 {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00},
292 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00},
293 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
294 {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
295 {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
296 {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
297 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00},
298 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00},
299 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
300 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
301 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
302 {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
303 {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
304 {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
305 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
306 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00},
307 {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
308 {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
309 {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
310 {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00},
311 {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
312 {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
313 {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00},
314
315 {SENSOR, S5K83A_PAGE_MAP, 0x04, 0x00},
316 {SENSOR, 0xaf, 0x01, 0x00},
317 {SENSOR, S5K83A_PAGE_MAP, 0x00, 0x00},
318 {SENSOR, 0x7b, 0xff, 0x00},
319 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
320 {SENSOR, 0x01, 0x50, 0x00},
321 {SENSOR, 0x12, 0x20, 0x00},
322 {SENSOR, 0x17, 0x40, 0x00},
323 {SENSOR, S5K83A_BRIGHTNESS, 0x0f, 0x00},
324 {SENSOR, 0x1c, 0x00, 0x00},
325 {SENSOR, 0x02, 0x70, 0x00},
326 {SENSOR, 0x03, 0x0b, 0x00},
327 {SENSOR, 0x04, 0xf0, 0x00},
328 {SENSOR, 0x05, 0x0b, 0x00},
329 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
330 168
169static const unsigned char start_s5k83a[][4] =
170{
331 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00}, 171 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
332 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, 172 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
333 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00}, 173 {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
@@ -340,7 +180,7 @@ static const unsigned char init_s5k83a[][4] =
340 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, 180 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
341 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, 181 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
342 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00}, 182 {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
343 {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00}, 183 {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00}, /* 484 */
344 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, 184 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
345 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00}, 185 {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
346 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00}, 186 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
@@ -348,50 +188,10 @@ static const unsigned char init_s5k83a[][4] =
348 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00}, 188 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
349 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00}, 189 {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
350 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00}, 190 {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
351 {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00}, 191 {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00}, /* 639 */
352 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00}, 192 {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
353 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00}, 193 {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
354 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00}, 194 {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
355
356 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
357 {SENSOR, 0x06, 0x71, 0x00},
358 {SENSOR, 0x07, 0xe8, 0x00},
359 {SENSOR, 0x08, 0x02, 0x00},
360 {SENSOR, 0x09, 0x88, 0x00},
361 {SENSOR, 0x14, 0x00, 0x00},
362 {SENSOR, 0x15, 0x20, 0x00},
363 {SENSOR, 0x19, 0x00, 0x00},
364 {SENSOR, 0x1a, 0x98, 0x00},
365 {SENSOR, 0x0f, 0x02, 0x00},
366
367 {SENSOR, 0x10, 0xe5, 0x00},
368 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
369 {SENSOR_LONG, 0x14, 0x00, 0x20},
370 {SENSOR_LONG, 0x0d, 0x00, 0x7d},
371 {SENSOR_LONG, 0x1b, 0x0d, 0x05},
372
373 /* normal colors
374 (this is value after boot, but after tries can be different) */
375 {SENSOR, 0x00, 0x06, 0x00},
376
377 /* set default brightness */
378 {SENSOR_LONG, 0x14, 0x00, 0x20},
379 {SENSOR_LONG, 0x0d, 0x01, 0x00},
380 {SENSOR_LONG, 0x1b, S5K83A_DEFAULT_BRIGHTNESS >> 3,
381 S5K83A_DEFAULT_BRIGHTNESS >> 1},
382
383 /* set default whiteness */
384 {SENSOR, S5K83A_WHITENESS, S5K83A_DEFAULT_WHITENESS, 0x00},
385
386 /* set default gain */
387 {SENSOR_LONG, 0x18, 0x00, S5K83A_DEFAULT_GAIN},
388
389 /* set default flip */
390 {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
391 {SENSOR, S5K83A_FLIP, 0x00 | S5K83A_FLIP_MASK, 0x00},
392 {SENSOR, S5K83A_HFLIP_TUNE, 0x0b, 0x00},
393 {SENSOR, S5K83A_VFLIP_TUNE, 0x0a, 0x00}
394
395}; 195};
396 196
397#endif 197#endif
diff --git a/drivers/media/video/gspca/m5602/m5602_sensor.h b/drivers/media/video/gspca/m5602/m5602_sensor.h
index 0d3026936f2e..edff4f1f586f 100644
--- a/drivers/media/video/gspca/m5602/m5602_sensor.h
+++ b/drivers/media/video/gspca/m5602/m5602_sensor.h
@@ -21,13 +21,17 @@
21 21
22#include "m5602_bridge.h" 22#include "m5602_bridge.h"
23 23
24#define M5602_V4L2_CID_GREEN_BALANCE (V4L2_CID_PRIVATE_BASE + 0)
25#define M5602_V4L2_CID_NOISE_SUPPRESION (V4L2_CID_PRIVATE_BASE + 1)
26
24/* Enumerates all supported sensors */ 27/* Enumerates all supported sensors */
25enum sensors { 28enum sensors {
26 OV9650_SENSOR = 1, 29 OV9650_SENSOR = 1,
27 S5K83A_SENSOR = 2, 30 S5K83A_SENSOR = 2,
28 S5K4AA_SENSOR = 3, 31 S5K4AA_SENSOR = 3,
29 MT9M111_SENSOR = 4, 32 MT9M111_SENSOR = 4,
30 PO1030_SENSOR = 5 33 PO1030_SENSOR = 5,
34 OV7660_SENSOR = 6,
31}; 35};
32 36
33/* Enumerates all possible instruction types */ 37/* Enumerates all possible instruction types */
@@ -61,9 +65,6 @@ struct m5602_sensor {
61 65
62 /* Executed when the device is disconnected */ 66 /* Executed when the device is disconnected */
63 void (*disconnect)(struct sd *sd); 67 void (*disconnect)(struct sd *sd);
64
65 /* Performs a power down sequence */
66 int (*power_down)(struct sd *sd);
67}; 68};
68 69
69#endif 70#endif
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 2a901a4a6f00..30132513400c 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -321,6 +321,7 @@ static const struct sd_desc sd_desc = {
321/* -- module initialisation -- */ 321/* -- module initialisation -- */
322static const __devinitdata struct usb_device_id device_table[] = { 322static const __devinitdata struct usb_device_id device_table[] = {
323 {USB_DEVICE(0x08ca, 0x0111)}, 323 {USB_DEVICE(0x08ca, 0x0111)},
324 {USB_DEVICE(0x093a, 0x010f)},
324 {} 325 {}
325}; 326};
326MODULE_DEVICE_TABLE(usb, device_table); 327MODULE_DEVICE_TABLE(usb, device_table);
@@ -347,8 +348,11 @@ static struct usb_driver sd_driver = {
347/* -- module insert / remove -- */ 348/* -- module insert / remove -- */
348static int __init sd_mod_init(void) 349static int __init sd_mod_init(void)
349{ 350{
350 if (usb_register(&sd_driver) < 0) 351 int ret;
351 return -1; 352
353 ret = usb_register(&sd_driver);
354 if (ret < 0)
355 return ret;
352 PDEBUG(D_PROBE, "registered"); 356 PDEBUG(D_PROBE, "registered");
353 return 0; 357 return 0;
354} 358}
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 1fff37b79891..188866ac6cef 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -50,6 +50,13 @@ static int i2c_detect_tries = 10;
50struct sd { 50struct sd {
51 struct gspca_dev gspca_dev; /* !! must be the first item */ 51 struct gspca_dev gspca_dev; /* !! must be the first item */
52 52
53 char bridge;
54#define BRIDGE_OV511 0
55#define BRIDGE_OV511PLUS 1
56#define BRIDGE_OV518 2
57#define BRIDGE_OV518PLUS 3
58#define BRIDGE_OV519 4
59
53 /* Determined by sensor type */ 60 /* Determined by sensor type */
54 __u8 sif; 61 __u8 sif;
55 62
@@ -87,6 +94,9 @@ static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
87static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val); 94static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
88static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val); 95static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
89static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val); 96static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
97static void setbrightness(struct gspca_dev *gspca_dev);
98static void setcontrast(struct gspca_dev *gspca_dev);
99static void setcolors(struct gspca_dev *gspca_dev);
90 100
91static struct ctrl sd_ctrls[] = { 101static struct ctrl sd_ctrls[] = {
92 { 102 {
@@ -164,7 +174,7 @@ static struct ctrl sd_ctrls[] = {
164 }, 174 },
165}; 175};
166 176
167static const struct v4l2_pix_format vga_mode[] = { 177static const struct v4l2_pix_format ov519_vga_mode[] = {
168 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 178 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
169 .bytesperline = 320, 179 .bytesperline = 320,
170 .sizeimage = 320 * 240 * 3 / 8 + 590, 180 .sizeimage = 320 * 240 * 3 / 8 + 590,
@@ -176,7 +186,7 @@ static const struct v4l2_pix_format vga_mode[] = {
176 .colorspace = V4L2_COLORSPACE_JPEG, 186 .colorspace = V4L2_COLORSPACE_JPEG,
177 .priv = 0}, 187 .priv = 0},
178}; 188};
179static const struct v4l2_pix_format sif_mode[] = { 189static const struct v4l2_pix_format ov519_sif_mode[] = {
180 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, 190 {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
181 .bytesperline = 176, 191 .bytesperline = 176,
182 .sizeimage = 176 * 144 * 3 / 8 + 590, 192 .sizeimage = 176 * 144 * 3 / 8 + 590,
@@ -189,6 +199,47 @@ static const struct v4l2_pix_format sif_mode[] = {
189 .priv = 0}, 199 .priv = 0},
190}; 200};
191 201
202static const struct v4l2_pix_format ov518_vga_mode[] = {
203 {320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
204 .bytesperline = 320,
205 .sizeimage = 320 * 240 * 3 / 8 + 590,
206 .colorspace = V4L2_COLORSPACE_JPEG,
207 .priv = 1},
208 {640, 480, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
209 .bytesperline = 640,
210 .sizeimage = 640 * 480 * 3 / 8 + 590,
211 .colorspace = V4L2_COLORSPACE_JPEG,
212 .priv = 0},
213};
214static const struct v4l2_pix_format ov518_sif_mode[] = {
215 {176, 144, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
216 .bytesperline = 176,
217 .sizeimage = 40000,
218 .colorspace = V4L2_COLORSPACE_JPEG,
219 .priv = 1},
220 {352, 288, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
221 .bytesperline = 352,
222 .sizeimage = 352 * 288 * 3 / 8 + 590,
223 .colorspace = V4L2_COLORSPACE_JPEG,
224 .priv = 0},
225};
226
227
228/* Registers common to OV511 / OV518 */
229#define R51x_SYS_RESET 0x50
230#define R51x_SYS_INIT 0x53
231#define R51x_SYS_SNAP 0x52
232#define R51x_SYS_CUST_ID 0x5F
233#define R51x_COMP_LUT_BEGIN 0x80
234
235/* OV511 Camera interface register numbers */
236#define R511_SYS_LED_CTL 0x55 /* OV511+ only */
237#define OV511_RESET_NOREGS 0x3F /* All but OV511 & regs */
238
239/* OV518 Camera interface register numbers */
240#define R518_GPIO_OUT 0x56 /* OV518(+) only */
241#define R518_GPIO_CTL 0x57 /* OV518(+) only */
242
192/* OV519 Camera interface register numbers */ 243/* OV519 Camera interface register numbers */
193#define OV519_R10_H_SIZE 0x10 244#define OV519_R10_H_SIZE 0x10
194#define OV519_R11_V_SIZE 0x11 245#define OV519_R11_V_SIZE 0x11
@@ -224,6 +275,8 @@ static const struct v4l2_pix_format sif_mode[] = {
224 275
225/* OV7610 registers */ 276/* OV7610 registers */
226#define OV7610_REG_GAIN 0x00 /* gain setting (5:0) */ 277#define OV7610_REG_GAIN 0x00 /* gain setting (5:0) */
278#define OV7610_REG_BLUE 0x01 /* blue channel balance */
279#define OV7610_REG_RED 0x02 /* red channel balance */
227#define OV7610_REG_SAT 0x03 /* saturation */ 280#define OV7610_REG_SAT 0x03 /* saturation */
228#define OV8610_REG_HUE 0x04 /* 04 reserved */ 281#define OV8610_REG_HUE 0x04 /* 04 reserved */
229#define OV7610_REG_CNT 0x05 /* Y contrast */ 282#define OV7610_REG_CNT 0x05 /* Y contrast */
@@ -846,11 +899,12 @@ static unsigned char ov7670_abs_to_sm(unsigned char v)
846static int reg_w(struct sd *sd, __u16 index, __u8 value) 899static int reg_w(struct sd *sd, __u16 index, __u8 value)
847{ 900{
848 int ret; 901 int ret;
902 int req = (sd->bridge <= BRIDGE_OV511PLUS) ? 2 : 1;
849 903
850 sd->gspca_dev.usb_buf[0] = value; 904 sd->gspca_dev.usb_buf[0] = value;
851 ret = usb_control_msg(sd->gspca_dev.dev, 905 ret = usb_control_msg(sd->gspca_dev.dev,
852 usb_sndctrlpipe(sd->gspca_dev.dev, 0), 906 usb_sndctrlpipe(sd->gspca_dev.dev, 0),
853 1, /* REQ_IO (ov518/519) */ 907 req,
854 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 908 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
855 0, index, 909 0, index,
856 sd->gspca_dev.usb_buf, 1, 500); 910 sd->gspca_dev.usb_buf, 1, 500);
@@ -864,10 +918,11 @@ static int reg_w(struct sd *sd, __u16 index, __u8 value)
864static int reg_r(struct sd *sd, __u16 index) 918static int reg_r(struct sd *sd, __u16 index)
865{ 919{
866 int ret; 920 int ret;
921 int req = (sd->bridge <= BRIDGE_OV511PLUS) ? 3 : 1;
867 922
868 ret = usb_control_msg(sd->gspca_dev.dev, 923 ret = usb_control_msg(sd->gspca_dev.dev,
869 usb_rcvctrlpipe(sd->gspca_dev.dev, 0), 924 usb_rcvctrlpipe(sd->gspca_dev.dev, 0),
870 1, /* REQ_IO */ 925 req,
871 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 926 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
872 0, index, sd->gspca_dev.usb_buf, 1, 500); 927 0, index, sd->gspca_dev.usb_buf, 1, 500);
873 928
@@ -924,6 +979,28 @@ static int reg_w_mask(struct sd *sd,
924} 979}
925 980
926/* 981/*
982 * Writes multiple (n) byte value to a single register. Only valid with certain
983 * registers (0x30 and 0xc4 - 0xce).
984 */
985static int ov518_reg_w32(struct sd *sd, __u16 index, u32 value, int n)
986{
987 int ret;
988
989 *((u32 *)sd->gspca_dev.usb_buf) = __cpu_to_le32(value);
990
991 ret = usb_control_msg(sd->gspca_dev.dev,
992 usb_sndctrlpipe(sd->gspca_dev.dev, 0),
993 1 /* REG_IO */,
994 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
995 0, index,
996 sd->gspca_dev.usb_buf, n, 500);
997 if (ret < 0)
998 PDEBUG(D_ERR, "Write reg32 [%02x] %08x failed", index, value);
999 return ret;
1000}
1001
1002
1003/*
927 * The OV518 I2C I/O procedure is different, hence, this function. 1004 * The OV518 I2C I/O procedure is different, hence, this function.
928 * This is normally only called from i2c_w(). Note that this function 1005 * This is normally only called from i2c_w(). Note that this function
929 * always succeeds regardless of whether the sensor is present and working. 1006 * always succeeds regardless of whether the sensor is present and working.
@@ -1014,20 +1091,47 @@ static inline int ov51x_stop(struct sd *sd)
1014{ 1091{
1015 PDEBUG(D_STREAM, "stopping"); 1092 PDEBUG(D_STREAM, "stopping");
1016 sd->stopped = 1; 1093 sd->stopped = 1;
1017 return reg_w(sd, OV519_SYS_RESET1, 0x0f); 1094 switch (sd->bridge) {
1095 case BRIDGE_OV511:
1096 case BRIDGE_OV511PLUS:
1097 return reg_w(sd, R51x_SYS_RESET, 0x3d);
1098 case BRIDGE_OV518:
1099 case BRIDGE_OV518PLUS:
1100 return reg_w_mask(sd, R51x_SYS_RESET, 0x3a, 0x3a);
1101 case BRIDGE_OV519:
1102 return reg_w(sd, OV519_SYS_RESET1, 0x0f);
1103 }
1104
1105 return 0;
1018} 1106}
1019 1107
1020/* Restarts OV511 after ov511_stop() is called. Has no effect if it is not 1108/* Restarts OV511 after ov511_stop() is called. Has no effect if it is not
1021 * actually stopped (for performance). */ 1109 * actually stopped (for performance). */
1022static inline int ov51x_restart(struct sd *sd) 1110static inline int ov51x_restart(struct sd *sd)
1023{ 1111{
1112 int rc;
1113
1024 PDEBUG(D_STREAM, "restarting"); 1114 PDEBUG(D_STREAM, "restarting");
1025 if (!sd->stopped) 1115 if (!sd->stopped)
1026 return 0; 1116 return 0;
1027 sd->stopped = 0; 1117 sd->stopped = 0;
1028 1118
1029 /* Reinitialize the stream */ 1119 /* Reinitialize the stream */
1030 return reg_w(sd, OV519_SYS_RESET1, 0x00); 1120 switch (sd->bridge) {
1121 case BRIDGE_OV511:
1122 case BRIDGE_OV511PLUS:
1123 return reg_w(sd, R51x_SYS_RESET, 0x00);
1124 case BRIDGE_OV518:
1125 case BRIDGE_OV518PLUS:
1126 rc = reg_w(sd, 0x2f, 0x80);
1127 if (rc < 0)
1128 return rc;
1129 return reg_w(sd, R51x_SYS_RESET, 0x00);
1130 case BRIDGE_OV519:
1131 return reg_w(sd, OV519_SYS_RESET1, 0x00);
1132 }
1133
1134 return 0;
1031} 1135}
1032 1136
1033/* This does an initial reset of an OmniVision sensor and ensures that I2C 1137/* This does an initial reset of an OmniVision sensor and ensures that I2C
@@ -1287,16 +1391,161 @@ static int ov6xx0_configure(struct sd *sd)
1287/* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */ 1391/* Turns on or off the LED. Only has an effect with OV511+/OV518(+)/OV519 */
1288static void ov51x_led_control(struct sd *sd, int on) 1392static void ov51x_led_control(struct sd *sd, int on)
1289{ 1393{
1290 reg_w_mask(sd, OV519_GPIO_DATA_OUT0, !on, 1); /* 0 / 1 */ 1394 switch (sd->bridge) {
1395 /* OV511 has no LED control */
1396 case BRIDGE_OV511PLUS:
1397 reg_w(sd, R511_SYS_LED_CTL, on ? 1 : 0);
1398 break;
1399 case BRIDGE_OV518:
1400 case BRIDGE_OV518PLUS:
1401 reg_w_mask(sd, R518_GPIO_OUT, on ? 0x02 : 0x00, 0x02);
1402 break;
1403 case BRIDGE_OV519:
1404 reg_w_mask(sd, OV519_GPIO_DATA_OUT0, !on, 1); /* 0 / 1 */
1405 break;
1406 }
1291} 1407}
1292 1408
1293/* this function is called at probe time */ 1409/* OV518 quantization tables are 8x4 (instead of 8x8) */
1294static int sd_config(struct gspca_dev *gspca_dev, 1410static int ov518_upload_quan_tables(struct sd *sd)
1295 const struct usb_device_id *id) 1411{
1412 const unsigned char yQuanTable518[] = {
1413 5, 4, 5, 6, 6, 7, 7, 7,
1414 5, 5, 5, 5, 6, 7, 7, 7,
1415 6, 6, 6, 6, 7, 7, 7, 8,
1416 7, 7, 6, 7, 7, 7, 8, 8
1417 };
1418
1419 const unsigned char uvQuanTable518[] = {
1420 6, 6, 6, 7, 7, 7, 7, 7,
1421 6, 6, 6, 7, 7, 7, 7, 7,
1422 6, 6, 6, 7, 7, 7, 7, 8,
1423 7, 7, 7, 7, 7, 7, 8, 8
1424 };
1425
1426 const unsigned char *pYTable = yQuanTable518;
1427 const unsigned char *pUVTable = uvQuanTable518;
1428 unsigned char val0, val1;
1429 int i, rc, reg = R51x_COMP_LUT_BEGIN;
1430
1431 PDEBUG(D_PROBE, "Uploading quantization tables");
1432
1433 for (i = 0; i < 16; i++) {
1434 val0 = *pYTable++;
1435 val1 = *pYTable++;
1436 val0 &= 0x0f;
1437 val1 &= 0x0f;
1438 val0 |= val1 << 4;
1439 rc = reg_w(sd, reg, val0);
1440 if (rc < 0)
1441 return rc;
1442
1443 val0 = *pUVTable++;
1444 val1 = *pUVTable++;
1445 val0 &= 0x0f;
1446 val1 &= 0x0f;
1447 val0 |= val1 << 4;
1448 rc = reg_w(sd, reg + 16, val0);
1449 if (rc < 0)
1450 return rc;
1451
1452 reg++;
1453 }
1454
1455 return 0;
1456}
1457
1458/* This initializes the OV518/OV518+ and the sensor */
1459static int ov518_configure(struct gspca_dev *gspca_dev)
1296{ 1460{
1297 struct sd *sd = (struct sd *) gspca_dev; 1461 struct sd *sd = (struct sd *) gspca_dev;
1298 struct cam *cam; 1462 int rc;
1463
1464 /* For 518 and 518+ */
1465 static struct ov_regvals init_518[] = {
1466 { R51x_SYS_RESET, 0x40 },
1467 { R51x_SYS_INIT, 0xe1 },
1468 { R51x_SYS_RESET, 0x3e },
1469 { R51x_SYS_INIT, 0xe1 },
1470 { R51x_SYS_RESET, 0x00 },
1471 { R51x_SYS_INIT, 0xe1 },
1472 { 0x46, 0x00 },
1473 { 0x5d, 0x03 },
1474 };
1475
1476 static struct ov_regvals norm_518[] = {
1477 { R51x_SYS_SNAP, 0x02 }, /* Reset */
1478 { R51x_SYS_SNAP, 0x01 }, /* Enable */
1479 { 0x31, 0x0f },
1480 { 0x5d, 0x03 },
1481 { 0x24, 0x9f },
1482 { 0x25, 0x90 },
1483 { 0x20, 0x00 },
1484 { 0x51, 0x04 },
1485 { 0x71, 0x19 },
1486 { 0x2f, 0x80 },
1487 };
1488
1489 static struct ov_regvals norm_518_p[] = {
1490 { R51x_SYS_SNAP, 0x02 }, /* Reset */
1491 { R51x_SYS_SNAP, 0x01 }, /* Enable */
1492 { 0x31, 0x0f },
1493 { 0x5d, 0x03 },
1494 { 0x24, 0x9f },
1495 { 0x25, 0x90 },
1496 { 0x20, 0x60 },
1497 { 0x51, 0x02 },
1498 { 0x71, 0x19 },
1499 { 0x40, 0xff },
1500 { 0x41, 0x42 },
1501 { 0x46, 0x00 },
1502 { 0x33, 0x04 },
1503 { 0x21, 0x19 },
1504 { 0x3f, 0x10 },
1505 { 0x2f, 0x80 },
1506 };
1507
1508 /* First 5 bits of custom ID reg are a revision ID on OV518 */
1509 PDEBUG(D_PROBE, "Device revision %d",
1510 0x1F & reg_r(sd, R51x_SYS_CUST_ID));
1511
1512 rc = write_regvals(sd, init_518, ARRAY_SIZE(init_518));
1513 if (rc < 0)
1514 return rc;
1515
1516 /* Set LED GPIO pin to output mode */
1517 rc = reg_w_mask(sd, R518_GPIO_CTL, 0x00, 0x02);
1518 if (rc < 0)
1519 return rc;
1299 1520
1521 switch (sd->bridge) {
1522 case BRIDGE_OV518:
1523 rc = write_regvals(sd, norm_518, ARRAY_SIZE(norm_518));
1524 if (rc < 0)
1525 return rc;
1526 break;
1527 case BRIDGE_OV518PLUS:
1528 rc = write_regvals(sd, norm_518_p, ARRAY_SIZE(norm_518_p));
1529 if (rc < 0)
1530 return rc;
1531 break;
1532 }
1533
1534 rc = ov518_upload_quan_tables(sd);
1535 if (rc < 0) {
1536 PDEBUG(D_ERR, "Error uploading quantization tables");
1537 return rc;
1538 }
1539
1540 rc = reg_w(sd, 0x2f, 0x80);
1541 if (rc < 0)
1542 return rc;
1543
1544 return 0;
1545}
1546
1547static int ov519_configure(struct sd *sd)
1548{
1300 static const struct ov_regvals init_519[] = { 1549 static const struct ov_regvals init_519[] = {
1301 { 0x5a, 0x6d }, /* EnableSystem */ 1550 { 0x5a, 0x6d }, /* EnableSystem */
1302 { 0x53, 0x9b }, 1551 { 0x53, 0x9b },
@@ -1313,8 +1562,32 @@ static int sd_config(struct gspca_dev *gspca_dev,
1313 /* windows reads 0x55 at this point*/ 1562 /* windows reads 0x55 at this point*/
1314 }; 1563 };
1315 1564
1316 if (write_regvals(sd, init_519, ARRAY_SIZE(init_519))) 1565 return write_regvals(sd, init_519, ARRAY_SIZE(init_519));
1566}
1567
1568/* this function is called at probe time */
1569static int sd_config(struct gspca_dev *gspca_dev,
1570 const struct usb_device_id *id)
1571{
1572 struct sd *sd = (struct sd *) gspca_dev;
1573 struct cam *cam;
1574 int ret = 0;
1575
1576 sd->bridge = id->driver_info;
1577
1578 switch (sd->bridge) {
1579 case BRIDGE_OV518:
1580 case BRIDGE_OV518PLUS:
1581 ret = ov518_configure(gspca_dev);
1582 break;
1583 case BRIDGE_OV519:
1584 ret = ov519_configure(sd);
1585 break;
1586 }
1587
1588 if (ret)
1317 goto error; 1589 goto error;
1590
1318 ov51x_led_control(sd, 0); /* turn LED off */ 1591 ov51x_led_control(sd, 0); /* turn LED off */
1319 1592
1320 /* Test for 76xx */ 1593 /* Test for 76xx */
@@ -1360,12 +1633,26 @@ static int sd_config(struct gspca_dev *gspca_dev,
1360 } 1633 }
1361 1634
1362 cam = &gspca_dev->cam; 1635 cam = &gspca_dev->cam;
1363 if (!sd->sif) { 1636 switch (sd->bridge) {
1364 cam->cam_mode = vga_mode; 1637 case BRIDGE_OV518:
1365 cam->nmodes = ARRAY_SIZE(vga_mode); 1638 case BRIDGE_OV518PLUS:
1366 } else { 1639 if (!sd->sif) {
1367 cam->cam_mode = sif_mode; 1640 cam->cam_mode = ov518_vga_mode;
1368 cam->nmodes = ARRAY_SIZE(sif_mode); 1641 cam->nmodes = ARRAY_SIZE(ov518_vga_mode);
1642 } else {
1643 cam->cam_mode = ov518_sif_mode;
1644 cam->nmodes = ARRAY_SIZE(ov518_sif_mode);
1645 }
1646 break;
1647 case BRIDGE_OV519:
1648 if (!sd->sif) {
1649 cam->cam_mode = ov519_vga_mode;
1650 cam->nmodes = ARRAY_SIZE(ov519_vga_mode);
1651 } else {
1652 cam->cam_mode = ov519_sif_mode;
1653 cam->nmodes = ARRAY_SIZE(ov519_sif_mode);
1654 }
1655 break;
1369 } 1656 }
1370 sd->brightness = BRIGHTNESS_DEF; 1657 sd->brightness = BRIGHTNESS_DEF;
1371 sd->contrast = CONTRAST_DEF; 1658 sd->contrast = CONTRAST_DEF;
@@ -1422,6 +1709,106 @@ static int sd_init(struct gspca_dev *gspca_dev)
1422 return 0; 1709 return 0;
1423} 1710}
1424 1711
1712/* Sets up the OV518/OV518+ with the given image parameters
1713 *
1714 * OV518 needs a completely different approach, until we can figure out what
1715 * the individual registers do. Also, only 15 FPS is supported now.
1716 *
1717 * Do not put any sensor-specific code in here (including I2C I/O functions)
1718 */
1719static int ov518_mode_init_regs(struct sd *sd)
1720{
1721 int hsegs, vsegs;
1722
1723 /******** Set the mode ********/
1724
1725 reg_w(sd, 0x2b, 0);
1726 reg_w(sd, 0x2c, 0);
1727 reg_w(sd, 0x2d, 0);
1728 reg_w(sd, 0x2e, 0);
1729 reg_w(sd, 0x3b, 0);
1730 reg_w(sd, 0x3c, 0);
1731 reg_w(sd, 0x3d, 0);
1732 reg_w(sd, 0x3e, 0);
1733
1734 if (sd->bridge == BRIDGE_OV518) {
1735 /* Set 8-bit (YVYU) input format */
1736 reg_w_mask(sd, 0x20, 0x08, 0x08);
1737
1738 /* Set 12-bit (4:2:0) output format */
1739 reg_w_mask(sd, 0x28, 0x80, 0xf0);
1740 reg_w_mask(sd, 0x38, 0x80, 0xf0);
1741 } else {
1742 reg_w(sd, 0x28, 0x80);
1743 reg_w(sd, 0x38, 0x80);
1744 }
1745
1746 hsegs = sd->gspca_dev.width / 16;
1747 vsegs = sd->gspca_dev.height / 4;
1748
1749 reg_w(sd, 0x29, hsegs);
1750 reg_w(sd, 0x2a, vsegs);
1751
1752 reg_w(sd, 0x39, hsegs);
1753 reg_w(sd, 0x3a, vsegs);
1754
1755 /* Windows driver does this here; who knows why */
1756 reg_w(sd, 0x2f, 0x80);
1757
1758 /******** Set the framerate (to 30 FPS) ********/
1759 if (sd->bridge == BRIDGE_OV518PLUS)
1760 sd->clockdiv = 1;
1761 else
1762 sd->clockdiv = 0;
1763
1764 /* Mode independent, but framerate dependent, regs */
1765 reg_w(sd, 0x51, 0x04); /* Clock divider; lower==faster */
1766 reg_w(sd, 0x22, 0x18);
1767 reg_w(sd, 0x23, 0xff);
1768
1769 if (sd->bridge == BRIDGE_OV518PLUS)
1770 reg_w(sd, 0x21, 0x19);
1771 else
1772 reg_w(sd, 0x71, 0x17); /* Compression-related? */
1773
1774 /* FIXME: Sensor-specific */
1775 /* Bit 5 is what matters here. Of course, it is "reserved" */
1776 i2c_w(sd, 0x54, 0x23);
1777
1778 reg_w(sd, 0x2f, 0x80);
1779
1780 if (sd->bridge == BRIDGE_OV518PLUS) {
1781 reg_w(sd, 0x24, 0x94);
1782 reg_w(sd, 0x25, 0x90);
1783 ov518_reg_w32(sd, 0xc4, 400, 2); /* 190h */
1784 ov518_reg_w32(sd, 0xc6, 540, 2); /* 21ch */
1785 ov518_reg_w32(sd, 0xc7, 540, 2); /* 21ch */
1786 ov518_reg_w32(sd, 0xc8, 108, 2); /* 6ch */
1787 ov518_reg_w32(sd, 0xca, 131098, 3); /* 2001ah */
1788 ov518_reg_w32(sd, 0xcb, 532, 2); /* 214h */
1789 ov518_reg_w32(sd, 0xcc, 2400, 2); /* 960h */
1790 ov518_reg_w32(sd, 0xcd, 32, 2); /* 20h */
1791 ov518_reg_w32(sd, 0xce, 608, 2); /* 260h */
1792 } else {
1793 reg_w(sd, 0x24, 0x9f);
1794 reg_w(sd, 0x25, 0x90);
1795 ov518_reg_w32(sd, 0xc4, 400, 2); /* 190h */
1796 ov518_reg_w32(sd, 0xc6, 381, 2); /* 17dh */
1797 ov518_reg_w32(sd, 0xc7, 381, 2); /* 17dh */
1798 ov518_reg_w32(sd, 0xc8, 128, 2); /* 80h */
1799 ov518_reg_w32(sd, 0xca, 183331, 3); /* 2cc23h */
1800 ov518_reg_w32(sd, 0xcb, 746, 2); /* 2eah */
1801 ov518_reg_w32(sd, 0xcc, 1750, 2); /* 6d6h */
1802 ov518_reg_w32(sd, 0xcd, 45, 2); /* 2dh */
1803 ov518_reg_w32(sd, 0xce, 851, 2); /* 353h */
1804 }
1805
1806 reg_w(sd, 0x2f, 0x80);
1807
1808 return 0;
1809}
1810
1811
1425/* Sets up the OV519 with the given image parameters 1812/* Sets up the OV519 with the given image parameters
1426 * 1813 *
1427 * OV519 needs a completely different approach, until we can figure out what 1814 * OV519 needs a completely different approach, until we can figure out what
@@ -1740,6 +2127,11 @@ static int set_ov_sensor_window(struct sd *sd)
1740 hwebase = 0x3a; 2127 hwebase = 0x3a;
1741 vwsbase = 0x05; 2128 vwsbase = 0x05;
1742 vwebase = 0x06; 2129 vwebase = 0x06;
2130 if (qvga) {
2131 /* HDG: this fixes U and V getting swapped */
2132 hwsbase--;
2133 vwsbase--;
2134 }
1743 break; 2135 break;
1744 case SEN_OV7620: 2136 case SEN_OV7620:
1745 hwsbase = 0x2f; /* From 7620.SET (spec is wrong) */ 2137 hwsbase = 0x2f; /* From 7620.SET (spec is wrong) */
@@ -1855,15 +2247,28 @@ static int set_ov_sensor_window(struct sd *sd)
1855static int sd_start(struct gspca_dev *gspca_dev) 2247static int sd_start(struct gspca_dev *gspca_dev)
1856{ 2248{
1857 struct sd *sd = (struct sd *) gspca_dev; 2249 struct sd *sd = (struct sd *) gspca_dev;
1858 int ret; 2250 int ret = 0;
1859 2251
1860 ret = ov519_mode_init_regs(sd); 2252 switch (sd->bridge) {
2253 case BRIDGE_OV518:
2254 case BRIDGE_OV518PLUS:
2255 ret = ov518_mode_init_regs(sd);
2256 break;
2257 case BRIDGE_OV519:
2258 ret = ov519_mode_init_regs(sd);
2259 break;
2260 }
1861 if (ret < 0) 2261 if (ret < 0)
1862 goto out; 2262 goto out;
2263
1863 ret = set_ov_sensor_window(sd); 2264 ret = set_ov_sensor_window(sd);
1864 if (ret < 0) 2265 if (ret < 0)
1865 goto out; 2266 goto out;
1866 2267
2268 setcontrast(gspca_dev);
2269 setbrightness(gspca_dev);
2270 setcolors(gspca_dev);
2271
1867 ret = ov51x_restart(sd); 2272 ret = ov51x_restart(sd);
1868 if (ret < 0) 2273 if (ret < 0)
1869 goto out; 2274 goto out;
@@ -1882,7 +2287,30 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1882 ov51x_led_control(sd, 0); 2287 ov51x_led_control(sd, 0);
1883} 2288}
1884 2289
1885static void sd_pkt_scan(struct gspca_dev *gspca_dev, 2290static void ov518_pkt_scan(struct gspca_dev *gspca_dev,
2291 struct gspca_frame *frame, /* target */
2292 __u8 *data, /* isoc packet */
2293 int len) /* iso packet length */
2294{
2295 PDEBUG(D_STREAM, "ov518_pkt_scan: %d bytes", len);
2296
2297 if (len & 7) {
2298 len--;
2299 PDEBUG(D_STREAM, "packet number: %d\n", (int)data[len]);
2300 }
2301
2302 /* A false positive here is likely, until OVT gives me
2303 * the definitive SOF/EOF format */
2304 if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) {
2305 gspca_frame_add(gspca_dev, LAST_PACKET, frame, data, 0);
2306 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, data, 0);
2307 }
2308
2309 /* intermediate packet */
2310 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
2311}
2312
2313static void ov519_pkt_scan(struct gspca_dev *gspca_dev,
1886 struct gspca_frame *frame, /* target */ 2314 struct gspca_frame *frame, /* target */
1887 __u8 *data, /* isoc packet */ 2315 __u8 *data, /* isoc packet */
1888 int len) /* iso packet length */ 2316 int len) /* iso packet length */
@@ -1926,6 +2354,27 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1926 data, len); 2354 data, len);
1927} 2355}
1928 2356
2357static void sd_pkt_scan(struct gspca_dev *gspca_dev,
2358 struct gspca_frame *frame, /* target */
2359 __u8 *data, /* isoc packet */
2360 int len) /* iso packet length */
2361{
2362 struct sd *sd = (struct sd *) gspca_dev;
2363
2364 switch (sd->bridge) {
2365 case BRIDGE_OV511:
2366 case BRIDGE_OV511PLUS:
2367 break;
2368 case BRIDGE_OV518:
2369 case BRIDGE_OV518PLUS:
2370 ov518_pkt_scan(gspca_dev, frame, data, len);
2371 break;
2372 case BRIDGE_OV519:
2373 ov519_pkt_scan(gspca_dev, frame, data, len);
2374 break;
2375 }
2376}
2377
1929/* -- management routines -- */ 2378/* -- management routines -- */
1930 2379
1931static void setbrightness(struct gspca_dev *gspca_dev) 2380static void setbrightness(struct gspca_dev *gspca_dev)
@@ -1970,6 +2419,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
1970 break; 2419 break;
1971 case SEN_OV6630: 2420 case SEN_OV6630:
1972 i2c_w_mask(sd, OV7610_REG_CNT, val >> 4, 0x0f); 2421 i2c_w_mask(sd, OV7610_REG_CNT, val >> 4, 0x0f);
2422 break;
1973 case SEN_OV8610: { 2423 case SEN_OV8610: {
1974 static const __u8 ctab[] = { 2424 static const __u8 ctab[] = {
1975 0x03, 0x09, 0x0b, 0x0f, 0x53, 0x6f, 0x35, 0x7f 2425 0x03, 0x09, 0x0b, 0x0f, 0x53, 0x6f, 0x35, 0x7f
@@ -2136,19 +2586,21 @@ static const struct sd_desc sd_desc = {
2136 2586
2137/* -- module initialisation -- */ 2587/* -- module initialisation -- */
2138static const __devinitdata struct usb_device_id device_table[] = { 2588static const __devinitdata struct usb_device_id device_table[] = {
2139 {USB_DEVICE(0x041e, 0x4052)}, 2589 {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
2140 {USB_DEVICE(0x041e, 0x405f)}, 2590 {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
2141 {USB_DEVICE(0x041e, 0x4060)}, 2591 {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
2142 {USB_DEVICE(0x041e, 0x4061)}, 2592 {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
2143 {USB_DEVICE(0x041e, 0x4064)}, 2593 {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
2144 {USB_DEVICE(0x041e, 0x4068)}, 2594 {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
2145 {USB_DEVICE(0x045e, 0x028c)}, 2595 {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 },
2146 {USB_DEVICE(0x054c, 0x0154)}, 2596 {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
2147 {USB_DEVICE(0x054c, 0x0155)}, 2597 {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
2148 {USB_DEVICE(0x05a9, 0x0519)}, 2598 {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
2149 {USB_DEVICE(0x05a9, 0x0530)}, 2599 {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 },
2150 {USB_DEVICE(0x05a9, 0x4519)}, 2600 {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 },
2151 {USB_DEVICE(0x05a9, 0x8519)}, 2601 {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
2602 {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
2603 {USB_DEVICE(0x05a9, 0xa518), .driver_info = BRIDGE_OV518PLUS },
2152 {} 2604 {}
2153}; 2605};
2154 2606
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 19e0bc60de14..4b528b372911 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -60,10 +60,23 @@ struct sd {
60static struct ctrl sd_ctrls[] = { 60static struct ctrl sd_ctrls[] = {
61}; 61};
62 62
63static const struct v4l2_pix_format vga_mode[] = { 63static const struct v4l2_pix_format vga_yuyv_mode[] = {
64 {640, 480, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE, 64 {640, 480, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
65 .bytesperline = 640 * 2, 65 .bytesperline = 640 * 2,
66 .sizeimage = 640 * 480 * 2, 66 .sizeimage = 640 * 480 * 2,
67 .colorspace = V4L2_COLORSPACE_SRGB,
68 .priv = 0},
69};
70
71static const struct v4l2_pix_format vga_jpeg_mode[] = {
72 {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
73 .bytesperline = 320,
74 .sizeimage = 320 * 240 * 3 / 8 + 590,
75 .colorspace = V4L2_COLORSPACE_JPEG,
76 .priv = 1},
77 {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
78 .bytesperline = 640,
79 .sizeimage = 640 * 480 * 3 / 8 + 590,
67 .colorspace = V4L2_COLORSPACE_JPEG, 80 .colorspace = V4L2_COLORSPACE_JPEG,
68 .priv = 0}, 81 .priv = 0},
69}; 82};
@@ -244,7 +257,7 @@ static const u8 bridge_init_ov965x[][2] = {
244}; 257};
245 258
246static const u8 sensor_init_ov965x[][2] = { 259static const u8 sensor_init_ov965x[][2] = {
247 {0x12, 0x80}, /* com7 - reset */ 260 {0x12, 0x80}, /* com7 - SSCB reset */
248 {0x00, 0x00}, /* gain */ 261 {0x00, 0x00}, /* gain */
249 {0x01, 0x80}, /* blue */ 262 {0x01, 0x80}, /* blue */
250 {0x02, 0x80}, /* red */ 263 {0x02, 0x80}, /* red */
@@ -254,10 +267,10 @@ static const u8 sensor_init_ov965x[][2] = {
254 {0x0e, 0x61}, /* com5 */ 267 {0x0e, 0x61}, /* com5 */
255 {0x0f, 0x42}, /* com6 */ 268 {0x0f, 0x42}, /* com6 */
256 {0x11, 0x00}, /* clkrc */ 269 {0x11, 0x00}, /* clkrc */
257 {0x12, 0x02}, /* com7 */ 270 {0x12, 0x02}, /* com7 - 15fps VGA YUYV */
258 {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ 271 {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */
259 {0x14, 0x28}, /* com9 */ 272 {0x14, 0x28}, /* com9 */
260 {0x16, 0x24}, /* rsvd16 */ 273 {0x16, 0x24}, /* reg16 */
261 {0x17, 0x1d}, /* hstart*/ 274 {0x17, 0x1d}, /* hstart*/
262 {0x18, 0xbd}, /* hstop */ 275 {0x18, 0xbd}, /* hstop */
263 {0x19, 0x01}, /* vstrt */ 276 {0x19, 0x01}, /* vstrt */
@@ -269,24 +282,24 @@ static const u8 sensor_init_ov965x[][2] = {
269 {0x27, 0x08}, /* bbias */ 282 {0x27, 0x08}, /* bbias */
270 {0x28, 0x08}, /* gbbias */ 283 {0x28, 0x08}, /* gbbias */
271 {0x29, 0x15}, /* gr com */ 284 {0x29, 0x15}, /* gr com */
272 {0x2a, 0x00}, 285 {0x2a, 0x00}, /* exhch */
273 {0x2b, 0x00}, 286 {0x2b, 0x00}, /* exhcl */
274 {0x2c, 0x08}, /* rbias */ 287 {0x2c, 0x08}, /* rbias */
275 {0x32, 0xff}, /* href */ 288 {0x32, 0xff}, /* href */
276 {0x33, 0x00}, /* chlf */ 289 {0x33, 0x00}, /* chlf */
277 {0x34, 0x3f}, /* arblm */ 290 {0x34, 0x3f}, /* aref1 */
278 {0x35, 0x00}, /* rsvd35 */ 291 {0x35, 0x00}, /* aref2 */
279 {0x36, 0xf8}, /* rsvd36 */ 292 {0x36, 0xf8}, /* aref3 */
280 {0x38, 0x72}, /* acom38 */ 293 {0x38, 0x72}, /* adc2 */
281 {0x39, 0x57}, /* ofon */ 294 {0x39, 0x57}, /* aref4 */
282 {0x3a, 0x80}, /* tslb */ 295 {0x3a, 0x80}, /* tslb - yuyv */
283 {0x3b, 0xc4}, 296 {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */
284 {0x3d, 0x99}, /* com13 */ 297 {0x3d, 0x99}, /* com13 */
285 {0x3f, 0xc1}, 298 {0x3f, 0xc1}, /* edge */
286 {0x40, 0xc0}, /* com15 */ 299 {0x40, 0xc0}, /* com15 */
287 {0x41, 0x40}, /* com16 */ 300 {0x41, 0x40}, /* com16 */
288 {0x42, 0xc0}, 301 {0x42, 0xc0}, /* com17 */
289 {0x43, 0x0a}, 302 {0x43, 0x0a}, /* rsvd */
290 {0x44, 0xf0}, 303 {0x44, 0xf0},
291 {0x45, 0x46}, 304 {0x45, 0x46},
292 {0x46, 0x62}, 305 {0x46, 0x62},
@@ -297,22 +310,22 @@ static const u8 sensor_init_ov965x[][2] = {
297 {0x4c, 0x7f}, 310 {0x4c, 0x7f},
298 {0x4d, 0x7f}, 311 {0x4d, 0x7f},
299 {0x4e, 0x7f}, 312 {0x4e, 0x7f},
300 {0x4f, 0x98}, 313 {0x4f, 0x98}, /* matrix */
301 {0x50, 0x98}, 314 {0x50, 0x98},
302 {0x51, 0x00}, 315 {0x51, 0x00},
303 {0x52, 0x28}, 316 {0x52, 0x28},
304 {0x53, 0x70}, 317 {0x53, 0x70},
305 {0x54, 0x98}, 318 {0x54, 0x98},
306 {0x58, 0x1a}, 319 {0x58, 0x1a}, /* matrix coef sign */
307 {0x59, 0x85}, 320 {0x59, 0x85}, /* AWB control */
308 {0x5a, 0xa9}, 321 {0x5a, 0xa9},
309 {0x5b, 0x64}, 322 {0x5b, 0x64},
310 {0x5c, 0x84}, 323 {0x5c, 0x84},
311 {0x5d, 0x53}, 324 {0x5d, 0x53},
312 {0x5e, 0x0e}, 325 {0x5e, 0x0e},
313 {0x5f, 0xf0}, 326 {0x5f, 0xf0}, /* AWB blue limit */
314 {0x60, 0xf0}, 327 {0x60, 0xf0}, /* AWB red limit */
315 {0x61, 0xf0}, 328 {0x61, 0xf0}, /* AWB green limit */
316 {0x62, 0x00}, /* lcc1 */ 329 {0x62, 0x00}, /* lcc1 */
317 {0x63, 0x00}, /* lcc2 */ 330 {0x63, 0x00}, /* lcc2 */
318 {0x64, 0x02}, /* lcc3 */ 331 {0x64, 0x02}, /* lcc3 */
@@ -324,15 +337,15 @@ static const u8 sensor_init_ov965x[][2] = {
324 {0x6d, 0x55}, 337 {0x6d, 0x55},
325 {0x6e, 0x00}, 338 {0x6e, 0x00},
326 {0x6f, 0x9d}, 339 {0x6f, 0x9d},
327 {0x70, 0x21}, 340 {0x70, 0x21}, /* dnsth */
328 {0x71, 0x78}, 341 {0x71, 0x78},
329 {0x72, 0x00}, 342 {0x72, 0x00}, /* poidx */
330 {0x73, 0x01}, 343 {0x73, 0x01}, /* pckdv */
331 {0x74, 0x3a}, 344 {0x74, 0x3a}, /* xindx */
332 {0x75, 0x35}, 345 {0x75, 0x35}, /* yindx */
333 {0x76, 0x01}, 346 {0x76, 0x01},
334 {0x77, 0x02}, 347 {0x77, 0x02},
335 {0x7a, 0x12}, 348 {0x7a, 0x12}, /* gamma curve */
336 {0x7b, 0x08}, 349 {0x7b, 0x08},
337 {0x7c, 0x16}, 350 {0x7c, 0x16},
338 {0x7d, 0x30}, 351 {0x7d, 0x30},
@@ -349,33 +362,33 @@ static const u8 sensor_init_ov965x[][2] = {
349 {0x88, 0xe6}, 362 {0x88, 0xe6},
350 {0x89, 0xf2}, 363 {0x89, 0xf2},
351 {0x8a, 0x03}, 364 {0x8a, 0x03},
352 {0x8c, 0x89}, 365 {0x8c, 0x89}, /* com19 */
353 {0x14, 0x28}, /* com9 */ 366 {0x14, 0x28}, /* com9 */
354 {0x90, 0x7d}, 367 {0x90, 0x7d},
355 {0x91, 0x7b}, 368 {0x91, 0x7b},
356 {0x9d, 0x03}, 369 {0x9d, 0x03}, /* lcc6 */
357 {0x9e, 0x04}, 370 {0x9e, 0x04}, /* lcc7 */
358 {0x9f, 0x7a}, 371 {0x9f, 0x7a},
359 {0xa0, 0x79}, 372 {0xa0, 0x79},
360 {0xa1, 0x40}, /* aechm */ 373 {0xa1, 0x40}, /* aechm */
361 {0xa4, 0x50}, 374 {0xa4, 0x50}, /* com21 */
362 {0xa5, 0x68}, /* com26 */ 375 {0xa5, 0x68}, /* com26 */
363 {0xa6, 0x4a}, 376 {0xa6, 0x4a}, /* AWB green */
364 {0xa8, 0xc1}, /* acoma8 */ 377 {0xa8, 0xc1}, /* refa8 */
365 {0xa9, 0xef}, /* acoma9 */ 378 {0xa9, 0xef}, /* refa9 */
366 {0xaa, 0x92}, 379 {0xaa, 0x92},
367 {0xab, 0x04}, 380 {0xab, 0x04},
368 {0xac, 0x80}, 381 {0xac, 0x80}, /* black level control */
369 {0xad, 0x80}, 382 {0xad, 0x80},
370 {0xae, 0x80}, 383 {0xae, 0x80},
371 {0xaf, 0x80}, 384 {0xaf, 0x80},
372 {0xb2, 0xf2}, 385 {0xb2, 0xf2},
373 {0xb3, 0x20}, 386 {0xb3, 0x20},
374 {0xb4, 0x20}, 387 {0xb4, 0x20}, /* ctrlb4 */
375 {0xb5, 0x00}, 388 {0xb5, 0x00},
376 {0xb6, 0xaf}, 389 {0xb6, 0xaf},
377 {0xbb, 0xae}, 390 {0xbb, 0xae},
378 {0xbc, 0x7f}, 391 {0xbc, 0x7f}, /* ADC channel offsets */
379 {0xdb, 0x7f}, 392 {0xdb, 0x7f},
380 {0xbe, 0x7f}, 393 {0xbe, 0x7f},
381 {0xbf, 0x7f}, 394 {0xbf, 0x7f},
@@ -384,7 +397,7 @@ static const u8 sensor_init_ov965x[][2] = {
384 {0xc2, 0x01}, 397 {0xc2, 0x01},
385 {0xc3, 0x4e}, 398 {0xc3, 0x4e},
386 {0xc6, 0x85}, 399 {0xc6, 0x85},
387 {0xc7, 0x80}, 400 {0xc7, 0x80}, /* com24 */
388 {0xc9, 0xe0}, 401 {0xc9, 0xe0},
389 {0xca, 0xe8}, 402 {0xca, 0xe8},
390 {0xcb, 0xf0}, 403 {0xcb, 0xf0},
@@ -399,11 +412,11 @@ static const u8 sensor_init_ov965x[][2] = {
399 {0x58, 0x1a}, 412 {0x58, 0x1a},
400 {0xff, 0x41}, /* read 41, write ff 00 */ 413 {0xff, 0x41}, /* read 41, write ff 00 */
401 {0x41, 0x40}, /* com16 */ 414 {0x41, 0x40}, /* com16 */
402 {0xc5, 0x03}, 415 {0xc5, 0x03}, /* 60 Hz banding filter */
403 {0x6a, 0x02}, 416 {0x6a, 0x02}, /* 50 Hz banding filter */
404 417
405 {0x12, 0x62}, /* com7 - VGA + CIF */ 418 {0x12, 0x62}, /* com7 - 30fps VGA YUV */
406 {0x36, 0xfa}, /* rsvd36 */ 419 {0x36, 0xfa}, /* aref3 */
407 {0x69, 0x0a}, /* hv */ 420 {0x69, 0x0a}, /* hv */
408 {0x8c, 0x89}, /* com22 */ 421 {0x8c, 0x89}, /* com22 */
409 {0x14, 0x28}, /* com9 */ 422 {0x14, 0x28}, /* com9 */
@@ -442,8 +455,8 @@ static const u8 bridge_init_ov965x_2[][2] = {
442 {0x52, 0x3c}, 455 {0x52, 0x3c},
443 {0x53, 0x00}, 456 {0x53, 0x00},
444 {0x54, 0x00}, 457 {0x54, 0x00},
445 {0x55, 0x00}, 458 {0x55, 0x00}, /* brightness */
446 {0x57, 0x00}, 459 {0x57, 0x00}, /* contrast 2 */
447 {0x5c, 0x00}, 460 {0x5c, 0x00},
448 {0x5a, 0xa0}, 461 {0x5a, 0xa0},
449 {0x5b, 0x78}, 462 {0x5b, 0x78},
@@ -489,9 +502,66 @@ static const u8 sensor_init_ov965x_2[][2] = {
489 {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ 502 {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */
490}; 503};
491 504
505static const u8 sensor_start_ov965x[][2] = {
506 {0x12, 0x62}, /* com7 - 30fps VGA YUV */
507 {0x36, 0xfa}, /* aref3 */
508 {0x69, 0x0a}, /* hv */
509 {0x8c, 0x89}, /* com22 */
510 {0x14, 0x28}, /* com9 */
511 {0x3e, 0x0c}, /* com14 */
512 {0x41, 0x40}, /* com16 */
513 {0x72, 0x00},
514 {0x73, 0x00},
515 {0x74, 0x3a},
516 {0x75, 0x35},
517 {0x76, 0x01},
518 {0xc7, 0x80}, /* com24 */
519 {0x03, 0x12}, /* vref */
520 {0x17, 0x16}, /* hstart */
521 {0x18, 0x02}, /* hstop */
522 {0x19, 0x01}, /* vstrt */
523 {0x1a, 0x3d}, /* vstop */
524 {0x32, 0xff}, /* href */
525 {0xc0, 0xaa},
526 {}
527};
528
492static const u8 bridge_start_ov965x[][2] = { 529static const u8 bridge_start_ov965x[][2] = {
530 {0x94, 0xaa},
531 {0xf1, 0x60},
532 {0xe5, 0x04},
533 {0xc0, 0x50},
534 {0xc1, 0x3c},
535 {0x8c, 0x00},
536 {0x8d, 0x1c},
537 {0x34, 0x05},
538 {}
539};
540
541static const u8 bridge_start_ov965x_vga[][2] = {
542 {0xc2, 0x0c},
543 {0xc3, 0xf9},
544 {0xda, 0x01},
545 {0x50, 0x00},
546 {0x51, 0xa0},
547 {0x52, 0x3c},
548 {0x53, 0x00},
549 {0x54, 0x00},
550 {0x55, 0x00},
551 {0x57, 0x00},
552 {0x5c, 0x00},
553 {0x5a, 0xa0},
554 {0x5b, 0x78},
555 {0x35, 0x02},
556 {0xd9, 0x10},
557 {0x94, 0x11},
558 {}
559};
560
561static const u8 bridge_start_ov965x_cif[][2] = {
493 {0xc2, 0x4c}, 562 {0xc2, 0x4c},
494 {0xc3, 0xf9}, 563 {0xc3, 0xf9},
564 {0xda, 0x00},
495 {0x50, 0x00}, 565 {0x50, 0x00},
496 {0x51, 0xa0}, 566 {0x51, 0xa0},
497 {0x52, 0x78}, 567 {0x52, 0x78},
@@ -500,30 +570,54 @@ static const u8 bridge_start_ov965x[][2] = {
500 {0x55, 0x00}, 570 {0x55, 0x00},
501 {0x57, 0x00}, 571 {0x57, 0x00},
502 {0x5c, 0x00}, 572 {0x5c, 0x00},
503 {0x5a, 0x28}, 573 {0x5a, 0x50},
504 {0x5b, 0x1e}, 574 {0x5b, 0x3c},
505 {0x35, 0x00}, 575 {0x35, 0x02},
506 {0xd9, 0x21}, 576 {0xd9, 0x10},
507 {0x94, 0x11}, 577 {0x94, 0x11},
578 {}
508}; 579};
509 580
510static const u8 sensor_start_ov965x[][2] = { 581static const u8 sensor_start_ov965x_vga[][2] = {
511 {0x3b, 0xe4}, 582 {0x3b, 0xc4}, /* com11 - night mode 1/4 frame rate */
583 {0x1e, 0x04}, /* mvfp */
584 {0x13, 0xe0}, /* com8 */
585 {0x00, 0x00},
586 {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */
587 {0x11, 0x03}, /* clkrc */
588 {0x6b, 0x5a}, /* dblv */
589 {0x6a, 0x05}, /* 50 Hz banding filter */
590 {0xc5, 0x07}, /* 60 Hz banding filter */
591 {0xa2, 0x4b}, /* bd50 */
592 {0xa3, 0x3e}, /* bd60 */
593
594 {0x2d, 0x00}, /* advfl */
595 {}
596};
597
598static const u8 sensor_start_ov965x_cif[][2] = {
599 {0x3b, 0xe4}, /* com11 - night mode 1/4 frame rate */
512 {0x1e, 0x04}, /* mvfp */ 600 {0x1e, 0x04}, /* mvfp */
513 {0x13, 0xe0}, /* com8 */ 601 {0x13, 0xe0}, /* com8 */
514 {0x00, 0x00}, 602 {0x00, 0x00},
515 {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */ 603 {0x13, 0xe7}, /* com8 - everything (AGC, AWB and AEC) */
516 {0x11, 0x01}, /* clkrc */ 604 {0x11, 0x01}, /* clkrc */
517 {0x6b, 0x5a}, /* dblv */ 605 {0x6b, 0x5a}, /* dblv */
518 {0x6a, 0x02}, 606 {0x6a, 0x02}, /* 50 Hz banding filter */
519 {0xc5, 0x03}, 607 {0xc5, 0x03}, /* 60 Hz banding filter */
520 {0xa2, 0x96}, 608 {0xa2, 0x96}, /* bd50 */
521 {0xa3, 0x7d}, 609 {0xa3, 0x7d}, /* bd60 */
610
522 {0xff, 0x13}, /* read 13, write ff 00 */ 611 {0xff, 0x13}, /* read 13, write ff 00 */
523 {0x13, 0xe7}, 612 {0x13, 0xe7},
524 {0x3a, 0x80}, 613 {0x3a, 0x80}, /* tslb - yuyv */
614 {}
615};
616
617static const u8 sensor_start_ov965x_2[][2] = {
525 {0xff, 0x42}, /* read 42, write ff 00 */ 618 {0xff, 0x42}, /* read 42, write ff 00 */
526 {0x42, 0xc1}, 619 {0x42, 0xc1}, /* com17 - 50 Hz filter */
620 {}
527}; 621};
528 622
529 623
@@ -705,11 +799,17 @@ static int sd_config(struct gspca_dev *gspca_dev,
705 799
706 cam = &gspca_dev->cam; 800 cam = &gspca_dev->cam;
707 801
708 cam->cam_mode = vga_mode; 802 if (sd->sensor == SENSOR_OV772X) {
709 cam->nmodes = ARRAY_SIZE(vga_mode); 803 cam->cam_mode = vga_yuyv_mode;
804 cam->nmodes = ARRAY_SIZE(vga_yuyv_mode);
710 805
711 cam->bulk_size = 16384; 806 cam->bulk = 1;
712 cam->bulk_nurbs = 2; 807 cam->bulk_size = 16384;
808 cam->bulk_nurbs = 2;
809 } else { /* ov965x */
810 cam->cam_mode = vga_jpeg_mode;
811 cam->nmodes = ARRAY_SIZE(vga_jpeg_mode);
812 }
713 813
714 return 0; 814 return 0;
715} 815}
@@ -778,6 +878,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
778static int sd_start(struct gspca_dev *gspca_dev) 878static int sd_start(struct gspca_dev *gspca_dev)
779{ 879{
780 struct sd *sd = (struct sd *) gspca_dev; 880 struct sd *sd = (struct sd *) gspca_dev;
881 int mode;
781 882
782 switch (sd->sensor) { 883 switch (sd->sensor) {
783 case SENSOR_OV772X: 884 case SENSOR_OV772X:
@@ -786,13 +887,28 @@ static int sd_start(struct gspca_dev *gspca_dev)
786 break; 887 break;
787 default: 888 default:
788/* case SENSOR_OV965X: */ 889/* case SENSOR_OV965X: */
789 reg_w_array(gspca_dev, bridge_start_ov965x, 890
790 ARRAY_SIZE(bridge_start_ov965x));
791 sccb_w_array(gspca_dev, sensor_start_ov965x, 891 sccb_w_array(gspca_dev, sensor_start_ov965x,
792 ARRAY_SIZE(sensor_start_ov965x)); 892 ARRAY_SIZE(sensor_start_ov965x));
893 reg_w_array(gspca_dev, bridge_start_ov965x,
894 ARRAY_SIZE(bridge_start_ov965x));
895 mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
896 if (mode != 0) { /* 320x240 */
897 reg_w_array(gspca_dev, bridge_start_ov965x_cif,
898 ARRAY_SIZE(bridge_start_ov965x_cif));
899 sccb_w_array(gspca_dev, sensor_start_ov965x_cif,
900 ARRAY_SIZE(sensor_start_ov965x_cif));
901 } else { /* 640x480 */
902 reg_w_array(gspca_dev, bridge_start_ov965x_vga,
903 ARRAY_SIZE(bridge_start_ov965x_vga));
904 sccb_w_array(gspca_dev, sensor_start_ov965x_vga,
905 ARRAY_SIZE(sensor_start_ov965x_vga));
906 }
907 sccb_w_array(gspca_dev, sensor_start_ov965x_2,
908 ARRAY_SIZE(sensor_start_ov965x_2));
909 ov534_reg_write(gspca_dev, 0xe0, 0x00);
793 ov534_reg_write(gspca_dev, 0xe0, 0x00); 910 ov534_reg_write(gspca_dev, 0xe0, 0x00);
794 ov534_set_led(gspca_dev, 1); 911 ov534_set_led(gspca_dev, 1);
795/*fixme: other sensor start omitted*/
796 } 912 }
797 return 0; 913 return 0;
798} 914}
@@ -832,9 +948,11 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, struct gspca_frame *frame,
832 __u32 this_pts; 948 __u32 this_pts;
833 u16 this_fid; 949 u16 this_fid;
834 int remaining_len = len; 950 int remaining_len = len;
951 int payload_len;
835 952
953 payload_len = gspca_dev->cam.bulk ? 2048 : 2040;
836 do { 954 do {
837 len = min(remaining_len, 2040); /*fixme: was 2048*/ 955 len = min(remaining_len, payload_len);
838 956
839 /* Payloads are prefixed with a UVC-style header. We 957 /* Payloads are prefixed with a UVC-style header. We
840 consider a frame to start when the FID toggles, or the PTS 958 consider a frame to start when the FID toggles, or the PTS
@@ -864,30 +982,27 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, struct gspca_frame *frame,
864 982
865 /* If PTS or FID has changed, start a new frame. */ 983 /* If PTS or FID has changed, start a new frame. */
866 if (this_pts != sd->last_pts || this_fid != sd->last_fid) { 984 if (this_pts != sd->last_pts || this_fid != sd->last_fid) {
867 gspca_frame_add(gspca_dev, FIRST_PACKET, frame, 985 if (gspca_dev->last_packet_type == INTER_PACKET)
868 NULL, 0); 986 frame = gspca_frame_add(gspca_dev,
987 LAST_PACKET, frame,
988 NULL, 0);
869 sd->last_pts = this_pts; 989 sd->last_pts = this_pts;
870 sd->last_fid = this_fid; 990 sd->last_fid = this_fid;
871 } 991 gspca_frame_add(gspca_dev, FIRST_PACKET, frame,
872
873 /* Add the data from this payload */
874 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
875 data + 12, len - 12); 992 data + 12, len - 12);
876
877 /* If this packet is marked as EOF, end the frame */ 993 /* If this packet is marked as EOF, end the frame */
878 if (data[1] & UVC_STREAM_EOF) { 994 } else if (data[1] & UVC_STREAM_EOF) {
879 sd->last_pts = 0; 995 sd->last_pts = 0;
880
881 if (frame->data_end - frame->data !=
882 gspca_dev->width * gspca_dev->height * 2) {
883 PDEBUG(D_PACK, "short frame");
884 goto discard;
885 }
886
887 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame, 996 frame = gspca_frame_add(gspca_dev, LAST_PACKET, frame,
888 NULL, 0); 997 data + 12, len - 12);
998 } else {
999
1000 /* Add the data from this payload */
1001 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
1002 data + 12, len - 12);
889 } 1003 }
890 1004
1005
891 /* Done this payload */ 1006 /* Done this payload */
892 goto scan_next; 1007 goto scan_next;
893 1008
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 153d0a91d4b5..cf3af8de6e97 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -877,6 +877,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
877 cam->cam_mode = sif_mode; 877 cam->cam_mode = sif_mode;
878 cam->nmodes = ARRAY_SIZE(sif_mode); 878 cam->nmodes = ARRAY_SIZE(sif_mode);
879 } 879 }
880 cam->npkt = 36; /* 36 packets per ISOC message */
881
880 sd->brightness = BRIGHTNESS_DEF; 882 sd->brightness = BRIGHTNESS_DEF;
881 sd->gain = GAIN_DEF; 883 sd->gain = GAIN_DEF;
882 sd->exposure = EXPOSURE_DEF; 884 sd->exposure = EXPOSURE_DEF;
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index c72e19d3ac37..dc6a6f11354a 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -62,7 +62,6 @@ struct sd {
62#define BRIDGE_SN9C105 1 62#define BRIDGE_SN9C105 1
63#define BRIDGE_SN9C110 2 63#define BRIDGE_SN9C110 2
64#define BRIDGE_SN9C120 3 64#define BRIDGE_SN9C120 3
65#define BRIDGE_SN9C325 4
66 u8 sensor; /* Type of image sensor chip */ 65 u8 sensor; /* Type of image sensor chip */
67#define SENSOR_HV7131R 0 66#define SENSOR_HV7131R 0
68#define SENSOR_MI0360 1 67#define SENSOR_MI0360 1
@@ -354,9 +353,9 @@ static const u8 sn_ov7648[0x1c] = {
354 353
355static const u8 sn_ov7660[0x1c] = { 354static const u8 sn_ov7660[0x1c] = {
356/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 355/* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */
357 0x00, 0x61, 0x40, 0x00, 0x1a, 0x20, 0x20, 0x20, 356 0x00, 0x61, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00,
358/* reg8 reg9 rega regb regc regd rege regf */ 357/* reg8 reg9 rega regb regc regd rege regf */
359 0x81, 0x21, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10, 358 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
360/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 359/* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */
361 0x03, 0x00, 0x01, 0x01, 0x08, 0x28, 0x1e, 0x20, 360 0x03, 0x00, 0x01, 0x01, 0x08, 0x28, 0x1e, 0x20,
362/* reg18 reg19 reg1a reg1b */ 361/* reg18 reg19 reg1a reg1b */
@@ -757,6 +756,7 @@ static const u8 ov7660_sensor_init[][8] = {
757 {0xc1, 0x21, 0x88, 0xaf, 0xc7, 0xdf, 0x00, 0x10}, /* gamma curve */ 756 {0xc1, 0x21, 0x88, 0xaf, 0xc7, 0xdf, 0x00, 0x10}, /* gamma curve */
758 {0xc1, 0x21, 0x8b, 0x99, 0x99, 0xcf, 0x00, 0x10}, /* reserved */ 757 {0xc1, 0x21, 0x8b, 0x99, 0x99, 0xcf, 0x00, 0x10}, /* reserved */
759 {0xb1, 0x21, 0x92, 0x00, 0x00, 0x00, 0x00, 0x10}, /* DM_LNL/H */ 758 {0xb1, 0x21, 0x92, 0x00, 0x00, 0x00, 0x00, 0x10}, /* DM_LNL/H */
759 {0xb1, 0x21, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x10},
760/****** (some exchanges in the win trace) ******/ 760/****** (some exchanges in the win trace) ******/
761 {0xa1, 0x21, 0x1e, 0x01, 0x00, 0x00, 0x00, 0x10}, /* MVFP */ 761 {0xa1, 0x21, 0x1e, 0x01, 0x00, 0x00, 0x00, 0x10}, /* MVFP */
762 /* bits[3..0]reserved */ 762 /* bits[3..0]reserved */
@@ -1065,9 +1065,9 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
1065 struct sd *sd = (struct sd *) gspca_dev; 1065 struct sd *sd = (struct sd *) gspca_dev;
1066 const u8 *reg9a; 1066 const u8 *reg9a;
1067 static const u8 reg9a_def[] = 1067 static const u8 reg9a_def[] =
1068 {0x08, 0x40, 0x20, 0x10, 0x00, 0x04}; 1068 {0x00, 0x40, 0x20, 0x00, 0x00, 0x00};
1069 static const u8 reg9a_sn9c325[] = 1069 static const u8 reg9a_spec[] =
1070 {0x0a, 0x40, 0x38, 0x30, 0x00, 0x20}; 1070 {0x00, 0x40, 0x38, 0x30, 0x00, 0x20};
1071 static const u8 regd4[] = {0x60, 0x00, 0x00}; 1071 static const u8 regd4[] = {0x60, 0x00, 0x00};
1072 1072
1073 reg_w1(gspca_dev, 0xf1, 0x00); 1073 reg_w1(gspca_dev, 0xf1, 0x00);
@@ -1077,9 +1077,10 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
1077 reg_w(gspca_dev, 0x01, &sn9c1xx[1], 2); 1077 reg_w(gspca_dev, 0x01, &sn9c1xx[1], 2);
1078 reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2); 1078 reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2);
1079 reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5); /* jfm len was 3 */ 1079 reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5); /* jfm len was 3 */
1080 switch (sd->bridge) { 1080 switch (sd->sensor) {
1081 case BRIDGE_SN9C325: 1081 case SENSOR_OV7660:
1082 reg9a = reg9a_sn9c325; 1082 case SENSOR_SP80708:
1083 reg9a = reg9a_spec;
1083 break; 1084 break;
1084 default: 1085 default:
1085 reg9a = reg9a_def; 1086 reg9a = reg9a_def;
@@ -1104,7 +1105,6 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
1104 reg_w1(gspca_dev, 0x17, 0x64); 1105 reg_w1(gspca_dev, 0x17, 0x64);
1105 reg_w1(gspca_dev, 0x01, 0x42); 1106 reg_w1(gspca_dev, 0x01, 0x42);
1106 break; 1107 break;
1107/*jfm: from win trace */
1108 case SENSOR_OV7630: 1108 case SENSOR_OV7630:
1109 reg_w1(gspca_dev, 0x01, 0x61); 1109 reg_w1(gspca_dev, 0x01, 0x61);
1110 reg_w1(gspca_dev, 0x17, 0xe2); 1110 reg_w1(gspca_dev, 0x17, 0xe2);
@@ -1114,18 +1114,15 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
1114 case SENSOR_OV7648: 1114 case SENSOR_OV7648:
1115 reg_w1(gspca_dev, 0x01, 0x63); 1115 reg_w1(gspca_dev, 0x01, 0x63);
1116 reg_w1(gspca_dev, 0x17, 0x20); 1116 reg_w1(gspca_dev, 0x17, 0x20);
1117 reg_w1(gspca_dev, 0x01, 0x62);
1117 reg_w1(gspca_dev, 0x01, 0x42); 1118 reg_w1(gspca_dev, 0x01, 0x42);
1118 break; 1119 break;
1119/*jfm: from win trace */
1120 case SENSOR_OV7660: 1120 case SENSOR_OV7660:
1121 if (sd->bridge == BRIDGE_SN9C120) { 1121 reg_w1(gspca_dev, 0x01, 0x61);
1122 reg_w1(gspca_dev, 0x01, 0x61); 1122 reg_w1(gspca_dev, 0x17, 0x20);
1123 reg_w1(gspca_dev, 0x17, 0x20); 1123 reg_w1(gspca_dev, 0x01, 0x60);
1124 reg_w1(gspca_dev, 0x01, 0x60); 1124 reg_w1(gspca_dev, 0x01, 0x40);
1125 reg_w1(gspca_dev, 0x01, 0x40); 1125 break;
1126 break;
1127 }
1128 /* fall thru */
1129 case SENSOR_SP80708: 1126 case SENSOR_SP80708:
1130 reg_w1(gspca_dev, 0x01, 0x63); 1127 reg_w1(gspca_dev, 0x01, 0x63);
1131 reg_w1(gspca_dev, 0x17, 0x20); 1128 reg_w1(gspca_dev, 0x17, 0x20);
@@ -1134,6 +1131,9 @@ static int configure_gpio(struct gspca_dev *gspca_dev,
1134 mdelay(100); 1131 mdelay(100);
1135 reg_w1(gspca_dev, 0x02, 0x62); 1132 reg_w1(gspca_dev, 0x02, 0x62);
1136 break; 1133 break;
1134/* case SENSOR_HV7131R: */
1135/* case SENSOR_MI0360: */
1136/* case SENSOR_MO4000: */
1137 default: 1137 default:
1138 reg_w1(gspca_dev, 0x01, 0x43); 1138 reg_w1(gspca_dev, 0x01, 0x43);
1139 reg_w1(gspca_dev, 0x17, 0x61); 1139 reg_w1(gspca_dev, 0x17, 0x61);
@@ -1280,6 +1280,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
1280 cam = &gspca_dev->cam; 1280 cam = &gspca_dev->cam;
1281 cam->cam_mode = vga_mode; 1281 cam->cam_mode = vga_mode;
1282 cam->nmodes = ARRAY_SIZE(vga_mode); 1282 cam->nmodes = ARRAY_SIZE(vga_mode);
1283 cam->npkt = 24; /* 24 packets per ISOC message */
1283 1284
1284 sd->bridge = id->driver_info >> 16; 1285 sd->bridge = id->driver_info >> 16;
1285 sd->sensor = id->driver_info >> 8; 1286 sd->sensor = id->driver_info >> 8;
@@ -1683,13 +1684,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
1683 case SENSOR_OV7648: 1684 case SENSOR_OV7648:
1684 reg17 = 0x20; 1685 reg17 = 0x20;
1685 break; 1686 break;
1686/*jfm: from win trace */
1687 case SENSOR_OV7660: 1687 case SENSOR_OV7660:
1688 if (sd->bridge == BRIDGE_SN9C120) { 1688 reg17 = 0xa0;
1689 reg17 = 0xa0; 1689 break;
1690 break;
1691 }
1692 /* fall thru */
1693 default: 1690 default:
1694 reg17 = 0x60; 1691 reg17 = 0x60;
1695 break; 1692 break;
@@ -1714,16 +1711,17 @@ static int sd_start(struct gspca_dev *gspca_dev)
1714 reg_w1(gspca_dev, 0x9a, 0x0a); 1711 reg_w1(gspca_dev, 0x9a, 0x0a);
1715 reg_w1(gspca_dev, 0x99, 0x60); 1712 reg_w1(gspca_dev, 0x99, 0x60);
1716 break; 1713 break;
1714 case SENSOR_OV7660:
1715 reg_w1(gspca_dev, 0x9a, 0x05);
1716 if (sd->bridge == BRIDGE_SN9C105)
1717 reg_w1(gspca_dev, 0x99, 0xff);
1718 else
1719 reg_w1(gspca_dev, 0x99, 0x5b);
1720 break;
1717 case SENSOR_SP80708: 1721 case SENSOR_SP80708:
1718 reg_w1(gspca_dev, 0x9a, 0x05); 1722 reg_w1(gspca_dev, 0x9a, 0x05);
1719 reg_w1(gspca_dev, 0x99, 0x59); 1723 reg_w1(gspca_dev, 0x99, 0x59);
1720 break; 1724 break;
1721 case SENSOR_OV7660:
1722 if (sd->bridge == BRIDGE_SN9C120) {
1723 reg_w1(gspca_dev, 0x9a, 0x05);
1724 break;
1725 }
1726 /* fall thru */
1727 default: 1725 default:
1728 reg_w1(gspca_dev, 0x9a, 0x08); 1726 reg_w1(gspca_dev, 0x9a, 0x08);
1729 reg_w1(gspca_dev, 0x99, 0x59); 1727 reg_w1(gspca_dev, 0x99, 0x59);
@@ -2193,6 +2191,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
2193 {USB_DEVICE(0x0471, 0x0328), BSI(SN9C105, MI0360, 0x5d)}, 2191 {USB_DEVICE(0x0471, 0x0328), BSI(SN9C105, MI0360, 0x5d)},
2194 {USB_DEVICE(0x0471, 0x0330), BSI(SN9C105, MI0360, 0x5d)}, 2192 {USB_DEVICE(0x0471, 0x0330), BSI(SN9C105, MI0360, 0x5d)},
2195 {USB_DEVICE(0x06f8, 0x3004), BSI(SN9C105, OV7660, 0x21)}, 2193 {USB_DEVICE(0x06f8, 0x3004), BSI(SN9C105, OV7660, 0x21)},
2194 {USB_DEVICE(0x06f8, 0x3008), BSI(SN9C105, OV7660, 0x21)},
2196 {USB_DEVICE(0x0c45, 0x6040), BSI(SN9C102P, HV7131R, 0x11)}, 2195 {USB_DEVICE(0x0c45, 0x6040), BSI(SN9C102P, HV7131R, 0x11)},
2197/* bw600.inf: 2196/* bw600.inf:
2198 {USB_DEVICE(0x0c45, 0x6040), BSI(SN9C102P, MI0360, 0x5d)}, */ 2197 {USB_DEVICE(0x0c45, 0x6040), BSI(SN9C102P, MI0360, 0x5d)}, */
@@ -2211,7 +2210,12 @@ static const __devinitdata struct usb_device_id device_table[] = {
2211#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE 2210#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
2212 {USB_DEVICE(0x0c45, 0x60fe), BSI(SN9C105, OV7630, 0x21)}, 2211 {USB_DEVICE(0x0c45, 0x60fe), BSI(SN9C105, OV7630, 0x21)},
2213#endif 2212#endif
2213 {USB_DEVICE(0x0c45, 0x6100), BSI(SN9C120, MI0360, 0x5d)}, /*sn9c128*/
2214/* {USB_DEVICE(0x0c45, 0x6108), BSI(SN9C120, OM6801, 0x??)}, */ 2214/* {USB_DEVICE(0x0c45, 0x6108), BSI(SN9C120, OM6801, 0x??)}, */
2215 {USB_DEVICE(0x0c45, 0x610a), BSI(SN9C120, OV7648, 0x21)}, /*sn9c128*/
2216 {USB_DEVICE(0x0c45, 0x610b), BSI(SN9C120, OV7660, 0x21)}, /*sn9c128*/
2217 {USB_DEVICE(0x0c45, 0x610c), BSI(SN9C120, HV7131R, 0x11)}, /*sn9c128*/
2218 {USB_DEVICE(0x0c45, 0x610e), BSI(SN9C120, OV7630, 0x21)}, /*sn9c128*/
2215/* {USB_DEVICE(0x0c45, 0x6122), BSI(SN9C110, ICM105C, 0x??)}, */ 2219/* {USB_DEVICE(0x0c45, 0x6122), BSI(SN9C110, ICM105C, 0x??)}, */
2216/* {USB_DEVICE(0x0c45, 0x6123), BSI(SN9C110, SanyoCCD, 0x??)}, */ 2220/* {USB_DEVICE(0x0c45, 0x6123), BSI(SN9C110, SanyoCCD, 0x??)}, */
2217 {USB_DEVICE(0x0c45, 0x6128), BSI(SN9C110, OM6802, 0x21)}, /*sn9c325?*/ 2221 {USB_DEVICE(0x0c45, 0x6128), BSI(SN9C110, OM6802, 0x21)}, /*sn9c325?*/
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index 6f38fa6d86b6..8806b2ff82be 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -32,9 +32,6 @@ MODULE_LICENSE("GPL");
32struct sd { 32struct sd {
33 struct gspca_dev gspca_dev; /* !! must be the first item */ 33 struct gspca_dev gspca_dev; /* !! must be the first item */
34 34
35 __u8 packet[ISO_MAX_SIZE + 128];
36 /* !! no more than 128 ff in an ISO packet */
37
38 unsigned char brightness; 35 unsigned char brightness;
39 unsigned char contrast; 36 unsigned char contrast;
40 unsigned char colors; 37 unsigned char colors;
@@ -906,7 +903,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
906{ 903{
907 struct sd *sd = (struct sd *) gspca_dev; 904 struct sd *sd = (struct sd *) gspca_dev;
908 int i; 905 int i;
909 __u8 *s, *d;
910 static __u8 ffd9[] = {0xff, 0xd9}; 906 static __u8 ffd9[] = {0xff, 0xd9};
911 907
912/* frames are jpeg 4.1.1 without 0xff escape */ 908/* frames are jpeg 4.1.1 without 0xff escape */
@@ -930,22 +926,19 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
930 } 926 }
931 927
932 /* add 0x00 after 0xff */ 928 /* add 0x00 after 0xff */
933 for (i = len; --i >= 0; ) 929 i = 0;
934 if (data[i] == 0xff) 930 do {
935 break; 931 if (data[i] == 0xff) {
936 if (i < 0) { /* no 0xff */ 932 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
937 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len); 933 data, i + 1);
938 return; 934 len -= i;
939 } 935 data += i;
940 s = data; 936 *data = 0x00;
941 d = sd->packet; 937 i = 0;
942 for (i = 0; i < len; i++) { 938 }
943 *d++ = *s++; 939 i++;
944 if (s[-1] == 0xff) 940 } while (i < len);
945 *d++ = 0x00; 941 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
946 }
947 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
948 sd->packet, d - sd->packet);
949} 942}
950 943
951static void setbrightness(struct gspca_dev *gspca_dev) 944static void setbrightness(struct gspca_dev *gspca_dev)
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index 2acec58b1b97..ea8c9fe2e961 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -637,19 +637,19 @@ static int sd_config(struct gspca_dev *gspca_dev,
637 cam->nmodes = ARRAY_SIZE(vga_mode) - 1; 637 cam->nmodes = ARRAY_SIZE(vga_mode) - 1;
638 sd->brightness = BRIGHTNESS_DEF; 638 sd->brightness = BRIGHTNESS_DEF;
639 639
640 if (sd->subtype == Nxultra) {
641 if (write_vector(gspca_dev, spca505b_init_data))
642 return -EIO;
643 } else {
644 if (write_vector(gspca_dev, spca505_init_data))
645 return -EIO;
646 }
647 return 0; 640 return 0;
648} 641}
649 642
650/* this function is called at probe and resume time */ 643/* this function is called at probe and resume time */
651static int sd_init(struct gspca_dev *gspca_dev) 644static int sd_init(struct gspca_dev *gspca_dev)
652{ 645{
646 struct sd *sd = (struct sd *) gspca_dev;
647
648 if (write_vector(gspca_dev,
649 sd->subtype == Nxultra
650 ? spca505b_init_data
651 : spca505_init_data))
652 return -EIO;
653 return 0; 653 return 0;
654} 654}
655 655
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index adacf8437661..2ed2669bac3e 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * SPCA508 chip based cameras subdriver 2 * SPCA508 chip based cameras subdriver
3 * 3 *
4 * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> 4 * Copyright (C) 2009 Jean-Francois Moine <http://moinejf.free.fr>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -30,9 +30,9 @@ MODULE_LICENSE("GPL");
30struct sd { 30struct sd {
31 struct gspca_dev gspca_dev; /* !! must be the first item */ 31 struct gspca_dev gspca_dev; /* !! must be the first item */
32 32
33 unsigned char brightness; 33 u8 brightness;
34 34
35 char subtype; 35 u8 subtype;
36#define CreativeVista 0 36#define CreativeVista 0
37#define HamaUSBSightcam 1 37#define HamaUSBSightcam 1
38#define HamaUSBSightcam2 2 38#define HamaUSBSightcam2 2
@@ -86,58 +86,34 @@ static const struct v4l2_pix_format sif_mode[] = {
86}; 86};
87 87
88/* Frame packet header offsets for the spca508 */ 88/* Frame packet header offsets for the spca508 */
89#define SPCA508_OFFSET_TYPE 1
90#define SPCA508_OFFSET_COMPRESS 2
91#define SPCA508_OFFSET_FRAMSEQ 8
92#define SPCA508_OFFSET_WIN1LUM 11
93#define SPCA508_OFFSET_DATA 37 89#define SPCA508_OFFSET_DATA 37
94 90
95#define SPCA508_SNAPBIT 0x20
96#define SPCA508_SNAPCTRL 0x40
97/*************** I2c ****************/
98#define SPCA508_INDEX_I2C_BASE 0x8800
99
100/* 91/*
101 * Initialization data: this is the first set-up data written to the 92 * Initialization data: this is the first set-up data written to the
102 * device (before the open data). 93 * device (before the open data).
103 */ 94 */
104static const u16 spca508_init_data[][2] = 95static const u16 spca508_init_data[][2] =
105{ 96{
106 /* line URB value, index */ 97 {0x0000, 0x870b},
107 /* 44274 1804 */ {0x0000, 0x870b}, 98
108 99 {0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
109 /* 44299 1805 */ {0x0020, 0x8112}, 100 {0x0003, 0x8111}, /* Reset compression & memory */
110 /* Video drop enable, ISO streaming disable */ 101 {0x0000, 0x8110}, /* Disable all outputs */
111 /* 44324 1806 */ {0x0003, 0x8111}, 102 /* READ {0x0000, 0x8114} -> 0000: 00 */
112 /* Reset compression & memory */ 103 {0x0000, 0x8114}, /* SW GPIO data */
113 /* 44349 1807 */ {0x0000, 0x8110}, 104 {0x0008, 0x8110}, /* Enable charge pump output */
114 /* Disable all outputs */ 105 {0x0002, 0x8116}, /* 200 kHz pump clock */
115 /* 44372 1808 */ /* READ {0x0000, 0x8114} -> 0000: 00 */ 106 /* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE:) */
116 /* 44398 1809 */ {0x0000, 0x8114}, 107 {0x0003, 0x8111}, /* Reset compression & memory */
117 /* SW GPIO data */ 108 {0x0000, 0x8111}, /* Normal mode (not reset) */
118 /* 44423 1810 */ {0x0008, 0x8110}, 109 {0x0098, 0x8110},
119 /* Enable charge pump output */ 110 /* Enable charge pump output, sync.serial,external 2x clock */
120 /* 44527 1811 */ {0x0002, 0x8116}, 111 {0x000d, 0x8114}, /* SW GPIO data */
121 /* 200 kHz pump clock */ 112 {0x0002, 0x8116}, /* 200 kHz pump clock */
122 /* 44555 1812 */ 113 {0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
123 /* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE:) */
124 /* 44590 1813 */ {0x0003, 0x8111},
125 /* Reset compression & memory */
126 /* 44615 1814 */ {0x0000, 0x8111},
127 /* Normal mode (not reset) */
128 /* 44640 1815 */ {0x0098, 0x8110},
129 /* Enable charge pump output, sync.serial,external 2x clock */
130 /* 44665 1816 */ {0x000d, 0x8114},
131 /* SW GPIO data */
132 /* 44690 1817 */ {0x0002, 0x8116},
133 /* 200 kHz pump clock */
134 /* 44715 1818 */ {0x0020, 0x8112},
135 /* Video drop enable, ISO streaming disable */
136/* --------------------------------------- */ 114/* --------------------------------------- */
137 /* 44740 1819 */ {0x000f, 0x8402}, 115 {0x000f, 0x8402}, /* memory bank */
138 /* memory bank */ 116 {0x0000, 0x8403}, /* ... address */
139 /* 44765 1820 */ {0x0000, 0x8403},
140 /* ... address */
141/* --------------------------------------- */ 117/* --------------------------------------- */
142/* 0x88__ is Synchronous Serial Interface. */ 118/* 0x88__ is Synchronous Serial Interface. */
143/* TBD: This table could be expressed more compactly */ 119/* TBD: This table could be expressed more compactly */
@@ -145,446 +121,384 @@ static const u16 spca508_init_data[][2] =
145/* TBD: Should see if the values in spca50x_i2c_data */ 121/* TBD: Should see if the values in spca50x_i2c_data */
146/* would work with the VQ110 instead of the values */ 122/* would work with the VQ110 instead of the values */
147/* below. */ 123/* below. */
148 /* 44790 1821 */ {0x00c0, 0x8804}, 124 {0x00c0, 0x8804}, /* SSI slave addr */
149 /* SSI slave addr */ 125 {0x0008, 0x8802}, /* 375 Khz SSI clock */
150 /* 44815 1822 */ {0x0008, 0x8802}, 126 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
151 /* 375 Khz SSI clock */ 127 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
152 /* 44838 1823 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 128 {0x0008, 0x8802}, /* 375 Khz SSI clock */
153 /* 44862 1824 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 129 {0x0012, 0x8801}, /* SSI reg addr */
154 /* 44888 1825 */ {0x0008, 0x8802}, 130 {0x0080, 0x8800}, /* SSI data to write */
155 /* 375 Khz SSI clock */ 131 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
156 /* 44913 1826 */ {0x0012, 0x8801}, 132 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
157 /* SSI reg addr */ 133 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
158 /* 44938 1827 */ {0x0080, 0x8800}, 134 {0x0008, 0x8802}, /* 375 Khz SSI clock */
159 /* SSI data to write */ 135 {0x0012, 0x8801}, /* SSI reg addr */
160 /* 44961 1828 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 136 {0x0000, 0x8800}, /* SSI data to write */
161 /* 44985 1829 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 137 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
162 /* 45009 1830 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 138 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
163 /* 45035 1831 */ {0x0008, 0x8802}, 139 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
164 /* 375 Khz SSI clock */ 140 {0x0008, 0x8802}, /* 375 Khz SSI clock */
165 /* 45060 1832 */ {0x0012, 0x8801}, 141 {0x0011, 0x8801}, /* SSI reg addr */
166 /* SSI reg addr */ 142 {0x0040, 0x8800}, /* SSI data to write */
167 /* 45085 1833 */ {0x0000, 0x8800}, 143 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
168 /* SSI data to write */ 144 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
169 /* 45108 1834 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 145 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
170 /* 45132 1835 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 146 {0x0008, 0x8802},
171 /* 45156 1836 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 147 {0x0013, 0x8801},
172 /* 45182 1837 */ {0x0008, 0x8802}, 148 {0x0000, 0x8800},
173 /* 375 Khz SSI clock */ 149 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
174 /* 45207 1838 */ {0x0011, 0x8801}, 150 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
175 /* SSI reg addr */ 151 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
176 /* 45232 1839 */ {0x0040, 0x8800}, 152 {0x0008, 0x8802},
177 /* SSI data to write */ 153 {0x0014, 0x8801},
178 /* 45255 1840 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 154 {0x0000, 0x8800},
179 /* 45279 1841 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 155 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
180 /* 45303 1842 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 156 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
181 /* 45329 1843 */ {0x0008, 0x8802}, 157 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
182 /* 45354 1844 */ {0x0013, 0x8801}, 158 {0x0008, 0x8802},
183 /* 45379 1845 */ {0x0000, 0x8800}, 159 {0x0015, 0x8801},
184 /* 45402 1846 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 160 {0x0001, 0x8800},
185 /* 45426 1847 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 161 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
186 /* 45450 1848 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 162 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
187 /* 45476 1849 */ {0x0008, 0x8802}, 163 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
188 /* 45501 1850 */ {0x0014, 0x8801}, 164 {0x0008, 0x8802},
189 /* 45526 1851 */ {0x0000, 0x8800}, 165 {0x0016, 0x8801},
190 /* 45549 1852 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 166 {0x0003, 0x8800},
191 /* 45573 1853 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 167 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
192 /* 45597 1854 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 168 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
193 /* 45623 1855 */ {0x0008, 0x8802}, 169 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
194 /* 45648 1856 */ {0x0015, 0x8801}, 170 {0x0008, 0x8802},
195 /* 45673 1857 */ {0x0001, 0x8800}, 171 {0x0017, 0x8801},
196 /* 45696 1858 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 172 {0x0036, 0x8800},
197 /* 45720 1859 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 173 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
198 /* 45744 1860 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 174 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
199 /* 45770 1861 */ {0x0008, 0x8802}, 175 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
200 /* 45795 1862 */ {0x0016, 0x8801}, 176 {0x0008, 0x8802},
201 /* 45820 1863 */ {0x0003, 0x8800}, 177 {0x0018, 0x8801},
202 /* 45843 1864 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 178 {0x00ec, 0x8800},
203 /* 45867 1865 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 179 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
204 /* 45891 1866 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 180 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
205 /* 45917 1867 */ {0x0008, 0x8802}, 181 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
206 /* 45942 1868 */ {0x0017, 0x8801}, 182 {0x0008, 0x8802},
207 /* 45967 1869 */ {0x0036, 0x8800}, 183 {0x001a, 0x8801},
208 /* 45990 1870 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 184 {0x0094, 0x8800},
209 /* 46014 1871 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 185 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
210 /* 46038 1872 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 186 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
211 /* 46064 1873 */ {0x0008, 0x8802}, 187 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
212 /* 46089 1874 */ {0x0018, 0x8801}, 188 {0x0008, 0x8802},
213 /* 46114 1875 */ {0x00ec, 0x8800}, 189 {0x001b, 0x8801},
214 /* 46137 1876 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 190 {0x0000, 0x8800},
215 /* 46161 1877 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 191 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
216 /* 46185 1878 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 192 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
217 /* 46211 1879 */ {0x0008, 0x8802}, 193 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
218 /* 46236 1880 */ {0x001a, 0x8801}, 194 {0x0008, 0x8802},
219 /* 46261 1881 */ {0x0094, 0x8800}, 195 {0x0027, 0x8801},
220 /* 46284 1882 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 196 {0x00a2, 0x8800},
221 /* 46308 1883 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 197 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
222 /* 46332 1884 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 198 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
223 /* 46358 1885 */ {0x0008, 0x8802}, 199 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
224 /* 46383 1886 */ {0x001b, 0x8801}, 200 {0x0008, 0x8802},
225 /* 46408 1887 */ {0x0000, 0x8800}, 201 {0x0028, 0x8801},
226 /* 46431 1888 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 202 {0x0040, 0x8800},
227 /* 46455 1889 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 203 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
228 /* 46479 1890 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 204 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
229 /* 46505 1891 */ {0x0008, 0x8802}, 205 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
230 /* 46530 1892 */ {0x0027, 0x8801}, 206 {0x0008, 0x8802},
231 /* 46555 1893 */ {0x00a2, 0x8800}, 207 {0x002a, 0x8801},
232 /* 46578 1894 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 208 {0x0084, 0x8800},
233 /* 46602 1895 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 209 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
234 /* 46626 1896 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 210 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
235 /* 46652 1897 */ {0x0008, 0x8802}, 211 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
236 /* 46677 1898 */ {0x0028, 0x8801}, 212 {0x0008, 0x8802},
237 /* 46702 1899 */ {0x0040, 0x8800}, 213 {0x002b, 0x8801},
238 /* 46725 1900 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 214 {0x00a8, 0x8800},
239 /* 46749 1901 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 215 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
240 /* 46773 1902 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 216 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
241 /* 46799 1903 */ {0x0008, 0x8802}, 217 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
242 /* 46824 1904 */ {0x002a, 0x8801}, 218 {0x0008, 0x8802},
243 /* 46849 1905 */ {0x0084, 0x8800}, 219 {0x002c, 0x8801},
244 /* 46872 1906 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 220 {0x00fe, 0x8800},
245 /* 46896 1907 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 221 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
246 /* 46920 1908 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 222 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
247 /* 46946 1909 */ {0x0008, 0x8802}, 223 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
248 /* 46971 1910 */ {0x002b, 0x8801}, 224 {0x0008, 0x8802},
249 /* 46996 1911 */ {0x00a8, 0x8800}, 225 {0x002d, 0x8801},
250 /* 47019 1912 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 226 {0x0003, 0x8800},
251 /* 47043 1913 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 227 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
252 /* 47067 1914 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 228 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
253 /* 47093 1915 */ {0x0008, 0x8802}, 229 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
254 /* 47118 1916 */ {0x002c, 0x8801}, 230 {0x0008, 0x8802},
255 /* 47143 1917 */ {0x00fe, 0x8800}, 231 {0x0038, 0x8801},
256 /* 47166 1918 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 232 {0x0083, 0x8800},
257 /* 47190 1919 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 233 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
258 /* 47214 1920 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 234 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
259 /* 47240 1921 */ {0x0008, 0x8802}, 235 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
260 /* 47265 1922 */ {0x002d, 0x8801}, 236 {0x0008, 0x8802},
261 /* 47290 1923 */ {0x0003, 0x8800}, 237 {0x0033, 0x8801},
262 /* 47313 1924 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 238 {0x0081, 0x8800},
263 /* 47337 1925 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 239 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
264 /* 47361 1926 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 240 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
265 /* 47387 1927 */ {0x0008, 0x8802}, 241 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
266 /* 47412 1928 */ {0x0038, 0x8801}, 242 {0x0008, 0x8802},
267 /* 47437 1929 */ {0x0083, 0x8800}, 243 {0x0034, 0x8801},
268 /* 47460 1930 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 244 {0x004a, 0x8800},
269 /* 47484 1931 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 245 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
270 /* 47508 1932 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 246 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
271 /* 47534 1933 */ {0x0008, 0x8802}, 247 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
272 /* 47559 1934 */ {0x0033, 0x8801}, 248 {0x0008, 0x8802},
273 /* 47584 1935 */ {0x0081, 0x8800}, 249 {0x0039, 0x8801},
274 /* 47607 1936 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 250 {0x0000, 0x8800},
275 /* 47631 1937 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 251 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
276 /* 47655 1938 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 252 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
277 /* 47681 1939 */ {0x0008, 0x8802}, 253 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
278 /* 47706 1940 */ {0x0034, 0x8801}, 254 {0x0008, 0x8802},
279 /* 47731 1941 */ {0x004a, 0x8800}, 255 {0x0010, 0x8801},
280 /* 47754 1942 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 256 {0x00a8, 0x8800},
281 /* 47778 1943 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 257 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
282 /* 47802 1944 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 258 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
283 /* 47828 1945 */ {0x0008, 0x8802}, 259 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
284 /* 47853 1946 */ {0x0039, 0x8801}, 260 {0x0008, 0x8802},
285 /* 47878 1947 */ {0x0000, 0x8800}, 261 {0x0006, 0x8801},
286 /* 47901 1948 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 262 {0x0058, 0x8800},
287 /* 47925 1949 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 263 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
288 /* 47949 1950 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 264 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
289 /* 47975 1951 */ {0x0008, 0x8802}, 265 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
290 /* 48000 1952 */ {0x0010, 0x8801}, 266 {0x0008, 0x8802},
291 /* 48025 1953 */ {0x00a8, 0x8800}, 267 {0x0000, 0x8801},
292 /* 48048 1954 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 268 {0x0004, 0x8800},
293 /* 48072 1955 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 269 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
294 /* 48096 1956 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 270 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
295 /* 48122 1957 */ {0x0008, 0x8802}, 271 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
296 /* 48147 1958 */ {0x0006, 0x8801}, 272 {0x0008, 0x8802},
297 /* 48172 1959 */ {0x0058, 0x8800}, 273 {0x0040, 0x8801},
298 /* 48195 1960 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 274 {0x0080, 0x8800},
299 /* 48219 1961 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 275 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
300 /* 48243 1962 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 276 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
301 /* 48269 1963 */ {0x0008, 0x8802}, 277 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
302 /* 48294 1964 */ {0x0000, 0x8801}, 278 {0x0008, 0x8802},
303 /* 48319 1965 */ {0x0004, 0x8800}, 279 {0x0041, 0x8801},
304 /* 48342 1966 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 280 {0x000c, 0x8800},
305 /* 48366 1967 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 281 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
306 /* 48390 1968 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 282 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
307 /* 48416 1969 */ {0x0008, 0x8802}, 283 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
308 /* 48441 1970 */ {0x0040, 0x8801}, 284 {0x0008, 0x8802},
309 /* 48466 1971 */ {0x0080, 0x8800}, 285 {0x0042, 0x8801},
310 /* 48489 1972 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 286 {0x000c, 0x8800},
311 /* 48513 1973 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 287 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
312 /* 48537 1974 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 288 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
313 /* 48563 1975 */ {0x0008, 0x8802}, 289 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
314 /* 48588 1976 */ {0x0041, 0x8801}, 290 {0x0008, 0x8802},
315 /* 48613 1977 */ {0x000c, 0x8800}, 291 {0x0043, 0x8801},
316 /* 48636 1978 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 292 {0x0028, 0x8800},
317 /* 48660 1979 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 293 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
318 /* 48684 1980 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 294 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
319 /* 48710 1981 */ {0x0008, 0x8802}, 295 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
320 /* 48735 1982 */ {0x0042, 0x8801}, 296 {0x0008, 0x8802},
321 /* 48760 1983 */ {0x000c, 0x8800}, 297 {0x0044, 0x8801},
322 /* 48783 1984 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 298 {0x0080, 0x8800},
323 /* 48807 1985 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 299 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
324 /* 48831 1986 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 300 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
325 /* 48857 1987 */ {0x0008, 0x8802}, 301 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
326 /* 48882 1988 */ {0x0043, 0x8801}, 302 {0x0008, 0x8802},
327 /* 48907 1989 */ {0x0028, 0x8800}, 303 {0x0045, 0x8801},
328 /* 48930 1990 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 304 {0x0020, 0x8800},
329 /* 48954 1991 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 305 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
330 /* 48978 1992 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 306 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
331 /* 49004 1993 */ {0x0008, 0x8802}, 307 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
332 /* 49029 1994 */ {0x0044, 0x8801}, 308 {0x0008, 0x8802},
333 /* 49054 1995 */ {0x0080, 0x8800}, 309 {0x0046, 0x8801},
334 /* 49077 1996 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 310 {0x0020, 0x8800},
335 /* 49101 1997 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 311 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
336 /* 49125 1998 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 312 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
337 /* 49151 1999 */ {0x0008, 0x8802}, 313 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
338 /* 49176 2000 */ {0x0045, 0x8801}, 314 {0x0008, 0x8802},
339 /* 49201 2001 */ {0x0020, 0x8800}, 315 {0x0047, 0x8801},
340 /* 49224 2002 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 316 {0x0080, 0x8800},
341 /* 49248 2003 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 317 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
342 /* 49272 2004 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 318 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
343 /* 49298 2005 */ {0x0008, 0x8802}, 319 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
344 /* 49323 2006 */ {0x0046, 0x8801}, 320 {0x0008, 0x8802},
345 /* 49348 2007 */ {0x0020, 0x8800}, 321 {0x0048, 0x8801},
346 /* 49371 2008 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 322 {0x004c, 0x8800},
347 /* 49395 2009 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 323 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
348 /* 49419 2010 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 324 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
349 /* 49445 2011 */ {0x0008, 0x8802}, 325 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
350 /* 49470 2012 */ {0x0047, 0x8801}, 326 {0x0008, 0x8802},
351 /* 49495 2013 */ {0x0080, 0x8800}, 327 {0x0049, 0x8801},
352 /* 49518 2014 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 328 {0x0084, 0x8800},
353 /* 49542 2015 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 329 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
354 /* 49566 2016 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 330 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
355 /* 49592 2017 */ {0x0008, 0x8802}, 331 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
356 /* 49617 2018 */ {0x0048, 0x8801}, 332 {0x0008, 0x8802},
357 /* 49642 2019 */ {0x004c, 0x8800}, 333 {0x004a, 0x8801},
358 /* 49665 2020 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 334 {0x0084, 0x8800},
359 /* 49689 2021 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 335 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
360 /* 49713 2022 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 336 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
361 /* 49739 2023 */ {0x0008, 0x8802}, 337 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
362 /* 49764 2024 */ {0x0049, 0x8801}, 338 {0x0008, 0x8802},
363 /* 49789 2025 */ {0x0084, 0x8800}, 339 {0x004b, 0x8801},
364 /* 49812 2026 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 340 {0x0084, 0x8800},
365 /* 49836 2027 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 341 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
366 /* 49860 2028 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
367 /* 49886 2029 */ {0x0008, 0x8802},
368 /* 49911 2030 */ {0x004a, 0x8801},
369 /* 49936 2031 */ {0x0084, 0x8800},
370 /* 49959 2032 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
371 /* 49983 2033 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
372 /* 50007 2034 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */
373 /* 50033 2035 */ {0x0008, 0x8802},
374 /* 50058 2036 */ {0x004b, 0x8801},
375 /* 50083 2037 */ {0x0084, 0x8800},
376 /* 50106 2038 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
377 /* --------------------------------------- */ 342 /* --------------------------------------- */
378 /* 50132 2039 */ {0x0012, 0x8700}, 343 {0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
379 /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */ 344 {0x0000, 0x8701}, /* CKx1 clock delay adj */
380 /* 50157 2040 */ {0x0000, 0x8701}, 345 {0x0000, 0x8701}, /* CKx1 clock delay adj */
381 /* CKx1 clock delay adj */ 346 {0x0001, 0x870c}, /* CKOx2 output */
382 /* 50182 2041 */ {0x0000, 0x8701},
383 /* CKx1 clock delay adj */
384 /* 50207 2042 */ {0x0001, 0x870c},
385 /* CKOx2 output */
386 /* --------------------------------------- */ 347 /* --------------------------------------- */
387 /* 50232 2043 */ {0x0080, 0x8600}, 348 {0x0080, 0x8600}, /* Line memory read counter (L) */
388 /* Line memory read counter (L) */ 349 {0x0001, 0x8606}, /* reserved */
389 /* 50257 2044 */ {0x0001, 0x8606}, 350 {0x0064, 0x8607}, /* Line memory read counter (H) 0x6480=25,728 */
390 /* reserved */ 351 {0x002a, 0x8601}, /* CDSP sharp interpolation mode,
391 /* 50282 2045 */ {0x0064, 0x8607},
392 /* Line memory read counter (H) 0x6480=25,728 */
393 /* 50307 2046 */ {0x002a, 0x8601},
394 /* CDSP sharp interpolation mode,
395 * line sel for color sep, edge enhance enab */ 352 * line sel for color sep, edge enhance enab */
396 /* 50332 2047 */ {0x0000, 0x8602}, 353 {0x0000, 0x8602}, /* optical black level for user settng = 0 */
397 /* optical black level for user settng = 0 */ 354 {0x0080, 0x8600}, /* Line memory read counter (L) */
398 /* 50357 2048 */ {0x0080, 0x8600}, 355 {0x000a, 0x8603}, /* optical black level calc mode:
399 /* Line memory read counter (L) */ 356 * auto; optical black offset = 10 */
400 /* 50382 2049 */ {0x000a, 0x8603}, 357 {0x00df, 0x865b}, /* Horiz offset for valid pixels (L)=0xdf */
401 /* optical black level calc mode: auto; optical black offset = 10 */ 358 {0x0012, 0x865c}, /* Vert offset for valid lines (L)=0x12 */
402 /* 50407 2050 */ {0x00df, 0x865b},
403 /* Horiz offset for valid pixels (L)=0xdf */
404 /* 50432 2051 */ {0x0012, 0x865c},
405 /* Vert offset for valid lines (L)=0x12 */
406 359
407/* The following two lines seem to be the "wrong" resolution. */ 360/* The following two lines seem to be the "wrong" resolution. */
408/* But perhaps these indicate the actual size of the sensor */ 361/* But perhaps these indicate the actual size of the sensor */
409/* rather than the size of the current video mode. */ 362/* rather than the size of the current video mode. */
410 /* 50457 2052 */ {0x0058, 0x865d}, 363 {0x0058, 0x865d}, /* Horiz valid pixels (*4) (L) = 352 */
411 /* Horiz valid pixels (*4) (L) = 352 */ 364 {0x0048, 0x865e}, /* Vert valid lines (*4) (L) = 288 */
412 /* 50482 2053 */ {0x0048, 0x865e}, 365
413 /* Vert valid lines (*4) (L) = 288 */ 366 {0x0015, 0x8608}, /* A11 Coef ... */
414 367 {0x0030, 0x8609},
415 /* 50507 2054 */ {0x0015, 0x8608}, 368 {0x00fb, 0x860a},
416 /* A11 Coef ... */ 369 {0x003e, 0x860b},
417 /* 50532 2055 */ {0x0030, 0x8609}, 370 {0x00ce, 0x860c},
418 /* 50557 2056 */ {0x00fb, 0x860a}, 371 {0x00f4, 0x860d},
419 /* 50582 2057 */ {0x003e, 0x860b}, 372 {0x00eb, 0x860e},
420 /* 50607 2058 */ {0x00ce, 0x860c}, 373 {0x00dc, 0x860f},
421 /* 50632 2059 */ {0x00f4, 0x860d}, 374 {0x0039, 0x8610},
422 /* 50657 2060 */ {0x00eb, 0x860e}, 375 {0x0001, 0x8611}, /* R offset for white balance ... */
423 /* 50682 2061 */ {0x00dc, 0x860f}, 376 {0x0000, 0x8612},
424 /* 50707 2062 */ {0x0039, 0x8610}, 377 {0x0001, 0x8613},
425 /* 50732 2063 */ {0x0001, 0x8611}, 378 {0x0000, 0x8614},
426 /* R offset for white balance ... */ 379 {0x005b, 0x8651}, /* R gain for white balance ... */
427 /* 50757 2064 */ {0x0000, 0x8612}, 380 {0x0040, 0x8652},
428 /* 50782 2065 */ {0x0001, 0x8613}, 381 {0x0060, 0x8653},
429 /* 50807 2066 */ {0x0000, 0x8614}, 382 {0x0040, 0x8654},
430 /* 50832 2067 */ {0x005b, 0x8651}, 383 {0x0000, 0x8655},
431 /* R gain for white balance ... */ 384 {0x0001, 0x863f}, /* Fixed gamma correction enable, USB control,
432 /* 50857 2068 */ {0x0040, 0x8652}, 385 * lum filter disable, lum noise clip disable */
433 /* 50882 2069 */ {0x0060, 0x8653}, 386 {0x00a1, 0x8656}, /* Window1 size 256x256, Windows2 size 64x64,
434 /* 50907 2070 */ {0x0040, 0x8654}, 387 * gamma look-up disable,
435 /* 50932 2071 */ {0x0000, 0x8655}, 388 * new edge enhancement enable */
436 /* 50957 2072 */ {0x0001, 0x863f}, 389 {0x0018, 0x8657}, /* Edge gain high thresh */
437 /* Fixed gamma correction enable, USB control, 390 {0x0020, 0x8658}, /* Edge gain low thresh */
438 * lum filter disable, lum noise clip disable */ 391 {0x000a, 0x8659}, /* Edge bandwidth high threshold */
439 /* 50982 2073 */ {0x00a1, 0x8656}, 392 {0x0005, 0x865a}, /* Edge bandwidth low threshold */
440 /* Window1 size 256x256, Windows2 size 64x64,
441 * gamma look-up disable, new edge enhancement enable */
442 /* 51007 2074 */ {0x0018, 0x8657},
443 /* Edge gain high thresh */
444 /* 51032 2075 */ {0x0020, 0x8658},
445 /* Edge gain low thresh */
446 /* 51057 2076 */ {0x000a, 0x8659},
447 /* Edge bandwidth high threshold */
448 /* 51082 2077 */ {0x0005, 0x865a},
449 /* Edge bandwidth low threshold */
450 /* -------------------------------- */ 393 /* -------------------------------- */
451 /* 51107 2078 */ {0x0030, 0x8112}, 394 {0x0030, 0x8112}, /* Video drop enable, ISO streaming enable */
452 /* Video drop enable, ISO streaming enable */ 395 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
453 /* 51130 2079 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 396 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
454 /* 51154 2080 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 397 {0xa908, 0x8802},
455 /* 51180 2081 */ {0xa908, 0x8802}, 398 {0x0034, 0x8801}, /* SSI reg addr */
456 /* 51205 2082 */ {0x0034, 0x8801}, 399 {0x00ca, 0x8800},
457 /* SSI reg addr */
458 /* 51230 2083 */ {0x00ca, 0x8800},
459 /* SSI data to write */ 400 /* SSI data to write */
460 /* 51253 2084 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 401 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
461 /* 51277 2085 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 402 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
462 /* 51301 2086 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 403 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
463 /* 51327 2087 */ {0x1f08, 0x8802}, 404 {0x1f08, 0x8802},
464 /* 51352 2088 */ {0x0006, 0x8801}, 405 {0x0006, 0x8801},
465 /* 51377 2089 */ {0x0080, 0x8800}, 406 {0x0080, 0x8800},
466 /* 51400 2090 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 407 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
467 408
468/* ----- Read back coefs we wrote earlier. */ 409/* ----- Read back coefs we wrote earlier. */
469 /* 51424 2091 */ /* READ { 0, 0x0000, 0x8608 } -> 0000: 15 */ 410 /* READ { 0x0000, 0x8608 } -> 0000: 15 */
470 /* 51448 2092 */ /* READ { 0, 0x0000, 0x8609 } -> 0000: 30 */ 411 /* READ { 0x0000, 0x8609 } -> 0000: 30 */
471 /* 51472 2093 */ /* READ { 0, 0x0000, 0x860a } -> 0000: fb */ 412 /* READ { 0x0000, 0x860a } -> 0000: fb */
472 /* 51496 2094 */ /* READ { 0, 0x0000, 0x860b } -> 0000: 3e */ 413 /* READ { 0x0000, 0x860b } -> 0000: 3e */
473 /* 51520 2095 */ /* READ { 0, 0x0000, 0x860c } -> 0000: ce */ 414 /* READ { 0x0000, 0x860c } -> 0000: ce */
474 /* 51544 2096 */ /* READ { 0, 0x0000, 0x860d } -> 0000: f4 */ 415 /* READ { 0x0000, 0x860d } -> 0000: f4 */
475 /* 51568 2097 */ /* READ { 0, 0x0000, 0x860e } -> 0000: eb */ 416 /* READ { 0x0000, 0x860e } -> 0000: eb */
476 /* 51592 2098 */ /* READ { 0, 0x0000, 0x860f } -> 0000: dc */ 417 /* READ { 0x0000, 0x860f } -> 0000: dc */
477 /* 51616 2099 */ /* READ { 0, 0x0000, 0x8610 } -> 0000: 39 */ 418 /* READ { 0x0000, 0x8610 } -> 0000: 39 */
478 /* 51640 2100 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 419 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
479 /* 51664 2101 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 08 */ 420 /* READ { 0x0001, 0x8802 } -> 0000: 08 */
480 /* 51690 2102 */ {0xb008, 0x8802}, 421 {0xb008, 0x8802},
481 /* 51715 2103 */ {0x0006, 0x8801}, 422 {0x0006, 0x8801},
482 /* 51740 2104 */ {0x007d, 0x8800}, 423 {0x007d, 0x8800},
483 /* 51763 2105 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 424 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
484 425
485 426
486 /* This chunk is seemingly redundant with */ 427 /* This chunk is seemingly redundant with */
487 /* earlier commands (A11 Coef...), but if I disable it, */ 428 /* earlier commands (A11 Coef...), but if I disable it, */
488 /* the image appears too dark. Maybe there was some kind of */ 429 /* the image appears too dark. Maybe there was some kind of */
489 /* reset since the earlier commands, so this is necessary again. */ 430 /* reset since the earlier commands, so this is necessary again. */
490 /* 51789 2106 */ {0x0015, 0x8608}, 431 {0x0015, 0x8608},
491 /* 51814 2107 */ {0x0030, 0x8609}, 432 {0x0030, 0x8609},
492 /* 51839 2108 */ {0xfffb, 0x860a}, 433 {0xfffb, 0x860a},
493 /* 51864 2109 */ {0x003e, 0x860b}, 434 {0x003e, 0x860b},
494 /* 51889 2110 */ {0xffce, 0x860c}, 435 {0xffce, 0x860c},
495 /* 51914 2111 */ {0xfff4, 0x860d}, 436 {0xfff4, 0x860d},
496 /* 51939 2112 */ {0xffeb, 0x860e}, 437 {0xffeb, 0x860e},
497 /* 51964 2113 */ {0xffdc, 0x860f}, 438 {0xffdc, 0x860f},
498 /* 51989 2114 */ {0x0039, 0x8610}, 439 {0x0039, 0x8610},
499 /* 52014 2115 */ {0x0018, 0x8657}, 440 {0x0018, 0x8657},
500 441
501 /* 52039 2116 */ {0x0000, 0x8508}, 442 {0x0000, 0x8508}, /* Disable compression. */
502 /* Disable compression. */
503 /* Previous line was: 443 /* Previous line was:
504 * 52039 2116 * { 0, 0x0021, 0x8508 }, * Enable compression. */ 444 {0x0021, 0x8508}, * Enable compression. */
505 /* 52064 2117 */ {0x0032, 0x850b}, 445 {0x0032, 0x850b}, /* compression stuff */
506 /* compression stuff */ 446 {0x0003, 0x8509}, /* compression stuff */
507 /* 52089 2118 */ {0x0003, 0x8509}, 447 {0x0011, 0x850a}, /* compression stuff */
508 /* compression stuff */ 448 {0x0021, 0x850d}, /* compression stuff */
509 /* 52114 2119 */ {0x0011, 0x850a}, 449 {0x0010, 0x850c}, /* compression stuff */
510 /* compression stuff */ 450 {0x0003, 0x8500}, /* *** Video mode: 160x120 */
511 /* 52139 2120 */ {0x0021, 0x850d}, 451 {0x0001, 0x8501}, /* Hardware-dominated snap control */
512 /* compression stuff */ 452 {0x0061, 0x8656}, /* Window1 size 128x128, Windows2 size 128x128,
513 /* 52164 2121 */ {0x0010, 0x850c}, 453 * gamma look-up disable,
514 /* compression stuff */ 454 * new edge enhancement enable */
515 /* 52189 2122 */ {0x0003, 0x8500}, 455 {0x0018, 0x8617}, /* Window1 start X (*2) */
516 /* *** Video mode: 160x120 */ 456 {0x0008, 0x8618}, /* Window1 start Y (*2) */
517 /* 52214 2123 */ {0x0001, 0x8501}, 457 {0x0061, 0x8656}, /* Window1 size 128x128, Windows2 size 128x128,
518 /* Hardware-dominated snap control */ 458 * gamma look-up disable,
519 /* 52239 2124 */ {0x0061, 0x8656}, 459 * new edge enhancement enable */
520 /* Window1 size 128x128, Windows2 size 128x128, 460 {0x0058, 0x8619}, /* Window2 start X (*2) */
521 * gamma look-up disable, new edge enhancement enable */ 461 {0x0008, 0x861a}, /* Window2 start Y (*2) */
522 /* 52264 2125 */ {0x0018, 0x8617}, 462 {0x00ff, 0x8615}, /* High lum thresh for white balance */
523 /* Window1 start X (*2) */ 463 {0x0000, 0x8616}, /* Low lum thresh for white balance */
524 /* 52289 2126 */ {0x0008, 0x8618}, 464 {0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
525 /* Window1 start Y (*2) */ 465 {0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
526 /* 52314 2127 */ {0x0061, 0x8656}, 466 /* READ { 0x0000, 0x8656 } -> 0000: 61 */
527 /* Window1 size 128x128, Windows2 size 128x128, 467 {0x0028, 0x8802}, /* 375 Khz SSI clock, SSI r/w sync with VSYNC */
528 * gamma look-up disable, new edge enhancement enable */ 468 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
529 /* 52339 2128 */ {0x0058, 0x8619}, 469 /* READ { 0x0001, 0x8802 } -> 0000: 28 */
530 /* Window2 start X (*2) */ 470 {0x1f28, 0x8802}, /* 375 Khz SSI clock, SSI r/w sync with VSYNC */
531 /* 52364 2129 */ {0x0008, 0x861a}, 471 {0x0010, 0x8801}, /* SSI reg addr */
532 /* Window2 start Y (*2) */ 472 {0x003e, 0x8800}, /* SSI data to write */
533 /* 52389 2130 */ {0x00ff, 0x8615}, 473 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
534 /* High lum thresh for white balance */ 474 {0x0028, 0x8802},
535 /* 52414 2131 */ {0x0000, 0x8616}, 475 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
536 /* Low lum thresh for white balance */ 476 /* READ { 0x0001, 0x8802 } -> 0000: 28 */
537 /* 52439 2132 */ {0x0012, 0x8700}, 477 {0x1f28, 0x8802},
538 /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */ 478 {0x0000, 0x8801},
539 /* 52464 2133 */ {0x0012, 0x8700}, 479 {0x001f, 0x8800},
540 /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */ 480 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
541 /* 52487 2134 */ /* READ { 0, 0x0000, 0x8656 } -> 0000: 61 */ 481 {0x0001, 0x8602}, /* optical black level for user settning = 1 */
542 /* 52513 2135 */ {0x0028, 0x8802},
543 /* 375 Khz SSI clock, SSI r/w sync with VSYNC */
544 /* 52536 2136 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
545 /* 52560 2137 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 28 */
546 /* 52586 2138 */ {0x1f28, 0x8802},
547 /* 375 Khz SSI clock, SSI r/w sync with VSYNC */
548 /* 52611 2139 */ {0x0010, 0x8801},
549 /* SSI reg addr */
550 /* 52636 2140 */ {0x003e, 0x8800},
551 /* SSI data to write */
552 /* 52659 2141 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
553 /* 52685 2142 */ {0x0028, 0x8802},
554 /* 52708 2143 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
555 /* 52732 2144 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 28 */
556 /* 52758 2145 */ {0x1f28, 0x8802},
557 /* 52783 2146 */ {0x0000, 0x8801},
558 /* 52808 2147 */ {0x001f, 0x8800},
559 /* 52831 2148 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */
560 /* 52857 2149 */ {0x0001, 0x8602},
561 /* optical black level for user settning = 1 */
562 482
563 /* Original: */ 483 /* Original: */
564 /* 52882 2150 */ {0x0023, 0x8700}, 484 {0x0023, 0x8700}, /* Clock speed 48Mhz/(3+2)/4= 2.4 Mhz */
565 /* Clock speed 48Mhz/(3+2)/4= 2.4 Mhz */ 485 {0x000f, 0x8602}, /* optical black level for user settning = 15 */
566 /* 52907 2151 */ {0x000f, 0x8602}, 486
567 /* optical black level for user settning = 15 */ 487 {0x0028, 0x8802},
568 488 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
569 /* 52932 2152 */ {0x0028, 0x8802}, 489 /* READ { 0x0001, 0x8802 } -> 0000: 28 */
570 /* 52955 2153 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 490 {0x1f28, 0x8802},
571 /* 52979 2154 */ /* READ { 0, 0x0001, 0x8802 } -> 0000: 28 */ 491 {0x0010, 0x8801},
572 /* 53005 2155 */ {0x1f28, 0x8802}, 492 {0x007b, 0x8800},
573 /* 53030 2156 */ {0x0010, 0x8801}, 493 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
574 /* 53055 2157 */ {0x007b, 0x8800}, 494 {0x002f, 0x8651}, /* R gain for white balance ... */
575 /* 53078 2158 */ /* READ { 0, 0x0001, 0x8803 } -> 0000: 00 */ 495 {0x0080, 0x8653},
576 /* 53104 2159 */ {0x002f, 0x8651}, 496 /* READ { 0x0000, 0x8655 } -> 0000: 00 */
577 /* R gain for white balance ... */ 497 {0x0000, 0x8655},
578 /* 53129 2160 */ {0x0080, 0x8653}, 498
579 /* 53152 2161 */ /* READ { 0, 0x0000, 0x8655 } -> 0000: 00 */ 499 {0x0030, 0x8112}, /* Video drop enable, ISO streaming enable */
580 /* 53178 2162 */ {0x0000, 0x8655}, 500 {0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
581 501 /* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE: (ALT=0) ) */
582 /* 53203 2163 */ {0x0030, 0x8112},
583 /* Video drop enable, ISO streaming enable */
584 /* 53228 2164 */ {0x0020, 0x8112},
585 /* Video drop enable, ISO streaming disable */
586 /* 53252 2165 */
587 /* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE: (ALT=0) ) */
588 {} 502 {}
589}; 503};
590 504
@@ -592,27 +506,27 @@ static const u16 spca508_init_data[][2] =
592 * Initialization data for Intel EasyPC Camera CS110 506 * Initialization data for Intel EasyPC Camera CS110
593 */ 507 */
594static const u16 spca508cs110_init_data[][2] = { 508static const u16 spca508cs110_init_data[][2] = {
595 {0x0000, 0x870b}, /* Reset CTL3 */ 509 {0x0000, 0x870b}, /* Reset CTL3 */
596 {0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */ 510 {0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */
597 {0x0000, 0x8111}, /* Normal operation on reset */ 511 {0x0000, 0x8111}, /* Normal operation on reset */
598 {0x0090, 0x8110}, 512 {0x0090, 0x8110},
599 /* External Clock 2x & Synchronous Serial Interface Output */ 513 /* External Clock 2x & Synchronous Serial Interface Output */
600 {0x0020, 0x8112}, /* Video Drop packet enable */ 514 {0x0020, 0x8112}, /* Video Drop packet enable */
601 {0x0000, 0x8114}, /* Software GPIO output data */ 515 {0x0000, 0x8114}, /* Software GPIO output data */
602 {0x0001, 0x8114}, 516 {0x0001, 0x8114},
603 {0x0001, 0x8114}, 517 {0x0001, 0x8114},
604 {0x0001, 0x8114}, 518 {0x0001, 0x8114},
605 {0x0003, 0x8114}, 519 {0x0003, 0x8114},
606 520
607 /* Initial sequence Synchronous Serial Interface */ 521 /* Initial sequence Synchronous Serial Interface */
608 {0x000f, 0x8402}, /* Memory bank Address */ 522 {0x000f, 0x8402}, /* Memory bank Address */
609 {0x0000, 0x8403}, /* Memory bank Address */ 523 {0x0000, 0x8403}, /* Memory bank Address */
610 {0x00ba, 0x8804}, /* SSI Slave address */ 524 {0x00ba, 0x8804}, /* SSI Slave address */
611 {0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */ 525 {0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */
612 {0x0010, 0x8802}, /* 93.75kHz SSI Clock two DataByte */ 526 {0x0010, 0x8802}, /* 93.75kHz SSI Clock two DataByte */
613 527
614 {0x0001, 0x8801}, 528 {0x0001, 0x8801},
615 {0x000a, 0x8805},/* a - NWG: Dunno what this is about */ 529 {0x000a, 0x8805}, /* a - NWG: Dunno what this is about */
616 {0x0000, 0x8800}, 530 {0x0000, 0x8800},
617 {0x0010, 0x8802}, 531 {0x0010, 0x8802},
618 532
@@ -646,459 +560,459 @@ static const u16 spca508cs110_init_data[][2] = {
646 {0x0000, 0x8800}, 560 {0x0000, 0x8800},
647 {0x0010, 0x8802}, 561 {0x0010, 0x8802},
648 562
649 {0x0002, 0x8704}, /* External input CKIx1 */ 563 {0x0002, 0x8704}, /* External input CKIx1 */
650 {0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */ 564 {0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */
651 {0x009a, 0x8600}, /* Line memory Read Counter (L) */ 565 {0x009a, 0x8600}, /* Line memory Read Counter (L) */
652 {0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */ 566 {0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */
653 {0x0003, 0x865c}, /* 3 Vertical Offset for Valid Lines(L) */ 567 {0x0003, 0x865c}, /* 3 Vertical Offset for Valid Lines(L) */
654 {0x0058, 0x865d}, /* 58 Horizontal Valid Pixel Window(L) */ 568 {0x0058, 0x865d}, /* 58 Horizontal Valid Pixel Window(L) */
655 569
656 {0x0006, 0x8660}, /* Nibble data + input order */ 570 {0x0006, 0x8660}, /* Nibble data + input order */
657 571
658 {0x000a, 0x8602}, /* Optical black level set to 0x0a */ 572 {0x000a, 0x8602}, /* Optical black level set to 0x0a */
659/* 1945 */ {0x0000, 0x8603}, /* Optical black level Offset */ 573 {0x0000, 0x8603}, /* Optical black level Offset */
660 574
661/* 1962 * {0, 0x0000, 0x8611}, * 0 R Offset for white Balance */ 575/* {0x0000, 0x8611}, * 0 R Offset for white Balance */
662/* 1963 * {0, 0x0000, 0x8612}, * 1 Gr Offset for white Balance */ 576/* {0x0000, 0x8612}, * 1 Gr Offset for white Balance */
663/* 1964 * {0, 0x0000, 0x8613}, * 1f B Offset for white Balance */ 577/* {0x0000, 0x8613}, * 1f B Offset for white Balance */
664/* 1965 * {0, 0x0000, 0x8614}, * f0 Gb Offset for white Balance */ 578/* {0x0000, 0x8614}, * f0 Gb Offset for white Balance */
665 579
666 {0x0040, 0x8651}, /* 2b BLUE gain for white balance good at all 60 */ 580 {0x0040, 0x8651}, /* 2b BLUE gain for white balance good at all 60 */
667 {0x0030, 0x8652}, /* 41 Gr Gain for white Balance (L) */ 581 {0x0030, 0x8652}, /* 41 Gr Gain for white Balance (L) */
668 {0x0035, 0x8653}, /* 26 RED gain for white balance */ 582 {0x0035, 0x8653}, /* 26 RED gain for white balance */
669 {0x0035, 0x8654}, /* 40Gb Gain for white Balance (L) */ 583 {0x0035, 0x8654}, /* 40Gb Gain for white Balance (L) */
670 {0x0041, 0x863f}, 584 {0x0041, 0x863f},
671 /* Fixed Gamma correction enabled (makes colours look better) */ 585 /* Fixed Gamma correction enabled (makes colours look better) */
672 586
673/* 2422 */ {0x0000, 0x8655}, 587 {0x0000, 0x8655},
674 /* High bits for white balance*****brightness control*** */ 588 /* High bits for white balance*****brightness control*** */
675 {} 589 {}
676}; 590};
677 591
678static const u16 spca508_sightcam_init_data[][2] = { 592static const u16 spca508_sightcam_init_data[][2] = {
679/* This line seems to setup the frame/canvas */ 593/* This line seems to setup the frame/canvas */
680 /*368 */ {0x000f, 0x8402}, 594 {0x000f, 0x8402},
681 595
682/* Theese 6 lines are needed to startup the webcam */ 596/* Theese 6 lines are needed to startup the webcam */
683 /*398 */ {0x0090, 0x8110}, 597 {0x0090, 0x8110},
684 /*399 */ {0x0001, 0x8114}, 598 {0x0001, 0x8114},
685 /*400 */ {0x0001, 0x8114}, 599 {0x0001, 0x8114},
686 /*401 */ {0x0001, 0x8114}, 600 {0x0001, 0x8114},
687 /*402 */ {0x0003, 0x8114}, 601 {0x0003, 0x8114},
688 /*403 */ {0x0080, 0x8804}, 602 {0x0080, 0x8804},
689 603
690/* This part seems to make the pictures darker? (autobrightness?) */ 604/* This part seems to make the pictures darker? (autobrightness?) */
691 /*436 */ {0x0001, 0x8801}, 605 {0x0001, 0x8801},
692 /*437 */ {0x0004, 0x8800}, 606 {0x0004, 0x8800},
693 /*439 */ {0x0003, 0x8801}, 607 {0x0003, 0x8801},
694 /*440 */ {0x00e0, 0x8800}, 608 {0x00e0, 0x8800},
695 /*442 */ {0x0004, 0x8801}, 609 {0x0004, 0x8801},
696 /*443 */ {0x00b4, 0x8800}, 610 {0x00b4, 0x8800},
697 /*445 */ {0x0005, 0x8801}, 611 {0x0005, 0x8801},
698 /*446 */ {0x0000, 0x8800}, 612 {0x0000, 0x8800},
699 613
700 /*448 */ {0x0006, 0x8801}, 614 {0x0006, 0x8801},
701 /*449 */ {0x00e0, 0x8800}, 615 {0x00e0, 0x8800},
702 /*451 */ {0x0007, 0x8801}, 616 {0x0007, 0x8801},
703 /*452 */ {0x000c, 0x8800}, 617 {0x000c, 0x8800},
704 618
705/* This section is just needed, it probably 619/* This section is just needed, it probably
706 * does something like the previous section, 620 * does something like the previous section,
707 * but the cam won't start if it's not included. 621 * but the cam won't start if it's not included.
708 */ 622 */
709 /*484 */ {0x0014, 0x8801}, 623 {0x0014, 0x8801},
710 /*485 */ {0x0008, 0x8800}, 624 {0x0008, 0x8800},
711 /*487 */ {0x0015, 0x8801}, 625 {0x0015, 0x8801},
712 /*488 */ {0x0067, 0x8800}, 626 {0x0067, 0x8800},
713 /*490 */ {0x0016, 0x8801}, 627 {0x0016, 0x8801},
714 /*491 */ {0x0000, 0x8800}, 628 {0x0000, 0x8800},
715 /*493 */ {0x0017, 0x8801}, 629 {0x0017, 0x8801},
716 /*494 */ {0x0020, 0x8800}, 630 {0x0020, 0x8800},
717 /*496 */ {0x0018, 0x8801}, 631 {0x0018, 0x8801},
718 /*497 */ {0x0044, 0x8800}, 632 {0x0044, 0x8800},
719 633
720/* Makes the picture darker - and the 634/* Makes the picture darker - and the
721 * cam won't start if not included 635 * cam won't start if not included
722 */ 636 */
723 /*505 */ {0x001e, 0x8801}, 637 {0x001e, 0x8801},
724 /*506 */ {0x00ea, 0x8800}, 638 {0x00ea, 0x8800},
725 /*508 */ {0x001f, 0x8801}, 639 {0x001f, 0x8801},
726 /*509 */ {0x0001, 0x8800}, 640 {0x0001, 0x8800},
727 /*511 */ {0x0003, 0x8801}, 641 {0x0003, 0x8801},
728 /*512 */ {0x00e0, 0x8800}, 642 {0x00e0, 0x8800},
729 643
730/* seems to place the colors ontop of each other #1 */ 644/* seems to place the colors ontop of each other #1 */
731 /*517 */ {0x0006, 0x8704}, 645 {0x0006, 0x8704},
732 /*518 */ {0x0001, 0x870c}, 646 {0x0001, 0x870c},
733 /*519 */ {0x0016, 0x8600}, 647 {0x0016, 0x8600},
734 /*520 */ {0x0002, 0x8606}, 648 {0x0002, 0x8606},
735 649
736/* if not included the pictures becomes _very_ dark */ 650/* if not included the pictures becomes _very_ dark */
737 /*521 */ {0x0064, 0x8607}, 651 {0x0064, 0x8607},
738 /*522 */ {0x003a, 0x8601}, 652 {0x003a, 0x8601},
739 /*523 */ {0x0000, 0x8602}, 653 {0x0000, 0x8602},
740 654
741/* seems to place the colors ontop of each other #2 */ 655/* seems to place the colors ontop of each other #2 */
742 /*524 */ {0x0016, 0x8600}, 656 {0x0016, 0x8600},
743 /*525 */ {0x0018, 0x8617}, 657 {0x0018, 0x8617},
744 /*526 */ {0x0008, 0x8618}, 658 {0x0008, 0x8618},
745 /*527 */ {0x00a1, 0x8656}, 659 {0x00a1, 0x8656},
746 660
747/* webcam won't start if not included */ 661/* webcam won't start if not included */
748 /*528 */ {0x0007, 0x865b}, 662 {0x0007, 0x865b},
749 /*529 */ {0x0001, 0x865c}, 663 {0x0001, 0x865c},
750 /*530 */ {0x0058, 0x865d}, 664 {0x0058, 0x865d},
751 /*531 */ {0x0048, 0x865e}, 665 {0x0048, 0x865e},
752 666
753/* adjusts the colors */ 667/* adjusts the colors */
754 /*541 */ {0x0049, 0x8651}, 668 {0x0049, 0x8651},
755 /*542 */ {0x0040, 0x8652}, 669 {0x0040, 0x8652},
756 /*543 */ {0x004c, 0x8653}, 670 {0x004c, 0x8653},
757 /*544 */ {0x0040, 0x8654}, 671 {0x0040, 0x8654},
758 {} 672 {}
759}; 673};
760 674
761static const u16 spca508_sightcam2_init_data[][2] = { 675static const u16 spca508_sightcam2_init_data[][2] = {
762/* 35 */ {0x0020, 0x8112}, 676 {0x0020, 0x8112},
763 677
764/* 36 */ {0x000f, 0x8402}, 678 {0x000f, 0x8402},
765/* 37 */ {0x0000, 0x8403}, 679 {0x0000, 0x8403},
766 680
767/* 38 */ {0x0008, 0x8201}, 681 {0x0008, 0x8201},
768/* 39 */ {0x0008, 0x8200}, 682 {0x0008, 0x8200},
769/* 40 */ {0x0001, 0x8200}, 683 {0x0001, 0x8200},
770/* 43 */ {0x0009, 0x8201}, 684 {0x0009, 0x8201},
771/* 44 */ {0x0008, 0x8200}, 685 {0x0008, 0x8200},
772/* 45 */ {0x0001, 0x8200}, 686 {0x0001, 0x8200},
773/* 48 */ {0x000a, 0x8201}, 687 {0x000a, 0x8201},
774/* 49 */ {0x0008, 0x8200}, 688 {0x0008, 0x8200},
775/* 50 */ {0x0001, 0x8200}, 689 {0x0001, 0x8200},
776/* 53 */ {0x000b, 0x8201}, 690 {0x000b, 0x8201},
777/* 54 */ {0x0008, 0x8200}, 691 {0x0008, 0x8200},
778/* 55 */ {0x0001, 0x8200}, 692 {0x0001, 0x8200},
779/* 58 */ {0x000c, 0x8201}, 693 {0x000c, 0x8201},
780/* 59 */ {0x0008, 0x8200}, 694 {0x0008, 0x8200},
781/* 60 */ {0x0001, 0x8200}, 695 {0x0001, 0x8200},
782/* 63 */ {0x000d, 0x8201}, 696 {0x000d, 0x8201},
783/* 64 */ {0x0008, 0x8200}, 697 {0x0008, 0x8200},
784/* 65 */ {0x0001, 0x8200}, 698 {0x0001, 0x8200},
785/* 68 */ {0x000e, 0x8201}, 699 {0x000e, 0x8201},
786/* 69 */ {0x0008, 0x8200}, 700 {0x0008, 0x8200},
787/* 70 */ {0x0001, 0x8200}, 701 {0x0001, 0x8200},
788/* 73 */ {0x0007, 0x8201}, 702 {0x0007, 0x8201},
789/* 74 */ {0x0008, 0x8200}, 703 {0x0008, 0x8200},
790/* 75 */ {0x0001, 0x8200}, 704 {0x0001, 0x8200},
791/* 78 */ {0x000f, 0x8201}, 705 {0x000f, 0x8201},
792/* 79 */ {0x0008, 0x8200}, 706 {0x0008, 0x8200},
793/* 80 */ {0x0001, 0x8200}, 707 {0x0001, 0x8200},
794 708
795/* 84 */ {0x0018, 0x8660}, 709 {0x0018, 0x8660},
796/* 85 */ {0x0010, 0x8201}, 710 {0x0010, 0x8201},
797 711
798/* 86 */ {0x0008, 0x8200}, 712 {0x0008, 0x8200},
799/* 87 */ {0x0001, 0x8200}, 713 {0x0001, 0x8200},
800/* 90 */ {0x0011, 0x8201}, 714 {0x0011, 0x8201},
801/* 91 */ {0x0008, 0x8200}, 715 {0x0008, 0x8200},
802/* 92 */ {0x0001, 0x8200}, 716 {0x0001, 0x8200},
803 717
804/* 95 */ {0x0000, 0x86b0}, 718 {0x0000, 0x86b0},
805/* 96 */ {0x0034, 0x86b1}, 719 {0x0034, 0x86b1},
806/* 97 */ {0x0000, 0x86b2}, 720 {0x0000, 0x86b2},
807/* 98 */ {0x0049, 0x86b3}, 721 {0x0049, 0x86b3},
808/* 99 */ {0x0000, 0x86b4}, 722 {0x0000, 0x86b4},
809/* 100 */ {0x0000, 0x86b4}, 723 {0x0000, 0x86b4},
810 724
811/* 101 */ {0x0012, 0x8201}, 725 {0x0012, 0x8201},
812/* 102 */ {0x0008, 0x8200}, 726 {0x0008, 0x8200},
813/* 103 */ {0x0001, 0x8200}, 727 {0x0001, 0x8200},
814/* 106 */ {0x0013, 0x8201}, 728 {0x0013, 0x8201},
815/* 107 */ {0x0008, 0x8200}, 729 {0x0008, 0x8200},
816/* 108 */ {0x0001, 0x8200}, 730 {0x0001, 0x8200},
817 731
818/* 111 */ {0x0001, 0x86b0}, 732 {0x0001, 0x86b0},
819/* 112 */ {0x00aa, 0x86b1}, 733 {0x00aa, 0x86b1},
820/* 113 */ {0x0000, 0x86b2}, 734 {0x0000, 0x86b2},
821/* 114 */ {0x00e4, 0x86b3}, 735 {0x00e4, 0x86b3},
822/* 115 */ {0x0000, 0x86b4}, 736 {0x0000, 0x86b4},
823/* 116 */ {0x0000, 0x86b4}, 737 {0x0000, 0x86b4},
824 738
825/* 118 */ {0x0018, 0x8660}, 739 {0x0018, 0x8660},
826 740
827/* 119 */ {0x0090, 0x8110}, 741 {0x0090, 0x8110},
828/* 120 */ {0x0001, 0x8114}, 742 {0x0001, 0x8114},
829/* 121 */ {0x0001, 0x8114}, 743 {0x0001, 0x8114},
830/* 122 */ {0x0001, 0x8114}, 744 {0x0001, 0x8114},
831/* 123 */ {0x0003, 0x8114}, 745 {0x0003, 0x8114},
832 746
833/* 124 */ {0x0080, 0x8804}, 747 {0x0080, 0x8804},
834/* 157 */ {0x0003, 0x8801}, 748 {0x0003, 0x8801},
835/* 158 */ {0x0012, 0x8800}, 749 {0x0012, 0x8800},
836/* 160 */ {0x0004, 0x8801}, 750 {0x0004, 0x8801},
837/* 161 */ {0x0005, 0x8800}, 751 {0x0005, 0x8800},
838/* 163 */ {0x0005, 0x8801}, 752 {0x0005, 0x8801},
839/* 164 */ {0x0000, 0x8800}, 753 {0x0000, 0x8800},
840/* 166 */ {0x0006, 0x8801}, 754 {0x0006, 0x8801},
841/* 167 */ {0x0000, 0x8800}, 755 {0x0000, 0x8800},
842/* 169 */ {0x0007, 0x8801}, 756 {0x0007, 0x8801},
843/* 170 */ {0x0000, 0x8800}, 757 {0x0000, 0x8800},
844/* 172 */ {0x0008, 0x8801}, 758 {0x0008, 0x8801},
845/* 173 */ {0x0005, 0x8800}, 759 {0x0005, 0x8800},
846/* 175 */ {0x000a, 0x8700}, 760 {0x000a, 0x8700},
847/* 176 */ {0x000e, 0x8801}, 761 {0x000e, 0x8801},
848/* 177 */ {0x0004, 0x8800}, 762 {0x0004, 0x8800},
849/* 179 */ {0x0005, 0x8801}, 763 {0x0005, 0x8801},
850/* 180 */ {0x0047, 0x8800}, 764 {0x0047, 0x8800},
851/* 182 */ {0x0006, 0x8801}, 765 {0x0006, 0x8801},
852/* 183 */ {0x0000, 0x8800}, 766 {0x0000, 0x8800},
853/* 185 */ {0x0007, 0x8801}, 767 {0x0007, 0x8801},
854/* 186 */ {0x00c0, 0x8800}, 768 {0x00c0, 0x8800},
855/* 188 */ {0x0008, 0x8801}, 769 {0x0008, 0x8801},
856/* 189 */ {0x0003, 0x8800}, 770 {0x0003, 0x8800},
857/* 191 */ {0x0013, 0x8801}, 771 {0x0013, 0x8801},
858/* 192 */ {0x0001, 0x8800}, 772 {0x0001, 0x8800},
859/* 194 */ {0x0009, 0x8801}, 773 {0x0009, 0x8801},
860/* 195 */ {0x0000, 0x8800}, 774 {0x0000, 0x8800},
861/* 197 */ {0x000a, 0x8801}, 775 {0x000a, 0x8801},
862/* 198 */ {0x0000, 0x8800}, 776 {0x0000, 0x8800},
863/* 200 */ {0x000b, 0x8801}, 777 {0x000b, 0x8801},
864/* 201 */ {0x0000, 0x8800}, 778 {0x0000, 0x8800},
865/* 203 */ {0x000c, 0x8801}, 779 {0x000c, 0x8801},
866/* 204 */ {0x0000, 0x8800}, 780 {0x0000, 0x8800},
867/* 206 */ {0x000e, 0x8801}, 781 {0x000e, 0x8801},
868/* 207 */ {0x0004, 0x8800}, 782 {0x0004, 0x8800},
869/* 209 */ {0x000f, 0x8801}, 783 {0x000f, 0x8801},
870/* 210 */ {0x0000, 0x8800}, 784 {0x0000, 0x8800},
871/* 212 */ {0x0010, 0x8801}, 785 {0x0010, 0x8801},
872/* 213 */ {0x0006, 0x8800}, 786 {0x0006, 0x8800},
873/* 215 */ {0x0011, 0x8801}, 787 {0x0011, 0x8801},
874/* 216 */ {0x0006, 0x8800}, 788 {0x0006, 0x8800},
875/* 218 */ {0x0012, 0x8801}, 789 {0x0012, 0x8801},
876/* 219 */ {0x0000, 0x8800}, 790 {0x0000, 0x8800},
877/* 221 */ {0x0013, 0x8801}, 791 {0x0013, 0x8801},
878/* 222 */ {0x0001, 0x8800}, 792 {0x0001, 0x8800},
879 793
880/* 224 */ {0x000a, 0x8700}, 794 {0x000a, 0x8700},
881/* 225 */ {0x0000, 0x8702}, 795 {0x0000, 0x8702},
882/* 226 */ {0x0000, 0x8703}, 796 {0x0000, 0x8703},
883/* 227 */ {0x00c2, 0x8704}, 797 {0x00c2, 0x8704},
884/* 228 */ {0x0001, 0x870c}, 798 {0x0001, 0x870c},
885 799
886/* 229 */ {0x0044, 0x8600}, 800 {0x0044, 0x8600},
887/* 230 */ {0x0002, 0x8606}, 801 {0x0002, 0x8606},
888/* 231 */ {0x0064, 0x8607}, 802 {0x0064, 0x8607},
889/* 232 */ {0x003a, 0x8601}, 803 {0x003a, 0x8601},
890/* 233 */ {0x0008, 0x8602}, 804 {0x0008, 0x8602},
891/* 234 */ {0x0044, 0x8600}, 805 {0x0044, 0x8600},
892/* 235 */ {0x0018, 0x8617}, 806 {0x0018, 0x8617},
893/* 236 */ {0x0008, 0x8618}, 807 {0x0008, 0x8618},
894/* 237 */ {0x00a1, 0x8656}, 808 {0x00a1, 0x8656},
895/* 238 */ {0x0004, 0x865b}, 809 {0x0004, 0x865b},
896/* 239 */ {0x0002, 0x865c}, 810 {0x0002, 0x865c},
897/* 240 */ {0x0058, 0x865d}, 811 {0x0058, 0x865d},
898/* 241 */ {0x0048, 0x865e}, 812 {0x0048, 0x865e},
899/* 242 */ {0x0012, 0x8608}, 813 {0x0012, 0x8608},
900/* 243 */ {0x002c, 0x8609}, 814 {0x002c, 0x8609},
901/* 244 */ {0x0002, 0x860a}, 815 {0x0002, 0x860a},
902/* 245 */ {0x002c, 0x860b}, 816 {0x002c, 0x860b},
903/* 246 */ {0x00db, 0x860c}, 817 {0x00db, 0x860c},
904/* 247 */ {0x00f9, 0x860d}, 818 {0x00f9, 0x860d},
905/* 248 */ {0x00f1, 0x860e}, 819 {0x00f1, 0x860e},
906/* 249 */ {0x00e3, 0x860f}, 820 {0x00e3, 0x860f},
907/* 250 */ {0x002c, 0x8610}, 821 {0x002c, 0x8610},
908/* 251 */ {0x006c, 0x8651}, 822 {0x006c, 0x8651},
909/* 252 */ {0x0041, 0x8652}, 823 {0x0041, 0x8652},
910/* 253 */ {0x0059, 0x8653}, 824 {0x0059, 0x8653},
911/* 254 */ {0x0040, 0x8654}, 825 {0x0040, 0x8654},
912/* 255 */ {0x00fa, 0x8611}, 826 {0x00fa, 0x8611},
913/* 256 */ {0x00ff, 0x8612}, 827 {0x00ff, 0x8612},
914/* 257 */ {0x00f8, 0x8613}, 828 {0x00f8, 0x8613},
915/* 258 */ {0x0000, 0x8614}, 829 {0x0000, 0x8614},
916/* 259 */ {0x0001, 0x863f}, 830 {0x0001, 0x863f},
917/* 260 */ {0x0000, 0x8640}, 831 {0x0000, 0x8640},
918/* 261 */ {0x0026, 0x8641}, 832 {0x0026, 0x8641},
919/* 262 */ {0x0045, 0x8642}, 833 {0x0045, 0x8642},
920/* 263 */ {0x0060, 0x8643}, 834 {0x0060, 0x8643},
921/* 264 */ {0x0075, 0x8644}, 835 {0x0075, 0x8644},
922/* 265 */ {0x0088, 0x8645}, 836 {0x0088, 0x8645},
923/* 266 */ {0x009b, 0x8646}, 837 {0x009b, 0x8646},
924/* 267 */ {0x00b0, 0x8647}, 838 {0x00b0, 0x8647},
925/* 268 */ {0x00c5, 0x8648}, 839 {0x00c5, 0x8648},
926/* 269 */ {0x00d2, 0x8649}, 840 {0x00d2, 0x8649},
927/* 270 */ {0x00dc, 0x864a}, 841 {0x00dc, 0x864a},
928/* 271 */ {0x00e5, 0x864b}, 842 {0x00e5, 0x864b},
929/* 272 */ {0x00eb, 0x864c}, 843 {0x00eb, 0x864c},
930/* 273 */ {0x00f0, 0x864d}, 844 {0x00f0, 0x864d},
931/* 274 */ {0x00f6, 0x864e}, 845 {0x00f6, 0x864e},
932/* 275 */ {0x00fa, 0x864f}, 846 {0x00fa, 0x864f},
933/* 276 */ {0x00ff, 0x8650}, 847 {0x00ff, 0x8650},
934/* 277 */ {0x0060, 0x8657}, 848 {0x0060, 0x8657},
935/* 278 */ {0x0010, 0x8658}, 849 {0x0010, 0x8658},
936/* 279 */ {0x0018, 0x8659}, 850 {0x0018, 0x8659},
937/* 280 */ {0x0005, 0x865a}, 851 {0x0005, 0x865a},
938/* 281 */ {0x0018, 0x8660}, 852 {0x0018, 0x8660},
939/* 282 */ {0x0003, 0x8509}, 853 {0x0003, 0x8509},
940/* 283 */ {0x0011, 0x850a}, 854 {0x0011, 0x850a},
941/* 284 */ {0x0032, 0x850b}, 855 {0x0032, 0x850b},
942/* 285 */ {0x0010, 0x850c}, 856 {0x0010, 0x850c},
943/* 286 */ {0x0021, 0x850d}, 857 {0x0021, 0x850d},
944/* 287 */ {0x0001, 0x8500}, 858 {0x0001, 0x8500},
945/* 288 */ {0x0000, 0x8508}, 859 {0x0000, 0x8508},
946/* 289 */ {0x0012, 0x8608}, 860 {0x0012, 0x8608},
947/* 290 */ {0x002c, 0x8609}, 861 {0x002c, 0x8609},
948/* 291 */ {0x0002, 0x860a}, 862 {0x0002, 0x860a},
949/* 292 */ {0x0039, 0x860b}, 863 {0x0039, 0x860b},
950/* 293 */ {0x00d0, 0x860c}, 864 {0x00d0, 0x860c},
951/* 294 */ {0x00f7, 0x860d}, 865 {0x00f7, 0x860d},
952/* 295 */ {0x00ed, 0x860e}, 866 {0x00ed, 0x860e},
953/* 296 */ {0x00db, 0x860f}, 867 {0x00db, 0x860f},
954/* 297 */ {0x0039, 0x8610}, 868 {0x0039, 0x8610},
955/* 298 */ {0x0012, 0x8657}, 869 {0x0012, 0x8657},
956/* 299 */ {0x000c, 0x8619}, 870 {0x000c, 0x8619},
957/* 300 */ {0x0004, 0x861a}, 871 {0x0004, 0x861a},
958/* 301 */ {0x00a1, 0x8656}, 872 {0x00a1, 0x8656},
959/* 302 */ {0x00c8, 0x8615}, 873 {0x00c8, 0x8615},
960/* 303 */ {0x0032, 0x8616}, 874 {0x0032, 0x8616},
961 875
962/* 306 */ {0x0030, 0x8112}, 876 {0x0030, 0x8112},
963/* 313 */ {0x0020, 0x8112}, 877 {0x0020, 0x8112},
964/* 314 */ {0x0020, 0x8112}, 878 {0x0020, 0x8112},
965/* 315 */ {0x000f, 0x8402}, 879 {0x000f, 0x8402},
966/* 316 */ {0x0000, 0x8403}, 880 {0x0000, 0x8403},
967 881
968/* 317 */ {0x0090, 0x8110}, 882 {0x0090, 0x8110},
969/* 318 */ {0x0001, 0x8114}, 883 {0x0001, 0x8114},
970/* 319 */ {0x0001, 0x8114}, 884 {0x0001, 0x8114},
971/* 320 */ {0x0001, 0x8114}, 885 {0x0001, 0x8114},
972/* 321 */ {0x0003, 0x8114}, 886 {0x0003, 0x8114},
973/* 322 */ {0x0080, 0x8804}, 887 {0x0080, 0x8804},
974 888
975/* 355 */ {0x0003, 0x8801}, 889 {0x0003, 0x8801},
976/* 356 */ {0x0012, 0x8800}, 890 {0x0012, 0x8800},
977/* 358 */ {0x0004, 0x8801}, 891 {0x0004, 0x8801},
978/* 359 */ {0x0005, 0x8800}, 892 {0x0005, 0x8800},
979/* 361 */ {0x0005, 0x8801}, 893 {0x0005, 0x8801},
980/* 362 */ {0x0047, 0x8800}, 894 {0x0047, 0x8800},
981/* 364 */ {0x0006, 0x8801}, 895 {0x0006, 0x8801},
982/* 365 */ {0x0000, 0x8800}, 896 {0x0000, 0x8800},
983/* 367 */ {0x0007, 0x8801}, 897 {0x0007, 0x8801},
984/* 368 */ {0x00c0, 0x8800}, 898 {0x00c0, 0x8800},
985/* 370 */ {0x0008, 0x8801}, 899 {0x0008, 0x8801},
986/* 371 */ {0x0003, 0x8800}, 900 {0x0003, 0x8800},
987/* 373 */ {0x000a, 0x8700}, 901 {0x000a, 0x8700},
988/* 374 */ {0x000e, 0x8801}, 902 {0x000e, 0x8801},
989/* 375 */ {0x0004, 0x8800}, 903 {0x0004, 0x8800},
990/* 377 */ {0x0005, 0x8801}, 904 {0x0005, 0x8801},
991/* 378 */ {0x0047, 0x8800}, 905 {0x0047, 0x8800},
992/* 380 */ {0x0006, 0x8801}, 906 {0x0006, 0x8801},
993/* 381 */ {0x0000, 0x8800}, 907 {0x0000, 0x8800},
994/* 383 */ {0x0007, 0x8801}, 908 {0x0007, 0x8801},
995/* 384 */ {0x00c0, 0x8800}, 909 {0x00c0, 0x8800},
996/* 386 */ {0x0008, 0x8801}, 910 {0x0008, 0x8801},
997/* 387 */ {0x0003, 0x8800}, 911 {0x0003, 0x8800},
998/* 389 */ {0x0013, 0x8801}, 912 {0x0013, 0x8801},
999/* 390 */ {0x0001, 0x8800}, 913 {0x0001, 0x8800},
1000/* 392 */ {0x0009, 0x8801}, 914 {0x0009, 0x8801},
1001/* 393 */ {0x0000, 0x8800}, 915 {0x0000, 0x8800},
1002/* 395 */ {0x000a, 0x8801}, 916 {0x000a, 0x8801},
1003/* 396 */ {0x0000, 0x8800}, 917 {0x0000, 0x8800},
1004/* 398 */ {0x000b, 0x8801}, 918 {0x000b, 0x8801},
1005/* 399 */ {0x0000, 0x8800}, 919 {0x0000, 0x8800},
1006/* 401 */ {0x000c, 0x8801}, 920 {0x000c, 0x8801},
1007/* 402 */ {0x0000, 0x8800}, 921 {0x0000, 0x8800},
1008/* 404 */ {0x000e, 0x8801}, 922 {0x000e, 0x8801},
1009/* 405 */ {0x0004, 0x8800}, 923 {0x0004, 0x8800},
1010/* 407 */ {0x000f, 0x8801}, 924 {0x000f, 0x8801},
1011/* 408 */ {0x0000, 0x8800}, 925 {0x0000, 0x8800},
1012/* 410 */ {0x0010, 0x8801}, 926 {0x0010, 0x8801},
1013/* 411 */ {0x0006, 0x8800}, 927 {0x0006, 0x8800},
1014/* 413 */ {0x0011, 0x8801}, 928 {0x0011, 0x8801},
1015/* 414 */ {0x0006, 0x8800}, 929 {0x0006, 0x8800},
1016/* 416 */ {0x0012, 0x8801}, 930 {0x0012, 0x8801},
1017/* 417 */ {0x0000, 0x8800}, 931 {0x0000, 0x8800},
1018/* 419 */ {0x0013, 0x8801}, 932 {0x0013, 0x8801},
1019/* 420 */ {0x0001, 0x8800}, 933 {0x0001, 0x8800},
1020/* 422 */ {0x000a, 0x8700}, 934 {0x000a, 0x8700},
1021/* 423 */ {0x0000, 0x8702}, 935 {0x0000, 0x8702},
1022/* 424 */ {0x0000, 0x8703}, 936 {0x0000, 0x8703},
1023/* 425 */ {0x00c2, 0x8704}, 937 {0x00c2, 0x8704},
1024/* 426 */ {0x0001, 0x870c}, 938 {0x0001, 0x870c},
1025/* 427 */ {0x0044, 0x8600}, 939 {0x0044, 0x8600},
1026/* 428 */ {0x0002, 0x8606}, 940 {0x0002, 0x8606},
1027/* 429 */ {0x0064, 0x8607}, 941 {0x0064, 0x8607},
1028/* 430 */ {0x003a, 0x8601}, 942 {0x003a, 0x8601},
1029/* 431 */ {0x0008, 0x8602}, 943 {0x0008, 0x8602},
1030/* 432 */ {0x0044, 0x8600}, 944 {0x0044, 0x8600},
1031/* 433 */ {0x0018, 0x8617}, 945 {0x0018, 0x8617},
1032/* 434 */ {0x0008, 0x8618}, 946 {0x0008, 0x8618},
1033/* 435 */ {0x00a1, 0x8656}, 947 {0x00a1, 0x8656},
1034/* 436 */ {0x0004, 0x865b}, 948 {0x0004, 0x865b},
1035/* 437 */ {0x0002, 0x865c}, 949 {0x0002, 0x865c},
1036/* 438 */ {0x0058, 0x865d}, 950 {0x0058, 0x865d},
1037/* 439 */ {0x0048, 0x865e}, 951 {0x0048, 0x865e},
1038/* 440 */ {0x0012, 0x8608}, 952 {0x0012, 0x8608},
1039/* 441 */ {0x002c, 0x8609}, 953 {0x002c, 0x8609},
1040/* 442 */ {0x0002, 0x860a}, 954 {0x0002, 0x860a},
1041/* 443 */ {0x002c, 0x860b}, 955 {0x002c, 0x860b},
1042/* 444 */ {0x00db, 0x860c}, 956 {0x00db, 0x860c},
1043/* 445 */ {0x00f9, 0x860d}, 957 {0x00f9, 0x860d},
1044/* 446 */ {0x00f1, 0x860e}, 958 {0x00f1, 0x860e},
1045/* 447 */ {0x00e3, 0x860f}, 959 {0x00e3, 0x860f},
1046/* 448 */ {0x002c, 0x8610}, 960 {0x002c, 0x8610},
1047/* 449 */ {0x006c, 0x8651}, 961 {0x006c, 0x8651},
1048/* 450 */ {0x0041, 0x8652}, 962 {0x0041, 0x8652},
1049/* 451 */ {0x0059, 0x8653}, 963 {0x0059, 0x8653},
1050/* 452 */ {0x0040, 0x8654}, 964 {0x0040, 0x8654},
1051/* 453 */ {0x00fa, 0x8611}, 965 {0x00fa, 0x8611},
1052/* 454 */ {0x00ff, 0x8612}, 966 {0x00ff, 0x8612},
1053/* 455 */ {0x00f8, 0x8613}, 967 {0x00f8, 0x8613},
1054/* 456 */ {0x0000, 0x8614}, 968 {0x0000, 0x8614},
1055/* 457 */ {0x0001, 0x863f}, 969 {0x0001, 0x863f},
1056/* 458 */ {0x0000, 0x8640}, 970 {0x0000, 0x8640},
1057/* 459 */ {0x0026, 0x8641}, 971 {0x0026, 0x8641},
1058/* 460 */ {0x0045, 0x8642}, 972 {0x0045, 0x8642},
1059/* 461 */ {0x0060, 0x8643}, 973 {0x0060, 0x8643},
1060/* 462 */ {0x0075, 0x8644}, 974 {0x0075, 0x8644},
1061/* 463 */ {0x0088, 0x8645}, 975 {0x0088, 0x8645},
1062/* 464 */ {0x009b, 0x8646}, 976 {0x009b, 0x8646},
1063/* 465 */ {0x00b0, 0x8647}, 977 {0x00b0, 0x8647},
1064/* 466 */ {0x00c5, 0x8648}, 978 {0x00c5, 0x8648},
1065/* 467 */ {0x00d2, 0x8649}, 979 {0x00d2, 0x8649},
1066/* 468 */ {0x00dc, 0x864a}, 980 {0x00dc, 0x864a},
1067/* 469 */ {0x00e5, 0x864b}, 981 {0x00e5, 0x864b},
1068/* 470 */ {0x00eb, 0x864c}, 982 {0x00eb, 0x864c},
1069/* 471 */ {0x00f0, 0x864d}, 983 {0x00f0, 0x864d},
1070/* 472 */ {0x00f6, 0x864e}, 984 {0x00f6, 0x864e},
1071/* 473 */ {0x00fa, 0x864f}, 985 {0x00fa, 0x864f},
1072/* 474 */ {0x00ff, 0x8650}, 986 {0x00ff, 0x8650},
1073/* 475 */ {0x0060, 0x8657}, 987 {0x0060, 0x8657},
1074/* 476 */ {0x0010, 0x8658}, 988 {0x0010, 0x8658},
1075/* 477 */ {0x0018, 0x8659}, 989 {0x0018, 0x8659},
1076/* 478 */ {0x0005, 0x865a}, 990 {0x0005, 0x865a},
1077/* 479 */ {0x0018, 0x8660}, 991 {0x0018, 0x8660},
1078/* 480 */ {0x0003, 0x8509}, 992 {0x0003, 0x8509},
1079/* 481 */ {0x0011, 0x850a}, 993 {0x0011, 0x850a},
1080/* 482 */ {0x0032, 0x850b}, 994 {0x0032, 0x850b},
1081/* 483 */ {0x0010, 0x850c}, 995 {0x0010, 0x850c},
1082/* 484 */ {0x0021, 0x850d}, 996 {0x0021, 0x850d},
1083/* 485 */ {0x0001, 0x8500}, 997 {0x0001, 0x8500},
1084/* 486 */ {0x0000, 0x8508}, 998 {0x0000, 0x8508},
1085 999
1086/* 487 */ {0x0012, 0x8608}, 1000 {0x0012, 0x8608},
1087/* 488 */ {0x002c, 0x8609}, 1001 {0x002c, 0x8609},
1088/* 489 */ {0x0002, 0x860a}, 1002 {0x0002, 0x860a},
1089/* 490 */ {0x0039, 0x860b}, 1003 {0x0039, 0x860b},
1090/* 491 */ {0x00d0, 0x860c}, 1004 {0x00d0, 0x860c},
1091/* 492 */ {0x00f7, 0x860d}, 1005 {0x00f7, 0x860d},
1092/* 493 */ {0x00ed, 0x860e}, 1006 {0x00ed, 0x860e},
1093/* 494 */ {0x00db, 0x860f}, 1007 {0x00db, 0x860f},
1094/* 495 */ {0x0039, 0x8610}, 1008 {0x0039, 0x8610},
1095/* 496 */ {0x0012, 0x8657}, 1009 {0x0012, 0x8657},
1096/* 497 */ {0x0064, 0x8619}, 1010 {0x0064, 0x8619},
1097 1011
1098/* This line starts it all, it is not needed here */ 1012/* This line starts it all, it is not needed here */
1099/* since it has been build into the driver */ 1013/* since it has been build into the driver */
1100/* jfm: don't start now */ 1014/* jfm: don't start now */
1101/* 590 * {0x0030, 0x8112}, */ 1015/* {0x0030, 0x8112}, */
1102 {} 1016 {}
1103}; 1017};
1104 1018
@@ -1109,14 +1023,14 @@ static const u16 spca508_vista_init_data[][2] = {
1109 {0x0008, 0x8200}, /* Clear register */ 1023 {0x0008, 0x8200}, /* Clear register */
1110 {0x0000, 0x870b}, /* Reset CTL3 */ 1024 {0x0000, 0x870b}, /* Reset CTL3 */
1111 {0x0020, 0x8112}, /* Video Drop packet enable */ 1025 {0x0020, 0x8112}, /* Video Drop packet enable */
1112 {0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */ 1026 {0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */
1113 {0x0000, 0x8110}, /* Disable everything */ 1027 {0x0000, 0x8110}, /* Disable everything */
1114 {0x0000, 0x8114}, /* Software GPIO output data */ 1028 {0x0000, 0x8114}, /* Software GPIO output data */
1115 {0x0000, 0x8114}, 1029 {0x0000, 0x8114},
1116 1030
1117 {0x0003, 0x8111}, 1031 {0x0003, 0x8111},
1118 {0x0000, 0x8111}, 1032 {0x0000, 0x8111},
1119 {0x0090, 0x8110}, /* Enable: SSI output, External 2X clock output */ 1033 {0x0090, 0x8110}, /* Enable: SSI output, External 2X clock output */
1120 {0x0020, 0x8112}, 1034 {0x0020, 0x8112},
1121 {0x0000, 0x8114}, 1035 {0x0000, 0x8114},
1122 {0x0001, 0x8114}, 1036 {0x0001, 0x8114},
@@ -1129,191 +1043,143 @@ static const u16 spca508_vista_init_data[][2] = {
1129 {0x00ba, 0x8804}, /* SSI Slave address */ 1043 {0x00ba, 0x8804}, /* SSI Slave address */
1130 {0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */ 1044 {0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */
1131 1045
1132 /* READ { 0, 0x0001, 0x8803 } -> 1046 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1133 0000: 00 */ 1047 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1134 /* READ { 0, 0x0001, 0x8802 } ->
1135 0000: 10 */
1136 {0x0010, 0x8802}, /* Will write 2 bytes (DATA1+DATA2) */ 1048 {0x0010, 0x8802}, /* Will write 2 bytes (DATA1+DATA2) */
1137 {0x0020, 0x8801}, /* Register address for SSI read/write */ 1049 {0x0020, 0x8801}, /* Register address for SSI read/write */
1138 {0x0044, 0x8805}, /* DATA2 */ 1050 {0x0044, 0x8805}, /* DATA2 */
1139 {0x0004, 0x8800}, /* DATA1 -> write triggered */ 1051 {0x0004, 0x8800}, /* DATA1 -> write triggered */
1140 /* READ { 0, 0x0001, 0x8803 } -> 1052 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1141 0000: 00 */
1142 1053
1143 /* READ { 0, 0x0001, 0x8803 } -> 1054 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1144 0000: 00 */ 1055 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1145 /* READ { 0, 0x0001, 0x8802 } ->
1146 0000: 10 */
1147 {0x0010, 0x8802}, 1056 {0x0010, 0x8802},
1148 {0x0009, 0x8801}, 1057 {0x0009, 0x8801},
1149 {0x0042, 0x8805}, 1058 {0x0042, 0x8805},
1150 {0x0001, 0x8800}, 1059 {0x0001, 0x8800},
1151 /* READ { 0, 0x0001, 0x8803 } -> 1060 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1152 0000: 00 */
1153 1061
1154 /* READ { 0, 0x0001, 0x8803 } -> 1062 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1155 0000: 00 */ 1063 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1156 /* READ { 0, 0x0001, 0x8802 } ->
1157 0000: 10 */
1158 {0x0010, 0x8802}, 1064 {0x0010, 0x8802},
1159 {0x003c, 0x8801}, 1065 {0x003c, 0x8801},
1160 {0x0001, 0x8805}, 1066 {0x0001, 0x8805},
1161 {0x0000, 0x8800}, 1067 {0x0000, 0x8800},
1162 /* READ { 0, 0x0001, 0x8803 } -> 1068 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1163 0000: 00 */
1164 1069
1165 /* READ { 0, 0x0001, 0x8803 } -> 1070 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1166 0000: 00 */ 1071 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1167 /* READ { 0, 0x0001, 0x8802 } ->
1168 0000: 10 */
1169 {0x0010, 0x8802}, 1072 {0x0010, 0x8802},
1170 {0x0001, 0x8801}, 1073 {0x0001, 0x8801},
1171 {0x000a, 0x8805}, 1074 {0x000a, 0x8805},
1172 {0x0000, 0x8800}, 1075 {0x0000, 0x8800},
1173 /* READ { 0, 0x0001, 0x8803 } -> 1076 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1174 0000: 00 */
1175 1077
1176 /* READ { 0, 0x0001, 0x8803 } -> 1078 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1177 0000: 00 */ 1079 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1178 /* READ { 0, 0x0001, 0x8802 } ->
1179 0000: 10 */
1180 {0x0010, 0x8802}, 1080 {0x0010, 0x8802},
1181 {0x0002, 0x8801}, 1081 {0x0002, 0x8801},
1182 {0x0000, 0x8805}, 1082 {0x0000, 0x8805},
1183 {0x0000, 0x8800}, 1083 {0x0000, 0x8800},
1184 /* READ { 0, 0x0001, 0x8803 } -> 1084 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1185 0000: 00 */
1186 1085
1187 /* READ { 0, 0x0001, 0x8803 } -> 1086 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1188 0000: 00 */ 1087 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1189 /* READ { 0, 0x0001, 0x8802 } ->
1190 0000: 10 */
1191 {0x0010, 0x8802}, 1088 {0x0010, 0x8802},
1192 {0x0003, 0x8801}, 1089 {0x0003, 0x8801},
1193 {0x0027, 0x8805}, 1090 {0x0027, 0x8805},
1194 {0x0001, 0x8800}, 1091 {0x0001, 0x8800},
1195 /* READ { 0, 0x0001, 0x8803 } -> 1092 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1196 0000: 00 */
1197 1093
1198 /* READ { 0, 0x0001, 0x8803 } -> 1094 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1199 0000: 00 */ 1095 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1200 /* READ { 0, 0x0001, 0x8802 } ->
1201 0000: 10 */
1202 {0x0010, 0x8802}, 1096 {0x0010, 0x8802},
1203 {0x0004, 0x8801}, 1097 {0x0004, 0x8801},
1204 {0x0065, 0x8805}, 1098 {0x0065, 0x8805},
1205 {0x0001, 0x8800}, 1099 {0x0001, 0x8800},
1206 /* READ { 0, 0x0001, 0x8803 } -> 1100 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1207 0000: 00 */
1208 1101
1209 /* READ { 0, 0x0001, 0x8803 } -> 1102 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1210 0000: 00 */ 1103 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1211 /* READ { 0, 0x0001, 0x8802 } ->
1212 0000: 10 */
1213 {0x0010, 0x8802}, 1104 {0x0010, 0x8802},
1214 {0x0005, 0x8801}, 1105 {0x0005, 0x8801},
1215 {0x0003, 0x8805}, 1106 {0x0003, 0x8805},
1216 {0x0000, 0x8800}, 1107 {0x0000, 0x8800},
1217 /* READ { 0, 0x0001, 0x8803 } -> 1108 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1218 0000: 00 */
1219 1109
1220 /* READ { 0, 0x0001, 0x8803 } -> 1110 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1221 0000: 00 */ 1111 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1222 /* READ { 0, 0x0001, 0x8802 } ->
1223 0000: 10 */
1224 {0x0010, 0x8802}, 1112 {0x0010, 0x8802},
1225 {0x0006, 0x8801}, 1113 {0x0006, 0x8801},
1226 {0x001c, 0x8805}, 1114 {0x001c, 0x8805},
1227 {0x0000, 0x8800}, 1115 {0x0000, 0x8800},
1228 /* READ { 0, 0x0001, 0x8803 } -> 1116 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1229 0000: 00 */
1230 1117
1231 /* READ { 0, 0x0001, 0x8803 } -> 1118 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1232 0000: 00 */ 1119 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1233 /* READ { 0, 0x0001, 0x8802 } ->
1234 0000: 10 */
1235 {0x0010, 0x8802}, 1120 {0x0010, 0x8802},
1236 {0x0007, 0x8801}, 1121 {0x0007, 0x8801},
1237 {0x002a, 0x8805}, 1122 {0x002a, 0x8805},
1238 {0x0000, 0x8800}, 1123 {0x0000, 0x8800},
1239 /* READ { 0, 0x0001, 0x8803 } -> 1124 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1240 0000: 00 */
1241 1125
1242 /* READ { 0, 0x0001, 0x8803 } -> 1126 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1243 0000: 00 */ 1127 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1244 /* READ { 0, 0x0001, 0x8802 } ->
1245 0000: 10 */
1246 {0x0010, 0x8802}, 1128 {0x0010, 0x8802},
1247 {0x000e, 0x8801}, 1129 {0x000e, 0x8801},
1248 {0x0000, 0x8805}, 1130 {0x0000, 0x8805},
1249 {0x0000, 0x8800}, 1131 {0x0000, 0x8800},
1250 /* READ { 0, 0x0001, 0x8803 } -> 1132 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1251 0000: 00 */
1252 1133
1253 /* READ { 0, 0x0001, 0x8803 } -> 1134 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1254 0000: 00 */ 1135 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1255 /* READ { 0, 0x0001, 0x8802 } ->
1256 0000: 10 */
1257 {0x0010, 0x8802}, 1136 {0x0010, 0x8802},
1258 {0x0028, 0x8801}, 1137 {0x0028, 0x8801},
1259 {0x002e, 0x8805}, 1138 {0x002e, 0x8805},
1260 {0x0000, 0x8800}, 1139 {0x0000, 0x8800},
1261 /* READ { 0, 0x0001, 0x8803 } -> 1140 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1262 0000: 00 */
1263 1141
1264 /* READ { 0, 0x0001, 0x8803 } -> 1142 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1265 0000: 00 */ 1143 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1266 /* READ { 0, 0x0001, 0x8802 } ->
1267 0000: 10 */
1268 {0x0010, 0x8802}, 1144 {0x0010, 0x8802},
1269 {0x0039, 0x8801}, 1145 {0x0039, 0x8801},
1270 {0x0013, 0x8805}, 1146 {0x0013, 0x8805},
1271 {0x0000, 0x8800}, 1147 {0x0000, 0x8800},
1272 /* READ { 0, 0x0001, 0x8803 } -> 1148 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1273 0000: 00 */
1274 1149
1275 /* READ { 0, 0x0001, 0x8803 } -> 1150 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1276 0000: 00 */ 1151 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1277 /* READ { 0, 0x0001, 0x8802 } ->
1278 0000: 10 */
1279 {0x0010, 0x8802}, 1152 {0x0010, 0x8802},
1280 {0x003b, 0x8801}, 1153 {0x003b, 0x8801},
1281 {0x000c, 0x8805}, 1154 {0x000c, 0x8805},
1282 {0x0000, 0x8800}, 1155 {0x0000, 0x8800},
1283 /* READ { 0, 0x0001, 0x8803 } -> 1156 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1284 0000: 00 */
1285 1157
1286 /* READ { 0, 0x0001, 0x8803 } -> 1158 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1287 0000: 00 */ 1159 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1288 /* READ { 0, 0x0001, 0x8802 } ->
1289 0000: 10 */
1290 {0x0010, 0x8802}, 1160 {0x0010, 0x8802},
1291 {0x0035, 0x8801}, 1161 {0x0035, 0x8801},
1292 {0x0028, 0x8805}, 1162 {0x0028, 0x8805},
1293 {0x0000, 0x8800}, 1163 {0x0000, 0x8800},
1294 /* READ { 0, 0x0001, 0x8803 } -> 1164 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1295 0000: 00 */
1296 1165
1297 /* READ { 0, 0x0001, 0x8803 } -> 1166 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1298 0000: 00 */ 1167 /* READ { 0x0001, 0x8802 } -> 0000: 10 */
1299 /* READ { 0, 0x0001, 0x8802 } ->
1300 0000: 10 */
1301 {0x0010, 0x8802}, 1168 {0x0010, 0x8802},
1302 {0x0009, 0x8801}, 1169 {0x0009, 0x8801},
1303 {0x0042, 0x8805}, 1170 {0x0042, 0x8805},
1304 {0x0001, 0x8800}, 1171 {0x0001, 0x8800},
1305 /* READ { 0, 0x0001, 0x8803 } -> 1172 /* READ { 0x0001, 0x8803 } -> 0000: 00 */
1306 0000: 00 */
1307 1173
1308 {0x0050, 0x8703}, 1174 {0x0050, 0x8703},
1309 {0x0002, 0x8704}, /* External input CKIx1 */ 1175 {0x0002, 0x8704}, /* External input CKIx1 */
1310 {0x0001, 0x870c}, /* Select CKOx2 output */ 1176 {0x0001, 0x870c}, /* Select CKOx2 output */
1311 {0x009a, 0x8600}, /* Line memory Read Counter (L) */ 1177 {0x009a, 0x8600}, /* Line memory Read Counter (L) */
1312 {0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */ 1178 {0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */
1313 {0x0023, 0x8601}, 1179 {0x0023, 0x8601},
1314 {0x0010, 0x8602}, 1180 {0x0010, 0x8602},
1315 {0x000a, 0x8603}, 1181 {0x000a, 0x8603},
1316 {0x009A, 0x8600}, 1182 {0x009a, 0x8600},
1317 {0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */ 1183 {0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */
1318 {0x0003, 0x865c}, /* Vertical offset for valid lines (L) */ 1184 {0x0003, 0x865c}, /* Vertical offset for valid lines (L) */
1319 {0x0058, 0x865d}, /* Horizontal valid pixels window (L) */ 1185 {0x0058, 0x865d}, /* Horizontal valid pixels window (L) */
@@ -1329,7 +1195,7 @@ static const u16 spca508_vista_init_data[][2] = {
1329 {0x0005, 0x860a}, /* ... */ 1195 {0x0005, 0x860a}, /* ... */
1330 {0x0025, 0x860b}, 1196 {0x0025, 0x860b},
1331 {0x00e1, 0x860c}, 1197 {0x00e1, 0x860c},
1332 {0x00fa, 0x860D}, 1198 {0x00fa, 0x860d},
1333 {0x00f4, 0x860e}, 1199 {0x00f4, 0x860e},
1334 {0x00e8, 0x860f}, 1200 {0x00e8, 0x860f},
1335 {0x0025, 0x8610}, /* A33 Coef. */ 1201 {0x0025, 0x8610}, /* A33 Coef. */
@@ -1344,11 +1210,12 @@ static const u16 spca508_vista_init_data[][2] = {
1344 {0x0040, 0x8654}, /* Gb gain for white balance (L) */ 1210 {0x0040, 0x8654}, /* Gb gain for white balance (L) */
1345 {0x0001, 0x863f}, /* Enable fixed gamma correction */ 1211 {0x0001, 0x863f}, /* Enable fixed gamma correction */
1346 1212
1347 {0x00a1, 0x8656}, /* Size - Window1: 256x256, Window2: 128x128 */ 1213 {0x00a1, 0x8656}, /* Size - Window1: 256x256, Window2: 128x128,
1348 /* UV division: UV no change, Enable New edge enhancement */ 1214 * UV division: UV no change,
1215 * Enable New edge enhancement */
1349 {0x0018, 0x8657}, /* Edge gain high threshold */ 1216 {0x0018, 0x8657}, /* Edge gain high threshold */
1350 {0x0020, 0x8658}, /* Edge gain low threshold */ 1217 {0x0020, 0x8658}, /* Edge gain low threshold */
1351 {0x000A, 0x8659}, /* Edge bandwidth high threshold */ 1218 {0x000a, 0x8659}, /* Edge bandwidth high threshold */
1352 {0x0005, 0x865a}, /* Edge bandwidth low threshold */ 1219 {0x0005, 0x865a}, /* Edge bandwidth low threshold */
1353 {0x0064, 0x8607}, /* UV filter enable */ 1220 {0x0064, 0x8607}, /* UV filter enable */
1354 1221
@@ -1384,29 +1251,20 @@ static const u16 spca508_vista_init_data[][2] = {
1384 {0x0000, 0x86b4}, 1251 {0x0000, 0x86b4},
1385 {0x001e, 0x8660}, 1252 {0x001e, 0x8660},
1386 1253
1387 /* READ { 0, 0x0000, 0x8608 } -> 1254 /* READ { 0x0000, 0x8608 } -> 0000: 13 */
1388 0000: 13 */ 1255 /* READ { 0x0000, 0x8609 } -> 0000: 28 */
1389 /* READ { 0, 0x0000, 0x8609 } -> 1256 /* READ { 0x0000, 0x8610 } -> 0000: 05 */
1390 0000: 28 */ 1257 /* READ { 0x0000, 0x8611 } -> 0000: 25 */
1391 /* READ { 0, 0x0000, 0x8610 } -> 1258 /* READ { 0x0000, 0x8612 } -> 0000: e1 */
1392 0000: 05 */ 1259 /* READ { 0x0000, 0x8613 } -> 0000: fa */
1393 /* READ { 0, 0x0000, 0x8611 } -> 1260 /* READ { 0x0000, 0x8614 } -> 0000: f4 */
1394 0000: 25 */ 1261 /* READ { 0x0000, 0x8615 } -> 0000: e8 */
1395 /* READ { 0, 0x0000, 0x8612 } -> 1262 /* READ { 0x0000, 0x8616 } -> 0000: 25 */
1396 0000: e1 */
1397 /* READ { 0, 0x0000, 0x8613 } ->
1398 0000: fa */
1399 /* READ { 0, 0x0000, 0x8614 } ->
1400 0000: f4 */
1401 /* READ { 0, 0x0000, 0x8615 } ->
1402 0000: e8 */
1403 /* READ { 0, 0x0000, 0x8616 } ->
1404 0000: 25 */
1405 {} 1263 {}
1406}; 1264};
1407 1265
1408static int reg_write(struct usb_device *dev, 1266static int reg_write(struct usb_device *dev,
1409 __u16 index, __u16 value) 1267 u16 index, u16 value)
1410{ 1268{
1411 int ret; 1269 int ret;
1412 1270
@@ -1425,7 +1283,7 @@ static int reg_write(struct usb_device *dev,
1425/* read 1 byte */ 1283/* read 1 byte */
1426/* returns: negative is error, pos or zero is data */ 1284/* returns: negative is error, pos or zero is data */
1427static int reg_read(struct gspca_dev *gspca_dev, 1285static int reg_read(struct gspca_dev *gspca_dev,
1428 __u16 index) /* wIndex */ 1286 u16 index) /* wIndex */
1429{ 1287{
1430 int ret; 1288 int ret;
1431 1289
@@ -1447,16 +1305,16 @@ static int reg_read(struct gspca_dev *gspca_dev,
1447} 1305}
1448 1306
1449static int write_vector(struct gspca_dev *gspca_dev, 1307static int write_vector(struct gspca_dev *gspca_dev,
1450 const u16 data[][2]) 1308 const u16 (*data)[2])
1451{ 1309{
1452 struct usb_device *dev = gspca_dev->dev; 1310 struct usb_device *dev = gspca_dev->dev;
1453 int ret, i = 0; 1311 int ret;
1454 1312
1455 while (data[i][1] != 0) { 1313 while ((*data)[1] != 0) {
1456 ret = reg_write(dev, data[i][1], data[i][0]); 1314 ret = reg_write(dev, (*data)[1], (*data)[0]);
1457 if (ret < 0) 1315 if (ret < 0)
1458 return ret; 1316 return ret;
1459 i++; 1317 data++;
1460 } 1318 }
1461 return 0; 1319 return 0;
1462} 1320}
@@ -1468,6 +1326,15 @@ static int sd_config(struct gspca_dev *gspca_dev,
1468 struct sd *sd = (struct sd *) gspca_dev; 1326 struct sd *sd = (struct sd *) gspca_dev;
1469 struct cam *cam; 1327 struct cam *cam;
1470 int data1, data2; 1328 int data1, data2;
1329 const u16 (*init_data)[2];
1330 static const u16 (*(init_data_tb[]))[2] = {
1331 spca508_vista_init_data, /* CreativeVista 0 */
1332 spca508_sightcam_init_data, /* HamaUSBSightcam 1 */
1333 spca508_sightcam2_init_data, /* HamaUSBSightcam2 2 */
1334 spca508cs110_init_data, /* IntelEasyPCCamera 3 */
1335 spca508cs110_init_data, /* MicroInnovationIC200 4 */
1336 spca508_init_data, /* ViewQuestVQ110 5 */
1337 };
1471 1338
1472 /* Read from global register the USB product and vendor IDs, just to 1339 /* Read from global register the USB product and vendor IDs, just to
1473 * prove that we can communicate with the device. This works, which 1340 * prove that we can communicate with the device. This works, which
@@ -1491,37 +1358,13 @@ static int sd_config(struct gspca_dev *gspca_dev,
1491 sd->subtype = id->driver_info; 1358 sd->subtype = id->driver_info;
1492 sd->brightness = BRIGHTNESS_DEF; 1359 sd->brightness = BRIGHTNESS_DEF;
1493 1360
1494 switch (sd->subtype) { 1361 init_data = init_data_tb[sd->subtype];
1495 case ViewQuestVQ110: 1362 return write_vector(gspca_dev, init_data);
1496 if (write_vector(gspca_dev, spca508_init_data))
1497 return -1;
1498 break;
1499 default:
1500/* case MicroInnovationIC200: */
1501/* case IntelEasyPCCamera: */
1502 if (write_vector(gspca_dev, spca508cs110_init_data))
1503 return -1;
1504 break;
1505 case HamaUSBSightcam:
1506 if (write_vector(gspca_dev, spca508_sightcam_init_data))
1507 return -1;
1508 break;
1509 case HamaUSBSightcam2:
1510 if (write_vector(gspca_dev, spca508_sightcam2_init_data))
1511 return -1;
1512 break;
1513 case CreativeVista:
1514 if (write_vector(gspca_dev, spca508_vista_init_data))
1515 return -1;
1516 break;
1517 }
1518 return 0; /* success */
1519} 1363}
1520 1364
1521/* this function is called at probe and resume time */ 1365/* this function is called at probe and resume time */
1522static int sd_init(struct gspca_dev *gspca_dev) 1366static int sd_init(struct gspca_dev *gspca_dev)
1523{ 1367{
1524/* write_vector(gspca_dev, spca508_open_data); */
1525 return 0; 1368 return 0;
1526} 1369}
1527 1370
@@ -1529,7 +1372,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
1529{ 1372{
1530 int mode; 1373 int mode;
1531 1374
1532 mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv; 1375 mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
1533 reg_write(gspca_dev->dev, 0x8500, mode); 1376 reg_write(gspca_dev->dev, 0x8500, mode);
1534 switch (mode) { 1377 switch (mode) {
1535 case 0: 1378 case 0:
@@ -1554,7 +1397,7 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
1554 1397
1555static void sd_pkt_scan(struct gspca_dev *gspca_dev, 1398static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1556 struct gspca_frame *frame, /* target */ 1399 struct gspca_frame *frame, /* target */
1557 __u8 *data, /* isoc packet */ 1400 u8 *data, /* isoc packet */
1558 int len) /* iso packet length */ 1401 int len) /* iso packet length */
1559{ 1402{
1560 switch (data[0]) { 1403 switch (data[0]) {
@@ -1567,7 +1410,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1567 data, len); 1410 data, len);
1568 break; 1411 break;
1569 case 0xff: /* drop */ 1412 case 0xff: /* drop */
1570/* gspca_dev->last_packet_type = DISCARD_PACKET; */
1571 break; 1413 break;
1572 default: 1414 default:
1573 data += 1; 1415 data += 1;
@@ -1581,7 +1423,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1581static void setbrightness(struct gspca_dev *gspca_dev) 1423static void setbrightness(struct gspca_dev *gspca_dev)
1582{ 1424{
1583 struct sd *sd = (struct sd *) gspca_dev; 1425 struct sd *sd = (struct sd *) gspca_dev;
1584 __u8 brightness = sd->brightness; 1426 u8 brightness = sd->brightness;
1585 1427
1586 /* MX seem contrast */ 1428 /* MX seem contrast */
1587 reg_write(gspca_dev->dev, 0x8651, brightness); 1429 reg_write(gspca_dev->dev, 0x8651, brightness);
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index c99c5e34e211..27e82b35f3e7 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -34,8 +34,8 @@ struct sd {
34 34
35 __u16 exposure; /* rev12a only */ 35 __u16 exposure; /* rev12a only */
36#define EXPOSURE_MIN 1 36#define EXPOSURE_MIN 1
37#define EXPOSURE_DEF 200 37#define EXPOSURE_DEF 700 /* == 10 fps */
38#define EXPOSURE_MAX (4095 - 900) /* see set_exposure */ 38#define EXPOSURE_MAX (2047 + 325) /* see setexposure */
39 39
40 __u8 contrast; /* rev72a only */ 40 __u8 contrast; /* rev72a only */
41#define CONTRAST_MIN 0x00 41#define CONTRAST_MIN 0x00
@@ -48,9 +48,9 @@ struct sd {
48#define BRIGHTNESS_MAX 0x3f 48#define BRIGHTNESS_MAX 0x3f
49 49
50 __u8 white; 50 __u8 white;
51#define WHITE_MIN 1 51#define HUE_MIN 1
52#define WHITE_DEF 0x40 52#define HUE_DEF 0x40
53#define WHITE_MAX 0x7f 53#define HUE_MAX 0x7f
54 54
55 __u8 autogain; 55 __u8 autogain;
56#define AUTOGAIN_MIN 0 56#define AUTOGAIN_MIN 0
@@ -58,9 +58,9 @@ struct sd {
58#define AUTOGAIN_MAX 1 58#define AUTOGAIN_MAX 1
59 59
60 __u8 gain; /* rev12a only */ 60 __u8 gain; /* rev12a only */
61#define GAIN_MIN 0x0 61#define GAIN_MIN 0
62#define GAIN_DEF 0x24 62#define GAIN_DEF 63
63#define GAIN_MAX 0x24 63#define GAIN_MAX 255
64 64
65#define EXPO12A_DEF 3 65#define EXPO12A_DEF 3
66 __u8 expo12a; /* expo/gain? for rev 12a */ 66 __u8 expo12a; /* expo/gain? for rev 12a */
@@ -461,7 +461,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
461 } 461 }
462 sd->brightness = BRIGHTNESS_DEF; 462 sd->brightness = BRIGHTNESS_DEF;
463 sd->contrast = CONTRAST_DEF; 463 sd->contrast = CONTRAST_DEF;
464 sd->white = WHITE_DEF; 464 sd->white = HUE_DEF;
465 sd->exposure = EXPOSURE_DEF; 465 sd->exposure = EXPOSURE_DEF;
466 sd->autogain = AUTOGAIN_DEF; 466 sd->autogain = AUTOGAIN_DEF;
467 sd->gain = GAIN_DEF; 467 sd->gain = GAIN_DEF;
@@ -549,8 +549,7 @@ static void setcontrast(struct gspca_dev *gspca_dev)
549static void setexposure(struct gspca_dev *gspca_dev) 549static void setexposure(struct gspca_dev *gspca_dev)
550{ 550{
551 struct sd *sd = (struct sd *) gspca_dev; 551 struct sd *sd = (struct sd *) gspca_dev;
552 int expo; 552 int i, expo = 0;
553 int clock_divider;
554 553
555 /* Register 0x8309 controls exposure for the spca561, 554 /* Register 0x8309 controls exposure for the spca561,
556 the basic exposure setting goes from 1-2047, where 1 is completely 555 the basic exposure setting goes from 1-2047, where 1 is completely
@@ -564,16 +563,22 @@ static void setexposure(struct gspca_dev *gspca_dev)
564 configure a divider for the base framerate which us used at the 563 configure a divider for the base framerate which us used at the
565 exposure setting of 1-300. These bits configure the base framerate 564 exposure setting of 1-300. These bits configure the base framerate
566 according to the following formula: fps = 60 / (value + 2) */ 565 according to the following formula: fps = 60 / (value + 2) */
567 if (sd->exposure < 2048) { 566
568 expo = sd->exposure; 567 /* We choose to use the high bits setting the fixed framerate divisor
569 clock_divider = 0; 568 asap, as setting high basic exposure setting without the fixed
570 } else { 569 divider in combination with high gains makes the cam stop */
571 /* Add 900 to make the 0 setting of the second part of the 570 int table[] = { 0, 450, 550, 625, EXPOSURE_MAX };
572 exposure equal to the 2047 setting of the first part. */ 571
573 expo = (sd->exposure - 2048) + 900; 572 for (i = 0; i < ARRAY_SIZE(table) - 1; i++) {
574 clock_divider = 3; 573 if (sd->exposure <= table[i + 1]) {
574 expo = sd->exposure - table[i];
575 if (i)
576 expo += 300;
577 expo |= i << 11;
578 break;
579 }
575 } 580 }
576 expo |= clock_divider << 11; 581
577 gspca_dev->usb_buf[0] = expo; 582 gspca_dev->usb_buf[0] = expo;
578 gspca_dev->usb_buf[1] = expo >> 8; 583 gspca_dev->usb_buf[1] = expo >> 8;
579 reg_w_buf(gspca_dev, 0x8309, 2); 584 reg_w_buf(gspca_dev, 0x8309, 2);
@@ -584,7 +589,16 @@ static void setgain(struct gspca_dev *gspca_dev)
584{ 589{
585 struct sd *sd = (struct sd *) gspca_dev; 590 struct sd *sd = (struct sd *) gspca_dev;
586 591
587 gspca_dev->usb_buf[0] = sd->gain; 592 /* gain reg low 6 bits 0-63 gain, bit 6 and 7, both double the
593 sensitivity when set, so 31 + one of them set == 63, and 15
594 with both of them set == 63 */
595 if (sd->gain < 64)
596 gspca_dev->usb_buf[0] = sd->gain;
597 else if (sd->gain < 128)
598 gspca_dev->usb_buf[0] = (sd->gain / 2) | 0x40;
599 else
600 gspca_dev->usb_buf[0] = (sd->gain / 4) | 0xC0;
601
588 gspca_dev->usb_buf[1] = 0; 602 gspca_dev->usb_buf[1] = 0;
589 reg_w_buf(gspca_dev, 0x8335, 2); 603 reg_w_buf(gspca_dev, 0x8335, 2);
590} 604}
@@ -629,8 +643,7 @@ static int sd_start_12a(struct gspca_dev *gspca_dev)
629 reg_w_buf(gspca_dev, 0x8391, 8); 643 reg_w_buf(gspca_dev, 0x8391, 8);
630 reg_w_buf(gspca_dev, 0x8390, 8); 644 reg_w_buf(gspca_dev, 0x8390, 8);
631 setwhite(gspca_dev); 645 setwhite(gspca_dev);
632 setautogain(gspca_dev); 646 setgain(gspca_dev);
633/* setgain(gspca_dev); */
634 setexposure(gspca_dev); 647 setexposure(gspca_dev);
635 return 0; 648 return 0;
636} 649}
@@ -762,18 +775,6 @@ static void do_autogain(struct gspca_dev *gspca_dev)
762 i2c_write(gspca_dev, expotimes | pixelclk, 0x09); 775 i2c_write(gspca_dev, expotimes | pixelclk, 0x09);
763 } 776 }
764 break; 777 break;
765 case Rev012A:
766 reg_r(gspca_dev, 0x8330, 2);
767 if (gspca_dev->usb_buf[1] > 0x08) {
768 gspca_dev->usb_buf[0] = ++sd->expo12a;
769 gspca_dev->usb_buf[1] = 0;
770 reg_w_buf(gspca_dev, 0x8339, 2);
771 } else if (gspca_dev->usb_buf[1] < 0x02) {
772 gspca_dev->usb_buf[0] = --sd->expo12a;
773 gspca_dev->usb_buf[1] = 0;
774 reg_w_buf(gspca_dev, 0x8339, 2);
775 }
776 break;
777 } 778 }
778} 779}
779 780
@@ -928,13 +929,13 @@ static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
928static struct ctrl sd_ctrls_12a[] = { 929static struct ctrl sd_ctrls_12a[] = {
929 { 930 {
930 { 931 {
931 .id = V4L2_CID_DO_WHITE_BALANCE, 932 .id = V4L2_CID_HUE,
932 .type = V4L2_CTRL_TYPE_INTEGER, 933 .type = V4L2_CTRL_TYPE_INTEGER,
933 .name = "White Balance", 934 .name = "Hue",
934 .minimum = WHITE_MIN, 935 .minimum = HUE_MIN,
935 .maximum = WHITE_MAX, 936 .maximum = HUE_MAX,
936 .step = 1, 937 .step = 1,
937 .default_value = WHITE_DEF, 938 .default_value = HUE_DEF,
938 }, 939 },
939 .set = sd_setwhite, 940 .set = sd_setwhite,
940 .get = sd_getwhite, 941 .get = sd_getwhite,
@@ -954,19 +955,6 @@ static struct ctrl sd_ctrls_12a[] = {
954 }, 955 },
955 { 956 {
956 { 957 {
957 .id = V4L2_CID_AUTOGAIN,
958 .type = V4L2_CTRL_TYPE_BOOLEAN,
959 .name = "Auto Gain",
960 .minimum = AUTOGAIN_MIN,
961 .maximum = AUTOGAIN_MAX,
962 .step = 1,
963 .default_value = AUTOGAIN_DEF,
964 },
965 .set = sd_setautogain,
966 .get = sd_getautogain,
967 },
968 {
969 {
970 .id = V4L2_CID_GAIN, 958 .id = V4L2_CID_GAIN,
971 .type = V4L2_CTRL_TYPE_INTEGER, 959 .type = V4L2_CTRL_TYPE_INTEGER,
972 .name = "Gain", 960 .name = "Gain",
@@ -983,13 +971,13 @@ static struct ctrl sd_ctrls_12a[] = {
983static struct ctrl sd_ctrls_72a[] = { 971static struct ctrl sd_ctrls_72a[] = {
984 { 972 {
985 { 973 {
986 .id = V4L2_CID_DO_WHITE_BALANCE, 974 .id = V4L2_CID_HUE,
987 .type = V4L2_CTRL_TYPE_INTEGER, 975 .type = V4L2_CTRL_TYPE_INTEGER,
988 .name = "White Balance", 976 .name = "Hue",
989 .minimum = WHITE_MIN, 977 .minimum = HUE_MIN,
990 .maximum = WHITE_MAX, 978 .maximum = HUE_MAX,
991 .step = 1, 979 .step = 1,
992 .default_value = WHITE_DEF, 980 .default_value = HUE_DEF,
993 }, 981 },
994 .set = sd_setwhite, 982 .set = sd_setwhite,
995 .get = sd_getwhite, 983 .get = sd_getwhite,
@@ -1046,7 +1034,6 @@ static const struct sd_desc sd_desc_12a = {
1046 .stopN = sd_stopN, 1034 .stopN = sd_stopN,
1047 .stop0 = sd_stop0, 1035 .stop0 = sd_stop0,
1048 .pkt_scan = sd_pkt_scan, 1036 .pkt_scan = sd_pkt_scan,
1049/* .dq_callback = do_autogain, * fixme */
1050}; 1037};
1051static const struct sd_desc sd_desc_72a = { 1038static const struct sd_desc sd_desc_72a = {
1052 .name = MODULE_NAME, 1039 .name = MODULE_NAME,
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 2e1cdf068fda..715a68f0156e 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -309,6 +309,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
309 struct sd *dev = (struct sd *) gspca_dev; 309 struct sd *dev = (struct sd *) gspca_dev;
310 310
311 /* We don't use the buffer gspca allocates so make it small. */ 311 /* We don't use the buffer gspca allocates so make it small. */
312 cam->bulk = 1;
312 cam->bulk_size = 64; 313 cam->bulk_size = 64;
313 314
314 INIT_WORK(&dev->work_struct, sq905_dostream); 315 INIT_WORK(&dev->work_struct, sq905_dostream);
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index 0bcb74a1b143..916892505432 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -206,6 +206,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
206 cam->nmodes = 1; 206 cam->nmodes = 1;
207 /* We don't use the buffer gspca allocates so make it small. */ 207 /* We don't use the buffer gspca allocates so make it small. */
208 cam->bulk_size = 32; 208 cam->bulk_size = 32;
209 cam->bulk = 1;
209 INIT_WORK(&dev->work_struct, sq905c_dostream); 210 INIT_WORK(&dev->work_struct, sq905c_dostream);
210 return 0; 211 return 0;
211} 212}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 9dff2e65b116..e573c3406324 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -293,8 +293,6 @@ static void stv06xx_stopN(struct gspca_dev *gspca_dev)
293 goto out; 293 goto out;
294 294
295 err = sd->sensor->stop(sd); 295 err = sd->sensor->stop(sd);
296 if (err < 0)
297 goto out;
298 296
299out: 297out:
300 if (err < 0) 298 if (err < 0)
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
index 69c77c932fc0..11a0c002f5dc 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c
@@ -80,12 +80,26 @@ static const struct ctrl vv6410_ctrl[] = {
80 .minimum = 0, 80 .minimum = 0,
81 .maximum = 15, 81 .maximum = 15,
82 .step = 1, 82 .step = 1,
83 .default_value = 0 83 .default_value = 10
84 }, 84 },
85 .set = vv6410_set_analog_gain, 85 .set = vv6410_set_analog_gain,
86 .get = vv6410_get_analog_gain 86 .get = vv6410_get_analog_gain
87 },
88#define EXPOSURE_IDX 3
89 {
90 {
91 .id = V4L2_CID_EXPOSURE,
92 .type = V4L2_CTRL_TYPE_INTEGER,
93 .name = "exposure",
94 .minimum = 0,
95 .maximum = 32768,
96 .step = 1,
97 .default_value = 20000
98 },
99 .set = vv6410_set_exposure,
100 .get = vv6410_get_exposure
87 } 101 }
88}; 102 };
89 103
90static int vv6410_probe(struct sd *sd) 104static int vv6410_probe(struct sd *sd)
91{ 105{
@@ -121,6 +135,7 @@ static int vv6410_probe(struct sd *sd)
121static int vv6410_init(struct sd *sd) 135static int vv6410_init(struct sd *sd)
122{ 136{
123 int err = 0, i; 137 int err = 0, i;
138 s32 *sensor_settings = sd->sensor_priv;
124 139
125 for (i = 0; i < ARRAY_SIZE(stv_bridge_init); i++) { 140 for (i = 0; i < ARRAY_SIZE(stv_bridge_init); i++) {
126 /* if NULL then len contains single value */ 141 /* if NULL then len contains single value */
@@ -142,6 +157,16 @@ static int vv6410_init(struct sd *sd)
142 157
143 err = stv06xx_write_sensor_bytes(sd, (u8 *) vv6410_sensor_init, 158 err = stv06xx_write_sensor_bytes(sd, (u8 *) vv6410_sensor_init,
144 ARRAY_SIZE(vv6410_sensor_init)); 159 ARRAY_SIZE(vv6410_sensor_init));
160 if (err < 0)
161 return err;
162
163 err = vv6410_set_exposure(&sd->gspca_dev,
164 sensor_settings[EXPOSURE_IDX]);
165 if (err < 0)
166 return err;
167
168 err = vv6410_set_analog_gain(&sd->gspca_dev,
169 sensor_settings[GAIN_IDX]);
145 170
146 return (err < 0) ? err : 0; 171 return (err < 0) ? err : 0;
147} 172}
@@ -318,3 +343,50 @@ static int vv6410_set_analog_gain(struct gspca_dev *gspca_dev, __s32 val)
318 343
319 return (err < 0) ? err : 0; 344 return (err < 0) ? err : 0;
320} 345}
346
347static int vv6410_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
348{
349 struct sd *sd = (struct sd *) gspca_dev;
350 s32 *sensor_settings = sd->sensor_priv;
351
352 *val = sensor_settings[EXPOSURE_IDX];
353
354 PDEBUG(D_V4L2, "Read exposure %d", *val);
355
356 return 0;
357}
358
359static int vv6410_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
360{
361 int err;
362 struct sd *sd = (struct sd *) gspca_dev;
363 s32 *sensor_settings = sd->sensor_priv;
364 unsigned int fine, coarse;
365
366 sensor_settings[EXPOSURE_IDX] = val;
367
368 val = (val * val >> 14) + val / 4;
369
370 fine = val % VV6410_CIF_LINELENGTH;
371 coarse = min(512, val / VV6410_CIF_LINELENGTH);
372
373 PDEBUG(D_V4L2, "Set coarse exposure to %d, fine expsure to %d",
374 coarse, fine);
375
376 err = stv06xx_write_sensor(sd, VV6410_FINEH, fine >> 8);
377 if (err < 0)
378 goto out;
379
380 err = stv06xx_write_sensor(sd, VV6410_FINEL, fine & 0xff);
381 if (err < 0)
382 goto out;
383
384 err = stv06xx_write_sensor(sd, VV6410_COARSEH, coarse >> 8);
385 if (err < 0)
386 goto out;
387
388 err = stv06xx_write_sensor(sd, VV6410_COARSEL, coarse & 0xff);
389
390out:
391 return err;
392}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
index 95ac55891bd4..487d40555343 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_vv6410.h
@@ -173,6 +173,8 @@
173#define VV6410_SUBSAMPLE 0x01 173#define VV6410_SUBSAMPLE 0x01
174#define VV6410_CROP_TO_QVGA 0x02 174#define VV6410_CROP_TO_QVGA 0x02
175 175
176#define VV6410_CIF_LINELENGTH 415
177
176static int vv6410_probe(struct sd *sd); 178static int vv6410_probe(struct sd *sd);
177static int vv6410_start(struct sd *sd); 179static int vv6410_start(struct sd *sd);
178static int vv6410_init(struct sd *sd); 180static int vv6410_init(struct sd *sd);
@@ -187,6 +189,8 @@ static int vv6410_get_vflip(struct gspca_dev *gspca_dev, __s32 *val);
187static int vv6410_set_vflip(struct gspca_dev *gspca_dev, __s32 val); 189static int vv6410_set_vflip(struct gspca_dev *gspca_dev, __s32 val);
188static int vv6410_get_analog_gain(struct gspca_dev *gspca_dev, __s32 *val); 190static int vv6410_get_analog_gain(struct gspca_dev *gspca_dev, __s32 *val);
189static int vv6410_set_analog_gain(struct gspca_dev *gspca_dev, __s32 val); 191static int vv6410_set_analog_gain(struct gspca_dev *gspca_dev, __s32 val);
192static int vv6410_get_exposure(struct gspca_dev *gspca_dev, __s32 *val);
193static int vv6410_set_exposure(struct gspca_dev *gspca_dev, __s32 val);
190 194
191const struct stv06xx_sensor stv06xx_sensor_vv6410 = { 195const struct stv06xx_sensor stv06xx_sensor_vv6410 = {
192 .name = "ST VV6410", 196 .name = "ST VV6410",
@@ -242,12 +246,6 @@ static const u8 vv6410_sensor_init[][2] = {
242 /* Pre-clock generator divide off */ 246 /* Pre-clock generator divide off */
243 {VV6410_DATAFORMAT, BIT(7) | BIT(0)}, 247 {VV6410_DATAFORMAT, BIT(7) | BIT(0)},
244 248
245 /* Exposure registers */
246 {VV6410_FINEH, VV6410_FINE_EXPOSURE >> 8},
247 {VV6410_FINEL, VV6410_FINE_EXPOSURE & 0xff},
248 {VV6410_COARSEH, VV6410_COARSE_EXPOSURE >> 8},
249 {VV6410_COARSEL, VV6410_COARSE_EXPOSURE & 0xff},
250 {VV6410_ANALOGGAIN, 0xf0 | VV6410_DEFAULT_GAIN},
251 {VV6410_CLKDIV, VV6410_CLK_DIV_2}, 249 {VV6410_CLKDIV, VV6410_CLK_DIV_2},
252 250
253 /* System registers */ 251 /* System registers */
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index c2b8c10c075a..9623f294bdac 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -32,9 +32,6 @@ MODULE_LICENSE("GPL");
32struct sd { 32struct sd {
33 struct gspca_dev gspca_dev; /* !! must be the first item */ 33 struct gspca_dev gspca_dev; /* !! must be the first item */
34 34
35 __u8 packet[ISO_MAX_SIZE + 128];
36 /* !! no more than 128 ff in an ISO packet */
37
38 unsigned char brightness; 35 unsigned char brightness;
39 unsigned char contrast; 36 unsigned char contrast;
40 unsigned char colors; 37 unsigned char colors;
@@ -1103,7 +1100,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1103{ 1100{
1104 struct sd *sd = (struct sd *) gspca_dev; 1101 struct sd *sd = (struct sd *) gspca_dev;
1105 int i, sof = 0; 1102 int i, sof = 0;
1106 unsigned char *s, *d;
1107 static unsigned char ffd9[] = {0xff, 0xd9}; 1103 static unsigned char ffd9[] = {0xff, 0xd9};
1108 1104
1109/* frames are jpeg 4.1.1 without 0xff escape */ 1105/* frames are jpeg 4.1.1 without 0xff escape */
@@ -1177,22 +1173,19 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
1177 } 1173 }
1178 1174
1179 /* add 0x00 after 0xff */ 1175 /* add 0x00 after 0xff */
1180 for (i = len; --i >= 0; ) 1176 i = 0;
1181 if (data[i] == 0xff) 1177 do {
1182 break; 1178 if (data[i] == 0xff) {
1183 if (i < 0) { /* no 0xff */ 1179 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
1184 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len); 1180 data, i + 1);
1185 return; 1181 len -= i;
1186 } 1182 data += i;
1187 s = data; 1183 *data = 0x00;
1188 d = sd->packet; 1184 i = 0;
1189 for (i = 0; i < len; i++) { 1185 }
1190 *d++ = *s++; 1186 i++;
1191 if (s[-1] == 0xff) 1187 } while (i < len);
1192 *d++ = 0x00; 1188 gspca_frame_add(gspca_dev, INTER_PACKET, frame, data, len);
1193 }
1194 gspca_frame_add(gspca_dev, INTER_PACKET, frame,
1195 sd->packet, d - sd->packet);
1196} 1189}
1197 1190
1198static void setbrightness(struct gspca_dev *gspca_dev) 1191static void setbrightness(struct gspca_dev *gspca_dev)
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index f63e37e2e4fd..404214b8cd2b 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -697,7 +697,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
697 return -EINVAL; 697 return -EINVAL;
698 } 698 }
699 699
700 if (sd->sensor != SENSOR_OTHER) { 700 if (sd->sensor == SENSOR_OM6802) {
701 reg_w_buf(gspca_dev, n1, sizeof n1); 701 reg_w_buf(gspca_dev, n1, sizeof n1);
702 i = 5; 702 i = 5;
703 while (--i >= 0) { 703 while (--i >= 0) {
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index e4e933c400bc..26dd155efcc3 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -42,7 +42,7 @@ struct sd {
42 char bridge; 42 char bridge;
43#define BRIDGE_VC0321 0 43#define BRIDGE_VC0321 0
44#define BRIDGE_VC0323 1 44#define BRIDGE_VC0323 1
45 char sensor; 45 u8 sensor;
46#define SENSOR_HV7131R 0 46#define SENSOR_HV7131R 0
47#define SENSOR_MI0360 1 47#define SENSOR_MI0360 1
48#define SENSOR_MI1310_SOC 2 48#define SENSOR_MI1310_SOC 2
@@ -159,17 +159,17 @@ static const struct v4l2_pix_format vc0323_mode[] = {
159 .priv = 2}, 159 .priv = 2},
160}; 160};
161static const struct v4l2_pix_format bi_mode[] = { 161static const struct v4l2_pix_format bi_mode[] = {
162 {320, 240, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE, 162 {320, 240, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
163 .bytesperline = 320, 163 .bytesperline = 320,
164 .sizeimage = 320 * 240 * 2, 164 .sizeimage = 320 * 240 * 2,
165 .colorspace = V4L2_COLORSPACE_SRGB, 165 .colorspace = V4L2_COLORSPACE_SRGB,
166 .priv = 2}, 166 .priv = 2},
167 {640, 480, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE, 167 {640, 480, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
168 .bytesperline = 640, 168 .bytesperline = 640,
169 .sizeimage = 640 * 480 * 2, 169 .sizeimage = 640 * 480 * 2,
170 .colorspace = V4L2_COLORSPACE_SRGB, 170 .colorspace = V4L2_COLORSPACE_SRGB,
171 .priv = 1}, 171 .priv = 1},
172 {1280, 1024, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE, 172 {1280, 1024, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
173 .bytesperline = 1280, 173 .bytesperline = 1280,
174 .sizeimage = 1280 * 1024 * 2, 174 .sizeimage = 1280 * 1024 * 2,
175 .colorspace = V4L2_COLORSPACE_SRGB, 175 .colorspace = V4L2_COLORSPACE_SRGB,
@@ -2453,6 +2453,17 @@ static int sd_config(struct gspca_dev *gspca_dev,
2453 struct usb_device *dev = gspca_dev->dev; 2453 struct usb_device *dev = gspca_dev->dev;
2454 struct cam *cam; 2454 struct cam *cam;
2455 int sensor; 2455 int sensor;
2456 static u8 npkt[] = { /* number of packets per ISOC message */
2457 64, /* HV7131R 0 */
2458 32, /* MI0360 1 */
2459 32, /* MI1310_SOC 2 */
2460 64, /* MI1320 3 */
2461 128, /* MI1320_SOC 4 */
2462 32, /* OV7660 5 */
2463 64, /* OV7670 6 */
2464 128, /* PO1200 7 */
2465 128, /* PO3130NC 8 */
2466 };
2456 2467
2457 cam = &gspca_dev->cam; 2468 cam = &gspca_dev->cam;
2458 sd->bridge = id->driver_info; 2469 sd->bridge = id->driver_info;
@@ -2508,6 +2519,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
2508 case SENSOR_MI1320_SOC: 2519 case SENSOR_MI1320_SOC:
2509 cam->cam_mode = bi_mode; 2520 cam->cam_mode = bi_mode;
2510 cam->nmodes = ARRAY_SIZE(bi_mode); 2521 cam->nmodes = ARRAY_SIZE(bi_mode);
2522 cam->input_flags = V4L2_IN_ST_VFLIP |
2523 V4L2_IN_ST_HFLIP;
2511 break; 2524 break;
2512 default: 2525 default:
2513 cam->cam_mode = vc0323_mode; 2526 cam->cam_mode = vc0323_mode;
@@ -2515,6 +2528,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
2515 break; 2528 break;
2516 } 2529 }
2517 } 2530 }
2531 cam->npkt = npkt[sd->sensor];
2518 2532
2519 sd->hflip = HFLIP_DEF; 2533 sd->hflip = HFLIP_DEF;
2520 sd->vflip = VFLIP_DEF; 2534 sd->vflip = VFLIP_DEF;
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 4fe01d8b6c87..08422d315e68 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -6307,7 +6307,7 @@ static __u16 i2c_read(struct gspca_dev *gspca_dev,
6307 retbyte = reg_r_i(gspca_dev, 0x0091); /* read status */ 6307 retbyte = reg_r_i(gspca_dev, 0x0091); /* read status */
6308 retval = reg_r_i(gspca_dev, 0x0095); /* read Lowbyte */ 6308 retval = reg_r_i(gspca_dev, 0x0095); /* read Lowbyte */
6309 retval |= reg_r_i(gspca_dev, 0x0096) << 8; /* read Hightbyte */ 6309 retval |= reg_r_i(gspca_dev, 0x0096) << 8; /* read Hightbyte */
6310 PDEBUG(D_USBO, "i2c r [%02x] -> %04x (%02x)", 6310 PDEBUG(D_USBI, "i2c r [%02x] -> %04x (%02x)",
6311 reg, retval, retbyte); 6311 reg, retval, retbyte);
6312 return retval; 6312 return retval;
6313} 6313}
@@ -6868,7 +6868,6 @@ static const struct sensor_by_chipset_revision chipset_revision_sensor[] = {
6868 {0x8001, 0x13}, 6868 {0x8001, 0x13},
6869 {0x8000, 0x14}, /* CS2102K */ 6869 {0x8000, 0x14}, /* CS2102K */
6870 {0x8400, 0x15}, /* TAS5130K */ 6870 {0x8400, 0x15}, /* TAS5130K */
6871 {0x4001, 0x16}, /* ADCM2700 */
6872}; 6871};
6873 6872
6874static int vga_3wr_probe(struct gspca_dev *gspca_dev) 6873static int vga_3wr_probe(struct gspca_dev *gspca_dev)
@@ -6904,12 +6903,15 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
6904 retword |= reg_r(gspca_dev, 0x000a); 6903 retword |= reg_r(gspca_dev, 0x000a);
6905 PDEBUG(D_PROBE, "probe 3wr vga 1 0x%04x", retword); 6904 PDEBUG(D_PROBE, "probe 3wr vga 1 0x%04x", retword);
6906 reg_r(gspca_dev, 0x0010); 6905 reg_r(gspca_dev, 0x0010);
6907 /* this is tested only once anyway */ 6906 /* value 0x4001 is meaningless */
6908 for (i = 0; i < ARRAY_SIZE(chipset_revision_sensor); i++) { 6907 if (retword != 0x4001) {
6909 if (chipset_revision_sensor[i].revision == retword) { 6908 for (i = 0; i < ARRAY_SIZE(chipset_revision_sensor); i++) {
6910 sd->chip_revision = retword; 6909 if (chipset_revision_sensor[i].revision == retword) {
6911 send_unknown(dev, SENSOR_PB0330); 6910 sd->chip_revision = retword;
6912 return chipset_revision_sensor[i].internal_sensor_id; 6911 send_unknown(dev, SENSOR_PB0330);
6912 return chipset_revision_sensor[i]
6913 .internal_sensor_id;
6914 }
6913 } 6915 }
6914 } 6916 }
6915 6917
@@ -6980,12 +6982,12 @@ static int vga_3wr_probe(struct gspca_dev *gspca_dev)
6980 reg_w(dev, 0x01, 0x0001); 6982 reg_w(dev, 0x01, 0x0001);
6981 reg_w(dev, 0x03, 0x0012); 6983 reg_w(dev, 0x03, 0x0012);
6982 reg_w(dev, 0x01, 0x0012); 6984 reg_w(dev, 0x01, 0x0012);
6983 reg_w(dev, 0x05, 0x0001); 6985 reg_w(dev, 0x05, 0x0012);
6984 reg_w(dev, 0xd3, 0x008b); 6986 reg_w(dev, 0xd3, 0x008b);
6985 retword = i2c_read(gspca_dev, 0x01); 6987 retword = i2c_read(gspca_dev, 0x01);
6986 if (retword != 0) { 6988 if (retword != 0) {
6987 PDEBUG(D_PROBE, "probe 3wr vga type 0a ? ret: %04x", retword); 6989 PDEBUG(D_PROBE, "probe 3wr vga type 0a ? ret: %04x", retword);
6988 return retword; 6990 return 0x16; /* adcm2700 (6100/6200) */
6989 } 6991 }
6990 return -1; 6992 return -1;
6991} 6993}
diff --git a/drivers/media/video/hexium_gemini.c b/drivers/media/video/hexium_gemini.c
index 8e1463ee1b64..71c211402eb5 100644
--- a/drivers/media/video/hexium_gemini.c
+++ b/drivers/media/video/hexium_gemini.c
@@ -224,7 +224,7 @@ static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
224{ 224{
225 DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index)); 225 DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));
226 226
227 if (i->index < 0 || i->index >= HEXIUM_INPUTS) 227 if (i->index >= HEXIUM_INPUTS)
228 return -EINVAL; 228 return -EINVAL;
229 229
230 memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input)); 230 memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input));
diff --git a/drivers/media/video/hexium_orion.c b/drivers/media/video/hexium_orion.c
index 2bc39f628455..39d65ca41c62 100644
--- a/drivers/media/video/hexium_orion.c
+++ b/drivers/media/video/hexium_orion.c
@@ -325,7 +325,7 @@ static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
325{ 325{
326 DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index)); 326 DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));
327 327
328 if (i->index < 0 || i->index >= HEXIUM_INPUTS) 328 if (i->index >= HEXIUM_INPUTS)
329 return -EINVAL; 329 return -EINVAL;
330 330
331 memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input)); 331 memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input));
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 092c7da0f37a..86f2fefe1edf 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -74,7 +74,7 @@ static int get_key_haup_common(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw,
74 int start, range, toggle, dev, code, ircode; 74 int start, range, toggle, dev, code, ircode;
75 75
76 /* poll IR chip */ 76 /* poll IR chip */
77 if (size != i2c_master_recv(&ir->c,buf,size)) 77 if (size != i2c_master_recv(ir->c, buf, size))
78 return -EIO; 78 return -EIO;
79 79
80 /* split rc5 data block ... */ 80 /* split rc5 data block ... */
@@ -137,7 +137,7 @@ static int get_key_pixelview(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
137 unsigned char b; 137 unsigned char b;
138 138
139 /* poll IR chip */ 139 /* poll IR chip */
140 if (1 != i2c_master_recv(&ir->c,&b,1)) { 140 if (1 != i2c_master_recv(ir->c, &b, 1)) {
141 dprintk(1,"read error\n"); 141 dprintk(1,"read error\n");
142 return -EIO; 142 return -EIO;
143 } 143 }
@@ -151,7 +151,7 @@ static int get_key_pv951(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
151 unsigned char b; 151 unsigned char b;
152 152
153 /* poll IR chip */ 153 /* poll IR chip */
154 if (1 != i2c_master_recv(&ir->c,&b,1)) { 154 if (1 != i2c_master_recv(ir->c, &b, 1)) {
155 dprintk(1,"read error\n"); 155 dprintk(1,"read error\n");
156 return -EIO; 156 return -EIO;
157 } 157 }
@@ -171,7 +171,7 @@ static int get_key_fusionhdtv(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
171 unsigned char buf[4]; 171 unsigned char buf[4];
172 172
173 /* poll IR chip */ 173 /* poll IR chip */
174 if (4 != i2c_master_recv(&ir->c,buf,4)) { 174 if (4 != i2c_master_recv(ir->c, buf, 4)) {
175 dprintk(1,"read error\n"); 175 dprintk(1,"read error\n");
176 return -EIO; 176 return -EIO;
177 } 177 }
@@ -195,7 +195,7 @@ static int get_key_knc1(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
195 unsigned char b; 195 unsigned char b;
196 196
197 /* poll IR chip */ 197 /* poll IR chip */
198 if (1 != i2c_master_recv(&ir->c,&b,1)) { 198 if (1 != i2c_master_recv(ir->c, &b, 1)) {
199 dprintk(1,"read error\n"); 199 dprintk(1,"read error\n");
200 return -EIO; 200 return -EIO;
201 } 201 }
@@ -222,12 +222,12 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir,
222 u32 *ir_key, u32 *ir_raw) 222 u32 *ir_key, u32 *ir_raw)
223{ 223{
224 unsigned char subaddr, key, keygroup; 224 unsigned char subaddr, key, keygroup;
225 struct i2c_msg msg[] = { { .addr = ir->c.addr, .flags = 0, 225 struct i2c_msg msg[] = { { .addr = ir->c->addr, .flags = 0,
226 .buf = &subaddr, .len = 1}, 226 .buf = &subaddr, .len = 1},
227 { .addr = ir->c.addr, .flags = I2C_M_RD, 227 { .addr = ir->c->addr, .flags = I2C_M_RD,
228 .buf = &key, .len = 1} }; 228 .buf = &key, .len = 1} };
229 subaddr = 0x0d; 229 subaddr = 0x0d;
230 if (2 != i2c_transfer(ir->c.adapter, msg, 2)) { 230 if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
231 dprintk(1, "read error\n"); 231 dprintk(1, "read error\n");
232 return -EIO; 232 return -EIO;
233 } 233 }
@@ -237,7 +237,7 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir,
237 237
238 subaddr = 0x0b; 238 subaddr = 0x0b;
239 msg[1].buf = &keygroup; 239 msg[1].buf = &keygroup;
240 if (2 != i2c_transfer(ir->c.adapter, msg, 2)) { 240 if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
241 dprintk(1, "read error\n"); 241 dprintk(1, "read error\n");
242 return -EIO; 242 return -EIO;
243 } 243 }
@@ -286,7 +286,7 @@ static void ir_work(struct work_struct *work)
286 286
287 /* MSI TV@nywhere Plus requires more frequent polling 287 /* MSI TV@nywhere Plus requires more frequent polling
288 otherwise it will miss some keypresses */ 288 otherwise it will miss some keypresses */
289 if (ir->c.adapter->id == I2C_HW_SAA7134 && ir->c.addr == 0x30) 289 if (ir->c->adapter->id == I2C_HW_SAA7134 && ir->c->addr == 0x30)
290 polling_interval = 50; 290 polling_interval = 50;
291 291
292 ir_key_poll(ir); 292 ir_key_poll(ir);
@@ -295,34 +295,15 @@ static void ir_work(struct work_struct *work)
295 295
296/* ----------------------------------------------------------------------- */ 296/* ----------------------------------------------------------------------- */
297 297
298static int ir_attach(struct i2c_adapter *adap, int addr, 298static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
299 unsigned short flags, int kind);
300static int ir_detach(struct i2c_client *client);
301static int ir_probe(struct i2c_adapter *adap);
302
303static struct i2c_driver driver = {
304 .driver = {
305 .name = "ir-kbd-i2c",
306 },
307 .id = I2C_DRIVERID_INFRARED,
308 .attach_adapter = ir_probe,
309 .detach_client = ir_detach,
310};
311
312static struct i2c_client client_template =
313{
314 .name = "unset",
315 .driver = &driver
316};
317
318static int ir_attach(struct i2c_adapter *adap, int addr,
319 unsigned short flags, int kind)
320{ 299{
321 IR_KEYTAB_TYPE *ir_codes = NULL; 300 IR_KEYTAB_TYPE *ir_codes = NULL;
322 char *name; 301 const char *name = NULL;
323 int ir_type; 302 int ir_type;
324 struct IR_i2c *ir; 303 struct IR_i2c *ir;
325 struct input_dev *input_dev; 304 struct input_dev *input_dev;
305 struct i2c_adapter *adap = client->adapter;
306 unsigned short addr = client->addr;
326 int err; 307 int err;
327 308
328 ir = kzalloc(sizeof(struct IR_i2c),GFP_KERNEL); 309 ir = kzalloc(sizeof(struct IR_i2c),GFP_KERNEL);
@@ -332,13 +313,9 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
332 goto err_out_free; 313 goto err_out_free;
333 } 314 }
334 315
335 ir->c = client_template; 316 ir->c = client;
336 ir->input = input_dev; 317 ir->input = input_dev;
337 318 i2c_set_clientdata(client, ir);
338 ir->c.adapter = adap;
339 ir->c.addr = addr;
340
341 i2c_set_clientdata(&ir->c, ir);
342 319
343 switch(addr) { 320 switch(addr) {
344 case 0x64: 321 case 0x64:
@@ -403,44 +380,46 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
403 ir_codes = ir_codes_avermedia_cardbus; 380 ir_codes = ir_codes_avermedia_cardbus;
404 break; 381 break;
405 default: 382 default:
406 /* shouldn't happen */ 383 dprintk(1, DEVNAME ": Unsupported i2c address 0x%02x\n", addr);
407 printk(DEVNAME ": Huh? unknown i2c address (0x%02x)?\n", addr);
408 err = -ENODEV; 384 err = -ENODEV;
409 goto err_out_free; 385 goto err_out_free;
410 } 386 }
411 387
412 /* Sets name */ 388 /* Let the caller override settings */
413 snprintf(ir->c.name, sizeof(ir->c.name), "i2c IR (%s)", name); 389 if (client->dev.platform_data) {
414 ir->ir_codes = ir_codes; 390 const struct IR_i2c_init_data *init_data =
391 client->dev.platform_data;
415 392
416 /* register i2c device 393 ir_codes = init_data->ir_codes;
417 * At device register, IR codes may be changed to be 394 name = init_data->name;
418 * board dependent. 395 ir->get_key = init_data->get_key;
419 */ 396 }
420 err = i2c_attach_client(&ir->c);
421 if (err)
422 goto err_out_free;
423 397
424 /* If IR not supported or disabled, unregisters driver */ 398 /* Make sure we are all setup before going on */
425 if (ir->get_key == NULL) { 399 if (!name || !ir->get_key || !ir_codes) {
400 dprintk(1, DEVNAME ": Unsupported device at address 0x%02x\n",
401 addr);
426 err = -ENODEV; 402 err = -ENODEV;
427 goto err_out_detach; 403 goto err_out_free;
428 } 404 }
429 405
430 /* Phys addr can only be set after attaching (for ir->c.dev) */ 406 /* Sets name */
407 snprintf(ir->name, sizeof(ir->name), "i2c IR (%s)", name);
408 ir->ir_codes = ir_codes;
409
431 snprintf(ir->phys, sizeof(ir->phys), "%s/%s/ir0", 410 snprintf(ir->phys, sizeof(ir->phys), "%s/%s/ir0",
432 dev_name(&ir->c.adapter->dev), 411 dev_name(&adap->dev),
433 dev_name(&ir->c.dev)); 412 dev_name(&client->dev));
434 413
435 /* init + register input device */ 414 /* init + register input device */
436 ir_input_init(input_dev, &ir->ir, ir_type, ir->ir_codes); 415 ir_input_init(input_dev, &ir->ir, ir_type, ir->ir_codes);
437 input_dev->id.bustype = BUS_I2C; 416 input_dev->id.bustype = BUS_I2C;
438 input_dev->name = ir->c.name; 417 input_dev->name = ir->name;
439 input_dev->phys = ir->phys; 418 input_dev->phys = ir->phys;
440 419
441 err = input_register_device(ir->input); 420 err = input_register_device(ir->input);
442 if (err) 421 if (err)
443 goto err_out_detach; 422 goto err_out_free;
444 423
445 printk(DEVNAME ": %s detected at %s [%s]\n", 424 printk(DEVNAME ": %s detected at %s [%s]\n",
446 ir->input->name, ir->input->phys, adap->name); 425 ir->input->name, ir->input->phys, adap->name);
@@ -451,135 +430,42 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
451 430
452 return 0; 431 return 0;
453 432
454 err_out_detach:
455 i2c_detach_client(&ir->c);
456 err_out_free: 433 err_out_free:
457 input_free_device(input_dev); 434 input_free_device(input_dev);
458 kfree(ir); 435 kfree(ir);
459 return err; 436 return err;
460} 437}
461 438
462static int ir_detach(struct i2c_client *client) 439static int ir_remove(struct i2c_client *client)
463{ 440{
464 struct IR_i2c *ir = i2c_get_clientdata(client); 441 struct IR_i2c *ir = i2c_get_clientdata(client);
465 442
466 /* kill outstanding polls */ 443 /* kill outstanding polls */
467 cancel_delayed_work_sync(&ir->work); 444 cancel_delayed_work_sync(&ir->work);
468 445
469 /* unregister devices */ 446 /* unregister device */
470 input_unregister_device(ir->input); 447 input_unregister_device(ir->input);
471 i2c_detach_client(&ir->c);
472 448
473 /* free memory */ 449 /* free memory */
474 kfree(ir); 450 kfree(ir);
475 return 0; 451 return 0;
476} 452}
477 453
478static int ir_probe(struct i2c_adapter *adap) 454static const struct i2c_device_id ir_kbd_id[] = {
479{ 455 /* Generic entry for any IR receiver */
480 456 { "ir_video", 0 },
481 /* The external IR receiver is at i2c address 0x34 (0x35 for 457 /* IR device specific entries could be added here */
482 reads). Future Hauppauge cards will have an internal 458 { }
483 receiver at 0x30 (0x31 for reads). In theory, both can be 459};
484 fitted, and Hauppauge suggest an external overrides an
485 internal.
486
487 That's why we probe 0x1a (~0x34) first. CB
488 */
489
490 static const int probe_bttv[] = { 0x1a, 0x18, 0x4b, 0x64, 0x30, -1};
491 static const int probe_saa7134[] = { 0x7a, 0x47, 0x71, 0x2d, -1 };
492 static const int probe_em28XX[] = { 0x30, 0x47, -1 };
493 static const int probe_cx88[] = { 0x18, 0x6b, 0x71, -1 };
494 static const int probe_cx23885[] = { 0x6b, -1 };
495 const int *probe;
496 struct i2c_msg msg = {
497 .flags = I2C_M_RD,
498 .len = 0,
499 .buf = NULL,
500 };
501 int i, rc;
502
503 switch (adap->id) {
504 case I2C_HW_B_BT848:
505 probe = probe_bttv;
506 break;
507 case I2C_HW_B_CX2341X:
508 probe = probe_bttv;
509 break;
510 case I2C_HW_SAA7134:
511 probe = probe_saa7134;
512 break;
513 case I2C_HW_B_EM28XX:
514 probe = probe_em28XX;
515 break;
516 case I2C_HW_B_CX2388x:
517 probe = probe_cx88;
518 break;
519 case I2C_HW_B_CX23885:
520 probe = probe_cx23885;
521 break;
522 default:
523 return 0;
524 }
525
526 for (i = 0; -1 != probe[i]; i++) {
527 msg.addr = probe[i];
528 rc = i2c_transfer(adap, &msg, 1);
529 dprintk(1,"probe 0x%02x @ %s: %s\n",
530 probe[i], adap->name,
531 (1 == rc) ? "yes" : "no");
532 if (1 == rc) {
533 ir_attach(adap, probe[i], 0, 0);
534 return 0;
535 }
536 }
537
538 /* Special case for MSI TV@nywhere Plus remote */
539 if (adap->id == I2C_HW_SAA7134) {
540 u8 temp;
541
542 /* MSI TV@nywhere Plus controller doesn't seem to
543 respond to probes unless we read something from
544 an existing device. Weird... */
545
546 msg.addr = 0x50;
547 rc = i2c_transfer(adap, &msg, 1);
548 dprintk(1, "probe 0x%02x @ %s: %s\n",
549 msg.addr, adap->name,
550 (1 == rc) ? "yes" : "no");
551
552 /* Now do the probe. The controller does not respond
553 to 0-byte reads, so we use a 1-byte read instead. */
554 msg.addr = 0x30;
555 msg.len = 1;
556 msg.buf = &temp;
557 rc = i2c_transfer(adap, &msg, 1);
558 dprintk(1, "probe 0x%02x @ %s: %s\n",
559 msg.addr, adap->name,
560 (1 == rc) ? "yes" : "no");
561 if (1 == rc)
562 ir_attach(adap, msg.addr, 0, 0);
563 }
564
565 /* Special case for AVerMedia Cardbus remote */
566 if (adap->id == I2C_HW_SAA7134) {
567 unsigned char subaddr, data;
568 struct i2c_msg msg[] = { { .addr = 0x40, .flags = 0,
569 .buf = &subaddr, .len = 1},
570 { .addr = 0x40, .flags = I2C_M_RD,
571 .buf = &data, .len = 1} };
572 subaddr = 0x0d;
573 rc = i2c_transfer(adap, msg, 2);
574 dprintk(1, "probe 0x%02x/0x%02x @ %s: %s\n",
575 msg[0].addr, subaddr, adap->name,
576 (2 == rc) ? "yes" : "no");
577 if (2 == rc)
578 ir_attach(adap, msg[0].addr, 0, 0);
579 }
580 460
581 return 0; 461static struct i2c_driver driver = {
582} 462 .driver = {
463 .name = "ir-kbd-i2c",
464 },
465 .probe = ir_probe,
466 .remove = ir_remove,
467 .id_table = ir_kbd_id,
468};
583 469
584/* ----------------------------------------------------------------------- */ 470/* ----------------------------------------------------------------------- */
585 471
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index db2ac9a99acd..558f8a837ff4 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -455,7 +455,7 @@ static void ivtv_process_eeprom(struct ivtv *itv)
455 break; 455 break;
456 } 456 }
457 if (tv.tuner_type == TUNER_ABSENT) 457 if (tv.tuner_type == TUNER_ABSENT)
458 IVTV_ERR("tveeprom cannot autodetect tuner!"); 458 IVTV_ERR("tveeprom cannot autodetect tuner!\n");
459 459
460 if (itv->options.tuner == -1) 460 if (itv->options.tuner == -1)
461 itv->options.tuner = tv.tuner_type; 461 itv->options.tuner = tv.tuner_type;
@@ -946,17 +946,14 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
946 if (itv == NULL) 946 if (itv == NULL)
947 return -ENOMEM; 947 return -ENOMEM;
948 itv->pdev = pdev; 948 itv->pdev = pdev;
949 itv->instance = atomic_inc_return(&ivtv_instance) - 1; 949 itv->instance = v4l2_device_set_name(&itv->v4l2_dev, "ivtv",
950 &ivtv_instance);
950 951
951 retval = v4l2_device_register(&pdev->dev, &itv->v4l2_dev); 952 retval = v4l2_device_register(&pdev->dev, &itv->v4l2_dev);
952 if (retval) { 953 if (retval) {
953 kfree(itv); 954 kfree(itv);
954 return retval; 955 return retval;
955 } 956 }
956 /* "ivtv + PCI ID" is a bit of a mouthful, so use
957 "ivtv + instance" instead. */
958 snprintf(itv->v4l2_dev.name, sizeof(itv->v4l2_dev.name),
959 "ivtv%d", itv->instance);
960 IVTV_INFO("Initializing card %d\n", itv->instance); 957 IVTV_INFO("Initializing card %d\n", itv->instance);
961 958
962 ivtv_process_options(itv); 959 ivtv_process_options(itv);
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 9e3d32b8004c..e52aa322b134 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -579,9 +579,11 @@ static struct i2c_client ivtv_i2c_client_template = {
579 .name = "ivtv internal", 579 .name = "ivtv internal",
580}; 580};
581 581
582/* init + register i2c algo-bit adapter */ 582/* init + register i2c adapter + instantiate IR receiver */
583int init_ivtv_i2c(struct ivtv *itv) 583int init_ivtv_i2c(struct ivtv *itv)
584{ 584{
585 int retval;
586
585 IVTV_DEBUG_I2C("i2c init\n"); 587 IVTV_DEBUG_I2C("i2c init\n");
586 588
587 /* Sanity checks for the I2C hardware arrays. They must be the 589 /* Sanity checks for the I2C hardware arrays. They must be the
@@ -619,9 +621,37 @@ int init_ivtv_i2c(struct ivtv *itv)
619 ivtv_setsda(itv, 1); 621 ivtv_setsda(itv, 1);
620 622
621 if (itv->options.newi2c > 0) 623 if (itv->options.newi2c > 0)
622 return i2c_add_adapter(&itv->i2c_adap); 624 retval = i2c_add_adapter(&itv->i2c_adap);
623 else 625 else
624 return i2c_bit_add_bus(&itv->i2c_adap); 626 retval = i2c_bit_add_bus(&itv->i2c_adap);
627
628 /* Instantiate the IR receiver device, if present */
629 if (retval == 0) {
630 struct i2c_board_info info;
631 /* The external IR receiver is at i2c address 0x34 (0x35 for
632 reads). Future Hauppauge cards will have an internal
633 receiver at 0x30 (0x31 for reads). In theory, both can be
634 fitted, and Hauppauge suggest an external overrides an
635 internal.
636
637 That's why we probe 0x1a (~0x34) first. CB
638 */
639 const unsigned short addr_list[] = {
640 0x1a, /* Hauppauge IR external */
641 0x18, /* Hauppauge IR internal */
642 0x71, /* Hauppauge IR (PVR150) */
643 0x64, /* Pixelview IR */
644 0x30, /* KNC ONE IR */
645 0x6b, /* Adaptec IR */
646 I2C_CLIENT_END
647 };
648
649 memset(&info, 0, sizeof(struct i2c_board_info));
650 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
651 i2c_new_probed_device(&itv->i2c_adap, &info, addr_list);
652 }
653
654 return retval;
625} 655}
626 656
627void exit_ivtv_i2c(struct ivtv *itv) 657void exit_ivtv_i2c(struct ivtv *itv)
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index c342a9fe983e..99f3c39a118b 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -709,7 +709,7 @@ static int ivtv_itvc(struct ivtv *itv, unsigned int cmd, void *arg)
709 else if (itv->has_cx23415 && regs->reg >= IVTV_DECODER_OFFSET && 709 else if (itv->has_cx23415 && regs->reg >= IVTV_DECODER_OFFSET &&
710 regs->reg < IVTV_DECODER_OFFSET + IVTV_DECODER_SIZE) 710 regs->reg < IVTV_DECODER_OFFSET + IVTV_DECODER_SIZE)
711 reg_start = itv->dec_mem - IVTV_DECODER_OFFSET; 711 reg_start = itv->dec_mem - IVTV_DECODER_OFFSET;
712 else if (regs->reg >= 0 && regs->reg < IVTV_ENCODER_SIZE) 712 else if (regs->reg < IVTV_ENCODER_SIZE)
713 reg_start = itv->enc_mem; 713 reg_start = itv->enc_mem;
714 else 714 else
715 return -EINVAL; 715 return -EINVAL;
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 684f62fa7897..459c04cbf69d 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -75,53 +75,50 @@ struct mt9m001 {
75 unsigned char autoexposure; 75 unsigned char autoexposure;
76}; 76};
77 77
78static int reg_read(struct soc_camera_device *icd, const u8 reg) 78static int reg_read(struct i2c_client *client, const u8 reg)
79{ 79{
80 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
81 struct i2c_client *client = mt9m001->client;
82 s32 data = i2c_smbus_read_word_data(client, reg); 80 s32 data = i2c_smbus_read_word_data(client, reg);
83 return data < 0 ? data : swab16(data); 81 return data < 0 ? data : swab16(data);
84} 82}
85 83
86static int reg_write(struct soc_camera_device *icd, const u8 reg, 84static int reg_write(struct i2c_client *client, const u8 reg,
87 const u16 data) 85 const u16 data)
88{ 86{
89 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 87 return i2c_smbus_write_word_data(client, reg, swab16(data));
90 return i2c_smbus_write_word_data(mt9m001->client, reg, swab16(data));
91} 88}
92 89
93static int reg_set(struct soc_camera_device *icd, const u8 reg, 90static int reg_set(struct i2c_client *client, const u8 reg,
94 const u16 data) 91 const u16 data)
95{ 92{
96 int ret; 93 int ret;
97 94
98 ret = reg_read(icd, reg); 95 ret = reg_read(client, reg);
99 if (ret < 0) 96 if (ret < 0)
100 return ret; 97 return ret;
101 return reg_write(icd, reg, ret | data); 98 return reg_write(client, reg, ret | data);
102} 99}
103 100
104static int reg_clear(struct soc_camera_device *icd, const u8 reg, 101static int reg_clear(struct i2c_client *client, const u8 reg,
105 const u16 data) 102 const u16 data)
106{ 103{
107 int ret; 104 int ret;
108 105
109 ret = reg_read(icd, reg); 106 ret = reg_read(client, reg);
110 if (ret < 0) 107 if (ret < 0)
111 return ret; 108 return ret;
112 return reg_write(icd, reg, ret & ~data); 109 return reg_write(client, reg, ret & ~data);
113} 110}
114 111
115static int mt9m001_init(struct soc_camera_device *icd) 112static int mt9m001_init(struct soc_camera_device *icd)
116{ 113{
117 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 114 struct i2c_client *client = to_i2c_client(icd->control);
118 struct soc_camera_link *icl = mt9m001->client->dev.platform_data; 115 struct soc_camera_link *icl = client->dev.platform_data;
119 int ret; 116 int ret;
120 117
121 dev_dbg(icd->vdev->parent, "%s\n", __func__); 118 dev_dbg(icd->vdev->parent, "%s\n", __func__);
122 119
123 if (icl->power) { 120 if (icl->power) {
124 ret = icl->power(&mt9m001->client->dev, 1); 121 ret = icl->power(&client->dev, 1);
125 if (ret < 0) { 122 if (ret < 0) {
126 dev_err(icd->vdev->parent, 123 dev_err(icd->vdev->parent,
127 "Platform failed to power-on the camera.\n"); 124 "Platform failed to power-on the camera.\n");
@@ -131,49 +128,53 @@ static int mt9m001_init(struct soc_camera_device *icd)
131 128
132 /* The camera could have been already on, we reset it additionally */ 129 /* The camera could have been already on, we reset it additionally */
133 if (icl->reset) 130 if (icl->reset)
134 ret = icl->reset(&mt9m001->client->dev); 131 ret = icl->reset(&client->dev);
135 else 132 else
136 ret = -ENODEV; 133 ret = -ENODEV;
137 134
138 if (ret < 0) { 135 if (ret < 0) {
139 /* Either no platform reset, or platform reset failed */ 136 /* Either no platform reset, or platform reset failed */
140 ret = reg_write(icd, MT9M001_RESET, 1); 137 ret = reg_write(client, MT9M001_RESET, 1);
141 if (!ret) 138 if (!ret)
142 ret = reg_write(icd, MT9M001_RESET, 0); 139 ret = reg_write(client, MT9M001_RESET, 0);
143 } 140 }
144 /* Disable chip, synchronous option update */ 141 /* Disable chip, synchronous option update */
145 if (!ret) 142 if (!ret)
146 ret = reg_write(icd, MT9M001_OUTPUT_CONTROL, 0); 143 ret = reg_write(client, MT9M001_OUTPUT_CONTROL, 0);
147 144
148 return ret; 145 return ret;
149} 146}
150 147
151static int mt9m001_release(struct soc_camera_device *icd) 148static int mt9m001_release(struct soc_camera_device *icd)
152{ 149{
153 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 150 struct i2c_client *client = to_i2c_client(icd->control);
154 struct soc_camera_link *icl = mt9m001->client->dev.platform_data; 151 struct soc_camera_link *icl = client->dev.platform_data;
155 152
156 /* Disable the chip */ 153 /* Disable the chip */
157 reg_write(icd, MT9M001_OUTPUT_CONTROL, 0); 154 reg_write(client, MT9M001_OUTPUT_CONTROL, 0);
158 155
159 if (icl->power) 156 if (icl->power)
160 icl->power(&mt9m001->client->dev, 0); 157 icl->power(&client->dev, 0);
161 158
162 return 0; 159 return 0;
163} 160}
164 161
165static int mt9m001_start_capture(struct soc_camera_device *icd) 162static int mt9m001_start_capture(struct soc_camera_device *icd)
166{ 163{
164 struct i2c_client *client = to_i2c_client(icd->control);
165
167 /* Switch to master "normal" mode */ 166 /* Switch to master "normal" mode */
168 if (reg_write(icd, MT9M001_OUTPUT_CONTROL, 2) < 0) 167 if (reg_write(client, MT9M001_OUTPUT_CONTROL, 2) < 0)
169 return -EIO; 168 return -EIO;
170 return 0; 169 return 0;
171} 170}
172 171
173static int mt9m001_stop_capture(struct soc_camera_device *icd) 172static int mt9m001_stop_capture(struct soc_camera_device *icd)
174{ 173{
174 struct i2c_client *client = to_i2c_client(icd->control);
175
175 /* Stop sensor readout */ 176 /* Stop sensor readout */
176 if (reg_write(icd, MT9M001_OUTPUT_CONTROL, 0) < 0) 177 if (reg_write(client, MT9M001_OUTPUT_CONTROL, 0) < 0)
177 return -EIO; 178 return -EIO;
178 return 0; 179 return 0;
179} 180}
@@ -222,28 +223,29 @@ static unsigned long mt9m001_query_bus_param(struct soc_camera_device *icd)
222static int mt9m001_set_crop(struct soc_camera_device *icd, 223static int mt9m001_set_crop(struct soc_camera_device *icd,
223 struct v4l2_rect *rect) 224 struct v4l2_rect *rect)
224{ 225{
226 struct i2c_client *client = to_i2c_client(icd->control);
225 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 227 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
226 int ret; 228 int ret;
227 const u16 hblank = 9, vblank = 25; 229 const u16 hblank = 9, vblank = 25;
228 230
229 /* Blanking and start values - default... */ 231 /* Blanking and start values - default... */
230 ret = reg_write(icd, MT9M001_HORIZONTAL_BLANKING, hblank); 232 ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank);
231 if (!ret) 233 if (!ret)
232 ret = reg_write(icd, MT9M001_VERTICAL_BLANKING, vblank); 234 ret = reg_write(client, MT9M001_VERTICAL_BLANKING, vblank);
233 235
234 /* The caller provides a supported format, as verified per 236 /* The caller provides a supported format, as verified per
235 * call to icd->try_fmt() */ 237 * call to icd->try_fmt() */
236 if (!ret) 238 if (!ret)
237 ret = reg_write(icd, MT9M001_COLUMN_START, rect->left); 239 ret = reg_write(client, MT9M001_COLUMN_START, rect->left);
238 if (!ret) 240 if (!ret)
239 ret = reg_write(icd, MT9M001_ROW_START, rect->top); 241 ret = reg_write(client, MT9M001_ROW_START, rect->top);
240 if (!ret) 242 if (!ret)
241 ret = reg_write(icd, MT9M001_WINDOW_WIDTH, rect->width - 1); 243 ret = reg_write(client, MT9M001_WINDOW_WIDTH, rect->width - 1);
242 if (!ret) 244 if (!ret)
243 ret = reg_write(icd, MT9M001_WINDOW_HEIGHT, 245 ret = reg_write(client, MT9M001_WINDOW_HEIGHT,
244 rect->height + icd->y_skip_top - 1); 246 rect->height + icd->y_skip_top - 1);
245 if (!ret && mt9m001->autoexposure) { 247 if (!ret && mt9m001->autoexposure) {
246 ret = reg_write(icd, MT9M001_SHUTTER_WIDTH, 248 ret = reg_write(client, MT9M001_SHUTTER_WIDTH,
247 rect->height + icd->y_skip_top + vblank); 249 rect->height + icd->y_skip_top + vblank);
248 if (!ret) { 250 if (!ret) {
249 const struct v4l2_queryctrl *qctrl = 251 const struct v4l2_queryctrl *qctrl =
@@ -312,16 +314,16 @@ static int mt9m001_get_chip_id(struct soc_camera_device *icd,
312static int mt9m001_get_register(struct soc_camera_device *icd, 314static int mt9m001_get_register(struct soc_camera_device *icd,
313 struct v4l2_dbg_register *reg) 315 struct v4l2_dbg_register *reg)
314{ 316{
315 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 317 struct i2c_client *client = to_i2c_client(icd->control);
316 318
317 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) 319 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
318 return -EINVAL; 320 return -EINVAL;
319 321
320 if (reg->match.addr != mt9m001->client->addr) 322 if (reg->match.addr != client->addr)
321 return -ENODEV; 323 return -ENODEV;
322 324
323 reg->size = 2; 325 reg->size = 2;
324 reg->val = reg_read(icd, reg->reg); 326 reg->val = reg_read(client, reg->reg);
325 327
326 if (reg->val > 0xffff) 328 if (reg->val > 0xffff)
327 return -EIO; 329 return -EIO;
@@ -332,15 +334,15 @@ static int mt9m001_get_register(struct soc_camera_device *icd,
332static int mt9m001_set_register(struct soc_camera_device *icd, 334static int mt9m001_set_register(struct soc_camera_device *icd,
333 struct v4l2_dbg_register *reg) 335 struct v4l2_dbg_register *reg)
334{ 336{
335 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 337 struct i2c_client *client = to_i2c_client(icd->control);
336 338
337 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) 339 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
338 return -EINVAL; 340 return -EINVAL;
339 341
340 if (reg->match.addr != mt9m001->client->addr) 342 if (reg->match.addr != client->addr)
341 return -ENODEV; 343 return -ENODEV;
342 344
343 if (reg_write(icd, reg->reg, reg->val) < 0) 345 if (reg_write(client, reg->reg, reg->val) < 0)
344 return -EIO; 346 return -EIO;
345 347
346 return 0; 348 return 0;
@@ -416,12 +418,13 @@ static struct soc_camera_ops mt9m001_ops = {
416 418
417static int mt9m001_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) 419static int mt9m001_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl)
418{ 420{
421 struct i2c_client *client = to_i2c_client(icd->control);
419 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 422 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
420 int data; 423 int data;
421 424
422 switch (ctrl->id) { 425 switch (ctrl->id) {
423 case V4L2_CID_VFLIP: 426 case V4L2_CID_VFLIP:
424 data = reg_read(icd, MT9M001_READ_OPTIONS2); 427 data = reg_read(client, MT9M001_READ_OPTIONS2);
425 if (data < 0) 428 if (data < 0)
426 return -EIO; 429 return -EIO;
427 ctrl->value = !!(data & 0x8000); 430 ctrl->value = !!(data & 0x8000);
@@ -435,6 +438,7 @@ static int mt9m001_get_control(struct soc_camera_device *icd, struct v4l2_contro
435 438
436static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) 439static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl)
437{ 440{
441 struct i2c_client *client = to_i2c_client(icd->control);
438 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 442 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
439 const struct v4l2_queryctrl *qctrl; 443 const struct v4l2_queryctrl *qctrl;
440 int data; 444 int data;
@@ -447,9 +451,9 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
447 switch (ctrl->id) { 451 switch (ctrl->id) {
448 case V4L2_CID_VFLIP: 452 case V4L2_CID_VFLIP:
449 if (ctrl->value) 453 if (ctrl->value)
450 data = reg_set(icd, MT9M001_READ_OPTIONS2, 0x8000); 454 data = reg_set(client, MT9M001_READ_OPTIONS2, 0x8000);
451 else 455 else
452 data = reg_clear(icd, MT9M001_READ_OPTIONS2, 0x8000); 456 data = reg_clear(client, MT9M001_READ_OPTIONS2, 0x8000);
453 if (data < 0) 457 if (data < 0)
454 return -EIO; 458 return -EIO;
455 break; 459 break;
@@ -463,7 +467,7 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
463 data = ((ctrl->value - qctrl->minimum) * 8 + range / 2) / range; 467 data = ((ctrl->value - qctrl->minimum) * 8 + range / 2) / range;
464 468
465 dev_dbg(&icd->dev, "Setting gain %d\n", data); 469 dev_dbg(&icd->dev, "Setting gain %d\n", data);
466 data = reg_write(icd, MT9M001_GLOBAL_GAIN, data); 470 data = reg_write(client, MT9M001_GLOBAL_GAIN, data);
467 if (data < 0) 471 if (data < 0)
468 return -EIO; 472 return -EIO;
469 } else { 473 } else {
@@ -481,8 +485,8 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
481 data = ((gain - 64) * 7 + 28) / 56 + 96; 485 data = ((gain - 64) * 7 + 28) / 56 + 96;
482 486
483 dev_dbg(&icd->dev, "Setting gain from %d to %d\n", 487 dev_dbg(&icd->dev, "Setting gain from %d to %d\n",
484 reg_read(icd, MT9M001_GLOBAL_GAIN), data); 488 reg_read(client, MT9M001_GLOBAL_GAIN), data);
485 data = reg_write(icd, MT9M001_GLOBAL_GAIN, data); 489 data = reg_write(client, MT9M001_GLOBAL_GAIN, data);
486 if (data < 0) 490 if (data < 0)
487 return -EIO; 491 return -EIO;
488 } 492 }
@@ -500,8 +504,8 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
500 range / 2) / range + 1; 504 range / 2) / range + 1;
501 505
502 dev_dbg(&icd->dev, "Setting shutter width from %d to %lu\n", 506 dev_dbg(&icd->dev, "Setting shutter width from %d to %lu\n",
503 reg_read(icd, MT9M001_SHUTTER_WIDTH), shutter); 507 reg_read(client, MT9M001_SHUTTER_WIDTH), shutter);
504 if (reg_write(icd, MT9M001_SHUTTER_WIDTH, shutter) < 0) 508 if (reg_write(client, MT9M001_SHUTTER_WIDTH, shutter) < 0)
505 return -EIO; 509 return -EIO;
506 icd->exposure = ctrl->value; 510 icd->exposure = ctrl->value;
507 mt9m001->autoexposure = 0; 511 mt9m001->autoexposure = 0;
@@ -510,7 +514,7 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
510 case V4L2_CID_EXPOSURE_AUTO: 514 case V4L2_CID_EXPOSURE_AUTO:
511 if (ctrl->value) { 515 if (ctrl->value) {
512 const u16 vblank = 25; 516 const u16 vblank = 25;
513 if (reg_write(icd, MT9M001_SHUTTER_WIDTH, icd->height + 517 if (reg_write(client, MT9M001_SHUTTER_WIDTH, icd->height +
514 icd->y_skip_top + vblank) < 0) 518 icd->y_skip_top + vblank) < 0)
515 return -EIO; 519 return -EIO;
516 qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); 520 qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE);
@@ -529,8 +533,9 @@ static int mt9m001_set_control(struct soc_camera_device *icd, struct v4l2_contro
529 * this wasn't our capture interface, so, we wait for the right one */ 533 * this wasn't our capture interface, so, we wait for the right one */
530static int mt9m001_video_probe(struct soc_camera_device *icd) 534static int mt9m001_video_probe(struct soc_camera_device *icd)
531{ 535{
536 struct i2c_client *client = to_i2c_client(icd->control);
532 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 537 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
533 struct soc_camera_link *icl = mt9m001->client->dev.platform_data; 538 struct soc_camera_link *icl = client->dev.platform_data;
534 s32 data; 539 s32 data;
535 int ret; 540 int ret;
536 unsigned long flags; 541 unsigned long flags;
@@ -542,11 +547,11 @@ static int mt9m001_video_probe(struct soc_camera_device *icd)
542 return -ENODEV; 547 return -ENODEV;
543 548
544 /* Enable the chip */ 549 /* Enable the chip */
545 data = reg_write(icd, MT9M001_CHIP_ENABLE, 1); 550 data = reg_write(client, MT9M001_CHIP_ENABLE, 1);
546 dev_dbg(&icd->dev, "write: %d\n", data); 551 dev_dbg(&icd->dev, "write: %d\n", data);
547 552
548 /* Read out the chip version register */ 553 /* Read out the chip version register */
549 data = reg_read(icd, MT9M001_CHIP_VERSION); 554 data = reg_read(client, MT9M001_CHIP_VERSION);
550 555
551 /* must be 0x8411 or 0x8421 for colour sensor and 8431 for bw */ 556 /* must be 0x8411 or 0x8421 for colour sensor and 8431 for bw */
552 switch (data) { 557 switch (data) {
@@ -604,10 +609,13 @@ ei2c:
604static void mt9m001_video_remove(struct soc_camera_device *icd) 609static void mt9m001_video_remove(struct soc_camera_device *icd)
605{ 610{
606 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd); 611 struct mt9m001 *mt9m001 = container_of(icd, struct mt9m001, icd);
612 struct soc_camera_link *icl = mt9m001->client->dev.platform_data;
607 613
608 dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9m001->client->addr, 614 dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9m001->client->addr,
609 icd->dev.parent, icd->vdev); 615 icd->dev.parent, icd->vdev);
610 soc_camera_video_stop(icd); 616 soc_camera_video_stop(icd);
617 if (icl->free_bus)
618 icl->free_bus(icl);
611} 619}
612 620
613static int mt9m001_probe(struct i2c_client *client, 621static int mt9m001_probe(struct i2c_client *client,
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index cdd1ddb51388..fc5e2de03766 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -113,10 +113,10 @@
113 * mt9m111: Camera control register addresses (0x200..0x2ff not implemented) 113 * mt9m111: Camera control register addresses (0x200..0x2ff not implemented)
114 */ 114 */
115 115
116#define reg_read(reg) mt9m111_reg_read(icd, MT9M111_##reg) 116#define reg_read(reg) mt9m111_reg_read(client, MT9M111_##reg)
117#define reg_write(reg, val) mt9m111_reg_write(icd, MT9M111_##reg, (val)) 117#define reg_write(reg, val) mt9m111_reg_write(client, MT9M111_##reg, (val))
118#define reg_set(reg, val) mt9m111_reg_set(icd, MT9M111_##reg, (val)) 118#define reg_set(reg, val) mt9m111_reg_set(client, MT9M111_##reg, (val))
119#define reg_clear(reg, val) mt9m111_reg_clear(icd, MT9M111_##reg, (val)) 119#define reg_clear(reg, val) mt9m111_reg_clear(client, MT9M111_##reg, (val))
120 120
121#define MT9M111_MIN_DARK_ROWS 8 121#define MT9M111_MIN_DARK_ROWS 8
122#define MT9M111_MIN_DARK_COLS 24 122#define MT9M111_MIN_DARK_COLS 24
@@ -184,58 +184,55 @@ static int reg_page_map_set(struct i2c_client *client, const u16 reg)
184 return ret; 184 return ret;
185} 185}
186 186
187static int mt9m111_reg_read(struct soc_camera_device *icd, const u16 reg) 187static int mt9m111_reg_read(struct i2c_client *client, const u16 reg)
188{ 188{
189 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
190 struct i2c_client *client = mt9m111->client;
191 int ret; 189 int ret;
192 190
193 ret = reg_page_map_set(client, reg); 191 ret = reg_page_map_set(client, reg);
194 if (!ret) 192 if (!ret)
195 ret = swab16(i2c_smbus_read_word_data(client, (reg & 0xff))); 193 ret = swab16(i2c_smbus_read_word_data(client, (reg & 0xff)));
196 194
197 dev_dbg(&icd->dev, "read reg.%03x -> %04x\n", reg, ret); 195 dev_dbg(&client->dev, "read reg.%03x -> %04x\n", reg, ret);
198 return ret; 196 return ret;
199} 197}
200 198
201static int mt9m111_reg_write(struct soc_camera_device *icd, const u16 reg, 199static int mt9m111_reg_write(struct i2c_client *client, const u16 reg,
202 const u16 data) 200 const u16 data)
203{ 201{
204 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
205 struct i2c_client *client = mt9m111->client;
206 int ret; 202 int ret;
207 203
208 ret = reg_page_map_set(client, reg); 204 ret = reg_page_map_set(client, reg);
209 if (!ret) 205 if (!ret)
210 ret = i2c_smbus_write_word_data(mt9m111->client, (reg & 0xff), 206 ret = i2c_smbus_write_word_data(client, (reg & 0xff),
211 swab16(data)); 207 swab16(data));
212 dev_dbg(&icd->dev, "write reg.%03x = %04x -> %d\n", reg, data, ret); 208 dev_dbg(&client->dev, "write reg.%03x = %04x -> %d\n", reg, data, ret);
213 return ret; 209 return ret;
214} 210}
215 211
216static int mt9m111_reg_set(struct soc_camera_device *icd, const u16 reg, 212static int mt9m111_reg_set(struct i2c_client *client, const u16 reg,
217 const u16 data) 213 const u16 data)
218{ 214{
219 int ret; 215 int ret;
220 216
221 ret = mt9m111_reg_read(icd, reg); 217 ret = mt9m111_reg_read(client, reg);
222 if (ret >= 0) 218 if (ret >= 0)
223 ret = mt9m111_reg_write(icd, reg, ret | data); 219 ret = mt9m111_reg_write(client, reg, ret | data);
224 return ret; 220 return ret;
225} 221}
226 222
227static int mt9m111_reg_clear(struct soc_camera_device *icd, const u16 reg, 223static int mt9m111_reg_clear(struct i2c_client *client, const u16 reg,
228 const u16 data) 224 const u16 data)
229{ 225{
230 int ret; 226 int ret;
231 227
232 ret = mt9m111_reg_read(icd, reg); 228 ret = mt9m111_reg_read(client, reg);
233 return mt9m111_reg_write(icd, reg, ret & ~data); 229 return mt9m111_reg_write(client, reg, ret & ~data);
234} 230}
235 231
236static int mt9m111_set_context(struct soc_camera_device *icd, 232static int mt9m111_set_context(struct soc_camera_device *icd,
237 enum mt9m111_context ctxt) 233 enum mt9m111_context ctxt)
238{ 234{
235 struct i2c_client *client = to_i2c_client(icd->control);
239 int valB = MT9M111_CTXT_CTRL_RESTART | MT9M111_CTXT_CTRL_DEFECTCOR_B 236 int valB = MT9M111_CTXT_CTRL_RESTART | MT9M111_CTXT_CTRL_DEFECTCOR_B
240 | MT9M111_CTXT_CTRL_RESIZE_B | MT9M111_CTXT_CTRL_CTRL2_B 237 | MT9M111_CTXT_CTRL_RESIZE_B | MT9M111_CTXT_CTRL_CTRL2_B
241 | MT9M111_CTXT_CTRL_GAMMA_B | MT9M111_CTXT_CTRL_READ_MODE_B 238 | MT9M111_CTXT_CTRL_GAMMA_B | MT9M111_CTXT_CTRL_READ_MODE_B
@@ -252,6 +249,7 @@ static int mt9m111_set_context(struct soc_camera_device *icd,
252static int mt9m111_setup_rect(struct soc_camera_device *icd, 249static int mt9m111_setup_rect(struct soc_camera_device *icd,
253 struct v4l2_rect *rect) 250 struct v4l2_rect *rect)
254{ 251{
252 struct i2c_client *client = to_i2c_client(icd->control);
255 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); 253 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
256 int ret, is_raw_format; 254 int ret, is_raw_format;
257 int width = rect->width; 255 int width = rect->width;
@@ -296,6 +294,7 @@ static int mt9m111_setup_rect(struct soc_camera_device *icd,
296 294
297static int mt9m111_setup_pixfmt(struct soc_camera_device *icd, u16 outfmt) 295static int mt9m111_setup_pixfmt(struct soc_camera_device *icd, u16 outfmt)
298{ 296{
297 struct i2c_client *client = to_i2c_client(icd->control);
299 int ret; 298 int ret;
300 299
301 ret = reg_write(OUTPUT_FORMAT_CTRL2_A, outfmt); 300 ret = reg_write(OUTPUT_FORMAT_CTRL2_A, outfmt);
@@ -357,12 +356,13 @@ static int mt9m111_setfmt_yuv(struct soc_camera_device *icd)
357 356
358static int mt9m111_enable(struct soc_camera_device *icd) 357static int mt9m111_enable(struct soc_camera_device *icd)
359{ 358{
359 struct i2c_client *client = to_i2c_client(icd->control);
360 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); 360 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
361 struct soc_camera_link *icl = mt9m111->client->dev.platform_data; 361 struct soc_camera_link *icl = client->dev.platform_data;
362 int ret; 362 int ret;
363 363
364 if (icl->power) { 364 if (icl->power) {
365 ret = icl->power(&mt9m111->client->dev, 1); 365 ret = icl->power(&client->dev, 1);
366 if (ret < 0) { 366 if (ret < 0) {
367 dev_err(icd->vdev->parent, 367 dev_err(icd->vdev->parent,
368 "Platform failed to power-on the camera.\n"); 368 "Platform failed to power-on the camera.\n");
@@ -378,8 +378,9 @@ static int mt9m111_enable(struct soc_camera_device *icd)
378 378
379static int mt9m111_disable(struct soc_camera_device *icd) 379static int mt9m111_disable(struct soc_camera_device *icd)
380{ 380{
381 struct i2c_client *client = to_i2c_client(icd->control);
381 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); 382 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
382 struct soc_camera_link *icl = mt9m111->client->dev.platform_data; 383 struct soc_camera_link *icl = client->dev.platform_data;
383 int ret; 384 int ret;
384 385
385 ret = reg_clear(RESET, MT9M111_RESET_CHIP_ENABLE); 386 ret = reg_clear(RESET, MT9M111_RESET_CHIP_ENABLE);
@@ -387,15 +388,15 @@ static int mt9m111_disable(struct soc_camera_device *icd)
387 mt9m111->powered = 0; 388 mt9m111->powered = 0;
388 389
389 if (icl->power) 390 if (icl->power)
390 icl->power(&mt9m111->client->dev, 0); 391 icl->power(&client->dev, 0);
391 392
392 return ret; 393 return ret;
393} 394}
394 395
395static int mt9m111_reset(struct soc_camera_device *icd) 396static int mt9m111_reset(struct soc_camera_device *icd)
396{ 397{
397 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); 398 struct i2c_client *client = to_i2c_client(icd->control);
398 struct soc_camera_link *icl = mt9m111->client->dev.platform_data; 399 struct soc_camera_link *icl = client->dev.platform_data;
399 int ret; 400 int ret;
400 401
401 ret = reg_set(RESET, MT9M111_RESET_RESET_MODE); 402 ret = reg_set(RESET, MT9M111_RESET_RESET_MODE);
@@ -406,7 +407,7 @@ static int mt9m111_reset(struct soc_camera_device *icd)
406 | MT9M111_RESET_RESET_SOC); 407 | MT9M111_RESET_RESET_SOC);
407 408
408 if (icl->reset) 409 if (icl->reset)
409 icl->reset(&mt9m111->client->dev); 410 icl->reset(&client->dev);
410 411
411 return ret; 412 return ret;
412} 413}
@@ -562,15 +563,14 @@ static int mt9m111_get_register(struct soc_camera_device *icd,
562 struct v4l2_dbg_register *reg) 563 struct v4l2_dbg_register *reg)
563{ 564{
564 int val; 565 int val;
565 566 struct i2c_client *client = to_i2c_client(icd->control);
566 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
567 567
568 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff) 568 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff)
569 return -EINVAL; 569 return -EINVAL;
570 if (reg->match.addr != mt9m111->client->addr) 570 if (reg->match.addr != client->addr)
571 return -ENODEV; 571 return -ENODEV;
572 572
573 val = mt9m111_reg_read(icd, reg->reg); 573 val = mt9m111_reg_read(client, reg->reg);
574 reg->size = 2; 574 reg->size = 2;
575 reg->val = (u64)val; 575 reg->val = (u64)val;
576 576
@@ -583,15 +583,15 @@ static int mt9m111_get_register(struct soc_camera_device *icd,
583static int mt9m111_set_register(struct soc_camera_device *icd, 583static int mt9m111_set_register(struct soc_camera_device *icd,
584 struct v4l2_dbg_register *reg) 584 struct v4l2_dbg_register *reg)
585{ 585{
586 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); 586 struct i2c_client *client = to_i2c_client(icd->control);
587 587
588 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff) 588 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x2ff)
589 return -EINVAL; 589 return -EINVAL;
590 590
591 if (reg->match.addr != mt9m111->client->addr) 591 if (reg->match.addr != client->addr)
592 return -ENODEV; 592 return -ENODEV;
593 593
594 if (mt9m111_reg_write(icd, reg->reg, reg->val) < 0) 594 if (mt9m111_reg_write(client, reg->reg, reg->val) < 0)
595 return -EIO; 595 return -EIO;
596 596
597 return 0; 597 return 0;
@@ -672,6 +672,7 @@ static struct soc_camera_ops mt9m111_ops = {
672 672
673static int mt9m111_set_flip(struct soc_camera_device *icd, int flip, int mask) 673static int mt9m111_set_flip(struct soc_camera_device *icd, int flip, int mask)
674{ 674{
675 struct i2c_client *client = to_i2c_client(icd->control);
675 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); 676 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
676 int ret; 677 int ret;
677 678
@@ -692,6 +693,7 @@ static int mt9m111_set_flip(struct soc_camera_device *icd, int flip, int mask)
692 693
693static int mt9m111_get_global_gain(struct soc_camera_device *icd) 694static int mt9m111_get_global_gain(struct soc_camera_device *icd)
694{ 695{
696 struct i2c_client *client = to_i2c_client(icd->control);
695 int data; 697 int data;
696 698
697 data = reg_read(GLOBAL_GAIN); 699 data = reg_read(GLOBAL_GAIN);
@@ -703,6 +705,7 @@ static int mt9m111_get_global_gain(struct soc_camera_device *icd)
703 705
704static int mt9m111_set_global_gain(struct soc_camera_device *icd, int gain) 706static int mt9m111_set_global_gain(struct soc_camera_device *icd, int gain)
705{ 707{
708 struct i2c_client *client = to_i2c_client(icd->control);
706 u16 val; 709 u16 val;
707 710
708 if (gain > 63 * 2 * 2) 711 if (gain > 63 * 2 * 2)
@@ -721,6 +724,7 @@ static int mt9m111_set_global_gain(struct soc_camera_device *icd, int gain)
721 724
722static int mt9m111_set_autoexposure(struct soc_camera_device *icd, int on) 725static int mt9m111_set_autoexposure(struct soc_camera_device *icd, int on)
723{ 726{
727 struct i2c_client *client = to_i2c_client(icd->control);
724 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); 728 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
725 int ret; 729 int ret;
726 730
@@ -737,6 +741,7 @@ static int mt9m111_set_autoexposure(struct soc_camera_device *icd, int on)
737 741
738static int mt9m111_set_autowhitebalance(struct soc_camera_device *icd, int on) 742static int mt9m111_set_autowhitebalance(struct soc_camera_device *icd, int on)
739{ 743{
744 struct i2c_client *client = to_i2c_client(icd->control);
740 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); 745 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
741 int ret; 746 int ret;
742 747
@@ -754,6 +759,7 @@ static int mt9m111_set_autowhitebalance(struct soc_camera_device *icd, int on)
754static int mt9m111_get_control(struct soc_camera_device *icd, 759static int mt9m111_get_control(struct soc_camera_device *icd,
755 struct v4l2_control *ctrl) 760 struct v4l2_control *ctrl)
756{ 761{
762 struct i2c_client *client = to_i2c_client(icd->control);
757 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); 763 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
758 int data; 764 int data;
759 765
@@ -898,6 +904,7 @@ static int mt9m111_release(struct soc_camera_device *icd)
898 */ 904 */
899static int mt9m111_video_probe(struct soc_camera_device *icd) 905static int mt9m111_video_probe(struct soc_camera_device *icd)
900{ 906{
907 struct i2c_client *client = to_i2c_client(icd->control);
901 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd); 908 struct mt9m111 *mt9m111 = container_of(icd, struct mt9m111, icd);
902 s32 data; 909 s32 data;
903 int ret; 910 int ret;
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index 2b0927bfd217..f72aeb7c4deb 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -76,64 +76,61 @@ struct mt9t031 {
76 u16 yskip; 76 u16 yskip;
77}; 77};
78 78
79static int reg_read(struct soc_camera_device *icd, const u8 reg) 79static int reg_read(struct i2c_client *client, const u8 reg)
80{ 80{
81 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
82 struct i2c_client *client = mt9t031->client;
83 s32 data = i2c_smbus_read_word_data(client, reg); 81 s32 data = i2c_smbus_read_word_data(client, reg);
84 return data < 0 ? data : swab16(data); 82 return data < 0 ? data : swab16(data);
85} 83}
86 84
87static int reg_write(struct soc_camera_device *icd, const u8 reg, 85static int reg_write(struct i2c_client *client, const u8 reg,
88 const u16 data) 86 const u16 data)
89{ 87{
90 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); 88 return i2c_smbus_write_word_data(client, reg, swab16(data));
91 return i2c_smbus_write_word_data(mt9t031->client, reg, swab16(data));
92} 89}
93 90
94static int reg_set(struct soc_camera_device *icd, const u8 reg, 91static int reg_set(struct i2c_client *client, const u8 reg,
95 const u16 data) 92 const u16 data)
96{ 93{
97 int ret; 94 int ret;
98 95
99 ret = reg_read(icd, reg); 96 ret = reg_read(client, reg);
100 if (ret < 0) 97 if (ret < 0)
101 return ret; 98 return ret;
102 return reg_write(icd, reg, ret | data); 99 return reg_write(client, reg, ret | data);
103} 100}
104 101
105static int reg_clear(struct soc_camera_device *icd, const u8 reg, 102static int reg_clear(struct i2c_client *client, const u8 reg,
106 const u16 data) 103 const u16 data)
107{ 104{
108 int ret; 105 int ret;
109 106
110 ret = reg_read(icd, reg); 107 ret = reg_read(client, reg);
111 if (ret < 0) 108 if (ret < 0)
112 return ret; 109 return ret;
113 return reg_write(icd, reg, ret & ~data); 110 return reg_write(client, reg, ret & ~data);
114} 111}
115 112
116static int set_shutter(struct soc_camera_device *icd, const u32 data) 113static int set_shutter(struct i2c_client *client, const u32 data)
117{ 114{
118 int ret; 115 int ret;
119 116
120 ret = reg_write(icd, MT9T031_SHUTTER_WIDTH_UPPER, data >> 16); 117 ret = reg_write(client, MT9T031_SHUTTER_WIDTH_UPPER, data >> 16);
121 118
122 if (ret >= 0) 119 if (ret >= 0)
123 ret = reg_write(icd, MT9T031_SHUTTER_WIDTH, data & 0xffff); 120 ret = reg_write(client, MT9T031_SHUTTER_WIDTH, data & 0xffff);
124 121
125 return ret; 122 return ret;
126} 123}
127 124
128static int get_shutter(struct soc_camera_device *icd, u32 *data) 125static int get_shutter(struct i2c_client *client, u32 *data)
129{ 126{
130 int ret; 127 int ret;
131 128
132 ret = reg_read(icd, MT9T031_SHUTTER_WIDTH_UPPER); 129 ret = reg_read(client, MT9T031_SHUTTER_WIDTH_UPPER);
133 *data = ret << 16; 130 *data = ret << 16;
134 131
135 if (ret >= 0) 132 if (ret >= 0)
136 ret = reg_read(icd, MT9T031_SHUTTER_WIDTH); 133 ret = reg_read(client, MT9T031_SHUTTER_WIDTH);
137 *data |= ret & 0xffff; 134 *data |= ret & 0xffff;
138 135
139 return ret < 0 ? ret : 0; 136 return ret < 0 ? ret : 0;
@@ -141,12 +138,12 @@ static int get_shutter(struct soc_camera_device *icd, u32 *data)
141 138
142static int mt9t031_init(struct soc_camera_device *icd) 139static int mt9t031_init(struct soc_camera_device *icd)
143{ 140{
144 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); 141 struct i2c_client *client = to_i2c_client(icd->control);
145 struct soc_camera_link *icl = mt9t031->client->dev.platform_data; 142 struct soc_camera_link *icl = client->dev.platform_data;
146 int ret; 143 int ret;
147 144
148 if (icl->power) { 145 if (icl->power) {
149 ret = icl->power(&mt9t031->client->dev, 1); 146 ret = icl->power(&client->dev, 1);
150 if (ret < 0) { 147 if (ret < 0) {
151 dev_err(icd->vdev->parent, 148 dev_err(icd->vdev->parent,
152 "Platform failed to power-on the camera.\n"); 149 "Platform failed to power-on the camera.\n");
@@ -155,44 +152,48 @@ static int mt9t031_init(struct soc_camera_device *icd)
155 } 152 }
156 153
157 /* Disable chip output, synchronous option update */ 154 /* Disable chip output, synchronous option update */
158 ret = reg_write(icd, MT9T031_RESET, 1); 155 ret = reg_write(client, MT9T031_RESET, 1);
159 if (ret >= 0) 156 if (ret >= 0)
160 ret = reg_write(icd, MT9T031_RESET, 0); 157 ret = reg_write(client, MT9T031_RESET, 0);
161 if (ret >= 0) 158 if (ret >= 0)
162 ret = reg_clear(icd, MT9T031_OUTPUT_CONTROL, 2); 159 ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 2);
163 160
164 if (ret < 0 && icl->power) 161 if (ret < 0 && icl->power)
165 icl->power(&mt9t031->client->dev, 0); 162 icl->power(&client->dev, 0);
166 163
167 return ret >= 0 ? 0 : -EIO; 164 return ret >= 0 ? 0 : -EIO;
168} 165}
169 166
170static int mt9t031_release(struct soc_camera_device *icd) 167static int mt9t031_release(struct soc_camera_device *icd)
171{ 168{
172 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); 169 struct i2c_client *client = to_i2c_client(icd->control);
173 struct soc_camera_link *icl = mt9t031->client->dev.platform_data; 170 struct soc_camera_link *icl = client->dev.platform_data;
174 171
175 /* Disable the chip */ 172 /* Disable the chip */
176 reg_clear(icd, MT9T031_OUTPUT_CONTROL, 2); 173 reg_clear(client, MT9T031_OUTPUT_CONTROL, 2);
177 174
178 if (icl->power) 175 if (icl->power)
179 icl->power(&mt9t031->client->dev, 0); 176 icl->power(&client->dev, 0);
180 177
181 return 0; 178 return 0;
182} 179}
183 180
184static int mt9t031_start_capture(struct soc_camera_device *icd) 181static int mt9t031_start_capture(struct soc_camera_device *icd)
185{ 182{
183 struct i2c_client *client = to_i2c_client(icd->control);
184
186 /* Switch to master "normal" mode */ 185 /* Switch to master "normal" mode */
187 if (reg_set(icd, MT9T031_OUTPUT_CONTROL, 2) < 0) 186 if (reg_set(client, MT9T031_OUTPUT_CONTROL, 2) < 0)
188 return -EIO; 187 return -EIO;
189 return 0; 188 return 0;
190} 189}
191 190
192static int mt9t031_stop_capture(struct soc_camera_device *icd) 191static int mt9t031_stop_capture(struct soc_camera_device *icd)
193{ 192{
193 struct i2c_client *client = to_i2c_client(icd->control);
194
194 /* Stop sensor readout */ 195 /* Stop sensor readout */
195 if (reg_clear(icd, MT9T031_OUTPUT_CONTROL, 2) < 0) 196 if (reg_clear(client, MT9T031_OUTPUT_CONTROL, 2) < 0)
196 return -EIO; 197 return -EIO;
197 return 0; 198 return 0;
198} 199}
@@ -200,14 +201,16 @@ static int mt9t031_stop_capture(struct soc_camera_device *icd)
200static int mt9t031_set_bus_param(struct soc_camera_device *icd, 201static int mt9t031_set_bus_param(struct soc_camera_device *icd,
201 unsigned long flags) 202 unsigned long flags)
202{ 203{
204 struct i2c_client *client = to_i2c_client(icd->control);
205
203 /* The caller should have queried our parameters, check anyway */ 206 /* The caller should have queried our parameters, check anyway */
204 if (flags & ~MT9T031_BUS_PARAM) 207 if (flags & ~MT9T031_BUS_PARAM)
205 return -EINVAL; 208 return -EINVAL;
206 209
207 if (flags & SOCAM_PCLK_SAMPLE_FALLING) 210 if (flags & SOCAM_PCLK_SAMPLE_FALLING)
208 reg_clear(icd, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000); 211 reg_clear(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
209 else 212 else
210 reg_set(icd, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000); 213 reg_set(client, MT9T031_PIXEL_CLOCK_CONTROL, 0x8000);
211 214
212 return 0; 215 return 0;
213} 216}
@@ -235,6 +238,7 @@ static void recalculate_limits(struct soc_camera_device *icd,
235static int mt9t031_set_params(struct soc_camera_device *icd, 238static int mt9t031_set_params(struct soc_camera_device *icd,
236 struct v4l2_rect *rect, u16 xskip, u16 yskip) 239 struct v4l2_rect *rect, u16 xskip, u16 yskip)
237{ 240{
241 struct i2c_client *client = to_i2c_client(icd->control);
238 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); 242 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
239 int ret; 243 int ret;
240 u16 xbin, ybin, width, height, left, top; 244 u16 xbin, ybin, width, height, left, top;
@@ -277,22 +281,22 @@ static int mt9t031_set_params(struct soc_camera_device *icd,
277 } 281 }
278 282
279 /* Disable register update, reconfigure atomically */ 283 /* Disable register update, reconfigure atomically */
280 ret = reg_set(icd, MT9T031_OUTPUT_CONTROL, 1); 284 ret = reg_set(client, MT9T031_OUTPUT_CONTROL, 1);
281 if (ret < 0) 285 if (ret < 0)
282 return ret; 286 return ret;
283 287
284 /* Blanking and start values - default... */ 288 /* Blanking and start values - default... */
285 ret = reg_write(icd, MT9T031_HORIZONTAL_BLANKING, hblank); 289 ret = reg_write(client, MT9T031_HORIZONTAL_BLANKING, hblank);
286 if (ret >= 0) 290 if (ret >= 0)
287 ret = reg_write(icd, MT9T031_VERTICAL_BLANKING, vblank); 291 ret = reg_write(client, MT9T031_VERTICAL_BLANKING, vblank);
288 292
289 if (yskip != mt9t031->yskip || xskip != mt9t031->xskip) { 293 if (yskip != mt9t031->yskip || xskip != mt9t031->xskip) {
290 /* Binning, skipping */ 294 /* Binning, skipping */
291 if (ret >= 0) 295 if (ret >= 0)
292 ret = reg_write(icd, MT9T031_COLUMN_ADDRESS_MODE, 296 ret = reg_write(client, MT9T031_COLUMN_ADDRESS_MODE,
293 ((xbin - 1) << 4) | (xskip - 1)); 297 ((xbin - 1) << 4) | (xskip - 1));
294 if (ret >= 0) 298 if (ret >= 0)
295 ret = reg_write(icd, MT9T031_ROW_ADDRESS_MODE, 299 ret = reg_write(client, MT9T031_ROW_ADDRESS_MODE,
296 ((ybin - 1) << 4) | (yskip - 1)); 300 ((ybin - 1) << 4) | (yskip - 1));
297 } 301 }
298 dev_dbg(&icd->dev, "new physical left %u, top %u\n", left, top); 302 dev_dbg(&icd->dev, "new physical left %u, top %u\n", left, top);
@@ -300,16 +304,16 @@ static int mt9t031_set_params(struct soc_camera_device *icd,
300 /* The caller provides a supported format, as guaranteed by 304 /* The caller provides a supported format, as guaranteed by
301 * icd->try_fmt_cap(), soc_camera_s_crop() and soc_camera_cropcap() */ 305 * icd->try_fmt_cap(), soc_camera_s_crop() and soc_camera_cropcap() */
302 if (ret >= 0) 306 if (ret >= 0)
303 ret = reg_write(icd, MT9T031_COLUMN_START, left); 307 ret = reg_write(client, MT9T031_COLUMN_START, left);
304 if (ret >= 0) 308 if (ret >= 0)
305 ret = reg_write(icd, MT9T031_ROW_START, top); 309 ret = reg_write(client, MT9T031_ROW_START, top);
306 if (ret >= 0) 310 if (ret >= 0)
307 ret = reg_write(icd, MT9T031_WINDOW_WIDTH, width - 1); 311 ret = reg_write(client, MT9T031_WINDOW_WIDTH, width - 1);
308 if (ret >= 0) 312 if (ret >= 0)
309 ret = reg_write(icd, MT9T031_WINDOW_HEIGHT, 313 ret = reg_write(client, MT9T031_WINDOW_HEIGHT,
310 height + icd->y_skip_top - 1); 314 height + icd->y_skip_top - 1);
311 if (ret >= 0 && mt9t031->autoexposure) { 315 if (ret >= 0 && mt9t031->autoexposure) {
312 ret = set_shutter(icd, height + icd->y_skip_top + vblank); 316 ret = set_shutter(client, height + icd->y_skip_top + vblank);
313 if (ret >= 0) { 317 if (ret >= 0) {
314 const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank; 318 const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
315 const struct v4l2_queryctrl *qctrl = 319 const struct v4l2_queryctrl *qctrl =
@@ -324,7 +328,7 @@ static int mt9t031_set_params(struct soc_camera_device *icd,
324 328
325 /* Re-enable register update, commit all changes */ 329 /* Re-enable register update, commit all changes */
326 if (ret >= 0) 330 if (ret >= 0)
327 ret = reg_clear(icd, MT9T031_OUTPUT_CONTROL, 1); 331 ret = reg_clear(client, MT9T031_OUTPUT_CONTROL, 1);
328 332
329 return ret < 0 ? ret : 0; 333 return ret < 0 ? ret : 0;
330} 334}
@@ -417,15 +421,15 @@ static int mt9t031_get_chip_id(struct soc_camera_device *icd,
417static int mt9t031_get_register(struct soc_camera_device *icd, 421static int mt9t031_get_register(struct soc_camera_device *icd,
418 struct v4l2_dbg_register *reg) 422 struct v4l2_dbg_register *reg)
419{ 423{
420 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); 424 struct i2c_client *client = to_i2c_client(icd->control);
421 425
422 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) 426 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
423 return -EINVAL; 427 return -EINVAL;
424 428
425 if (reg->match.addr != mt9t031->client->addr) 429 if (reg->match.addr != client->addr)
426 return -ENODEV; 430 return -ENODEV;
427 431
428 reg->val = reg_read(icd, reg->reg); 432 reg->val = reg_read(client, reg->reg);
429 433
430 if (reg->val > 0xffff) 434 if (reg->val > 0xffff)
431 return -EIO; 435 return -EIO;
@@ -436,15 +440,15 @@ static int mt9t031_get_register(struct soc_camera_device *icd,
436static int mt9t031_set_register(struct soc_camera_device *icd, 440static int mt9t031_set_register(struct soc_camera_device *icd,
437 struct v4l2_dbg_register *reg) 441 struct v4l2_dbg_register *reg)
438{ 442{
439 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); 443 struct i2c_client *client = to_i2c_client(icd->control);
440 444
441 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) 445 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
442 return -EINVAL; 446 return -EINVAL;
443 447
444 if (reg->match.addr != mt9t031->client->addr) 448 if (reg->match.addr != client->addr)
445 return -ENODEV; 449 return -ENODEV;
446 450
447 if (reg_write(icd, reg->reg, reg->val) < 0) 451 if (reg_write(client, reg->reg, reg->val) < 0)
448 return -EIO; 452 return -EIO;
449 453
450 return 0; 454 return 0;
@@ -528,18 +532,19 @@ static struct soc_camera_ops mt9t031_ops = {
528 532
529static int mt9t031_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) 533static int mt9t031_get_control(struct soc_camera_device *icd, struct v4l2_control *ctrl)
530{ 534{
535 struct i2c_client *client = to_i2c_client(icd->control);
531 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); 536 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
532 int data; 537 int data;
533 538
534 switch (ctrl->id) { 539 switch (ctrl->id) {
535 case V4L2_CID_VFLIP: 540 case V4L2_CID_VFLIP:
536 data = reg_read(icd, MT9T031_READ_MODE_2); 541 data = reg_read(client, MT9T031_READ_MODE_2);
537 if (data < 0) 542 if (data < 0)
538 return -EIO; 543 return -EIO;
539 ctrl->value = !!(data & 0x8000); 544 ctrl->value = !!(data & 0x8000);
540 break; 545 break;
541 case V4L2_CID_HFLIP: 546 case V4L2_CID_HFLIP:
542 data = reg_read(icd, MT9T031_READ_MODE_2); 547 data = reg_read(client, MT9T031_READ_MODE_2);
543 if (data < 0) 548 if (data < 0)
544 return -EIO; 549 return -EIO;
545 ctrl->value = !!(data & 0x4000); 550 ctrl->value = !!(data & 0x4000);
@@ -553,6 +558,7 @@ static int mt9t031_get_control(struct soc_camera_device *icd, struct v4l2_contro
553 558
554static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl) 559static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_control *ctrl)
555{ 560{
561 struct i2c_client *client = to_i2c_client(icd->control);
556 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); 562 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
557 const struct v4l2_queryctrl *qctrl; 563 const struct v4l2_queryctrl *qctrl;
558 int data; 564 int data;
@@ -565,17 +571,17 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro
565 switch (ctrl->id) { 571 switch (ctrl->id) {
566 case V4L2_CID_VFLIP: 572 case V4L2_CID_VFLIP:
567 if (ctrl->value) 573 if (ctrl->value)
568 data = reg_set(icd, MT9T031_READ_MODE_2, 0x8000); 574 data = reg_set(client, MT9T031_READ_MODE_2, 0x8000);
569 else 575 else
570 data = reg_clear(icd, MT9T031_READ_MODE_2, 0x8000); 576 data = reg_clear(client, MT9T031_READ_MODE_2, 0x8000);
571 if (data < 0) 577 if (data < 0)
572 return -EIO; 578 return -EIO;
573 break; 579 break;
574 case V4L2_CID_HFLIP: 580 case V4L2_CID_HFLIP:
575 if (ctrl->value) 581 if (ctrl->value)
576 data = reg_set(icd, MT9T031_READ_MODE_2, 0x4000); 582 data = reg_set(client, MT9T031_READ_MODE_2, 0x4000);
577 else 583 else
578 data = reg_clear(icd, MT9T031_READ_MODE_2, 0x4000); 584 data = reg_clear(client, MT9T031_READ_MODE_2, 0x4000);
579 if (data < 0) 585 if (data < 0)
580 return -EIO; 586 return -EIO;
581 break; 587 break;
@@ -589,7 +595,7 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro
589 data = ((ctrl->value - qctrl->minimum) * 8 + range / 2) / range; 595 data = ((ctrl->value - qctrl->minimum) * 8 + range / 2) / range;
590 596
591 dev_dbg(&icd->dev, "Setting gain %d\n", data); 597 dev_dbg(&icd->dev, "Setting gain %d\n", data);
592 data = reg_write(icd, MT9T031_GLOBAL_GAIN, data); 598 data = reg_write(client, MT9T031_GLOBAL_GAIN, data);
593 if (data < 0) 599 if (data < 0)
594 return -EIO; 600 return -EIO;
595 } else { 601 } else {
@@ -609,8 +615,8 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro
609 data = (((gain - 64 + 7) * 32) & 0xff00) | 0x60; 615 data = (((gain - 64 + 7) * 32) & 0xff00) | 0x60;
610 616
611 dev_dbg(&icd->dev, "Setting gain from 0x%x to 0x%x\n", 617 dev_dbg(&icd->dev, "Setting gain from 0x%x to 0x%x\n",
612 reg_read(icd, MT9T031_GLOBAL_GAIN), data); 618 reg_read(client, MT9T031_GLOBAL_GAIN), data);
613 data = reg_write(icd, MT9T031_GLOBAL_GAIN, data); 619 data = reg_write(client, MT9T031_GLOBAL_GAIN, data);
614 if (data < 0) 620 if (data < 0)
615 return -EIO; 621 return -EIO;
616 } 622 }
@@ -628,10 +634,10 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro
628 range / 2) / range + 1; 634 range / 2) / range + 1;
629 u32 old; 635 u32 old;
630 636
631 get_shutter(icd, &old); 637 get_shutter(client, &old);
632 dev_dbg(&icd->dev, "Setting shutter width from %u to %u\n", 638 dev_dbg(&icd->dev, "Setting shutter width from %u to %u\n",
633 old, shutter); 639 old, shutter);
634 if (set_shutter(icd, shutter) < 0) 640 if (set_shutter(client, shutter) < 0)
635 return -EIO; 641 return -EIO;
636 icd->exposure = ctrl->value; 642 icd->exposure = ctrl->value;
637 mt9t031->autoexposure = 0; 643 mt9t031->autoexposure = 0;
@@ -641,7 +647,7 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro
641 if (ctrl->value) { 647 if (ctrl->value) {
642 const u16 vblank = MT9T031_VERTICAL_BLANK; 648 const u16 vblank = MT9T031_VERTICAL_BLANK;
643 const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank; 649 const u32 shutter_max = MT9T031_MAX_HEIGHT + vblank;
644 if (set_shutter(icd, icd->height + 650 if (set_shutter(client, icd->height +
645 icd->y_skip_top + vblank) < 0) 651 icd->y_skip_top + vblank) < 0)
646 return -EIO; 652 return -EIO;
647 qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE); 653 qctrl = soc_camera_find_qctrl(icd->ops, V4L2_CID_EXPOSURE);
@@ -661,6 +667,7 @@ static int mt9t031_set_control(struct soc_camera_device *icd, struct v4l2_contro
661 * this wasn't our capture interface, so, we wait for the right one */ 667 * this wasn't our capture interface, so, we wait for the right one */
662static int mt9t031_video_probe(struct soc_camera_device *icd) 668static int mt9t031_video_probe(struct soc_camera_device *icd)
663{ 669{
670 struct i2c_client *client = to_i2c_client(icd->control);
664 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd); 671 struct mt9t031 *mt9t031 = container_of(icd, struct mt9t031, icd);
665 s32 data; 672 s32 data;
666 int ret; 673 int ret;
@@ -672,11 +679,11 @@ static int mt9t031_video_probe(struct soc_camera_device *icd)
672 return -ENODEV; 679 return -ENODEV;
673 680
674 /* Enable the chip */ 681 /* Enable the chip */
675 data = reg_write(icd, MT9T031_CHIP_ENABLE, 1); 682 data = reg_write(client, MT9T031_CHIP_ENABLE, 1);
676 dev_dbg(&icd->dev, "write: %d\n", data); 683 dev_dbg(&icd->dev, "write: %d\n", data);
677 684
678 /* Read out the chip version register */ 685 /* Read out the chip version register */
679 data = reg_read(icd, MT9T031_CHIP_VERSION); 686 data = reg_read(client, MT9T031_CHIP_VERSION);
680 687
681 switch (data) { 688 switch (data) {
682 case 0x1621: 689 case 0x1621:
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index 4d3b4813c322..be20d312b1dc 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -91,51 +91,49 @@ struct mt9v022 {
91 u16 chip_control; 91 u16 chip_control;
92}; 92};
93 93
94static int reg_read(struct soc_camera_device *icd, const u8 reg) 94static int reg_read(struct i2c_client *client, const u8 reg)
95{ 95{
96 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
97 struct i2c_client *client = mt9v022->client;
98 s32 data = i2c_smbus_read_word_data(client, reg); 96 s32 data = i2c_smbus_read_word_data(client, reg);
99 return data < 0 ? data : swab16(data); 97 return data < 0 ? data : swab16(data);
100} 98}
101 99
102static int reg_write(struct soc_camera_device *icd, const u8 reg, 100static int reg_write(struct i2c_client *client, const u8 reg,
103 const u16 data) 101 const u16 data)
104{ 102{
105 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 103 return i2c_smbus_write_word_data(client, reg, swab16(data));
106 return i2c_smbus_write_word_data(mt9v022->client, reg, swab16(data));
107} 104}
108 105
109static int reg_set(struct soc_camera_device *icd, const u8 reg, 106static int reg_set(struct i2c_client *client, const u8 reg,
110 const u16 data) 107 const u16 data)
111{ 108{
112 int ret; 109 int ret;
113 110
114 ret = reg_read(icd, reg); 111 ret = reg_read(client, reg);
115 if (ret < 0) 112 if (ret < 0)
116 return ret; 113 return ret;
117 return reg_write(icd, reg, ret | data); 114 return reg_write(client, reg, ret | data);
118} 115}
119 116
120static int reg_clear(struct soc_camera_device *icd, const u8 reg, 117static int reg_clear(struct i2c_client *client, const u8 reg,
121 const u16 data) 118 const u16 data)
122{ 119{
123 int ret; 120 int ret;
124 121
125 ret = reg_read(icd, reg); 122 ret = reg_read(client, reg);
126 if (ret < 0) 123 if (ret < 0)
127 return ret; 124 return ret;
128 return reg_write(icd, reg, ret & ~data); 125 return reg_write(client, reg, ret & ~data);
129} 126}
130 127
131static int mt9v022_init(struct soc_camera_device *icd) 128static int mt9v022_init(struct soc_camera_device *icd)
132{ 129{
130 struct i2c_client *client = to_i2c_client(icd->control);
133 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 131 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
134 struct soc_camera_link *icl = mt9v022->client->dev.platform_data; 132 struct soc_camera_link *icl = client->dev.platform_data;
135 int ret; 133 int ret;
136 134
137 if (icl->power) { 135 if (icl->power) {
138 ret = icl->power(&mt9v022->client->dev, 1); 136 ret = icl->power(&client->dev, 1);
139 if (ret < 0) { 137 if (ret < 0) {
140 dev_err(icd->vdev->parent, 138 dev_err(icd->vdev->parent,
141 "Platform failed to power-on the camera.\n"); 139 "Platform failed to power-on the camera.\n");
@@ -148,27 +146,27 @@ static int mt9v022_init(struct soc_camera_device *icd)
148 * if available. Soft reset is done in video_probe(). 146 * if available. Soft reset is done in video_probe().
149 */ 147 */
150 if (icl->reset) 148 if (icl->reset)
151 icl->reset(&mt9v022->client->dev); 149 icl->reset(&client->dev);
152 150
153 /* Almost the default mode: master, parallel, simultaneous, and an 151 /* Almost the default mode: master, parallel, simultaneous, and an
154 * undocumented bit 0x200, which is present in table 7, but not in 8, 152 * undocumented bit 0x200, which is present in table 7, but not in 8,
155 * plus snapshot mode to disable scan for now */ 153 * plus snapshot mode to disable scan for now */
156 mt9v022->chip_control |= 0x10; 154 mt9v022->chip_control |= 0x10;
157 ret = reg_write(icd, MT9V022_CHIP_CONTROL, mt9v022->chip_control); 155 ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control);
158 if (!ret) 156 if (!ret)
159 ret = reg_write(icd, MT9V022_READ_MODE, 0x300); 157 ret = reg_write(client, MT9V022_READ_MODE, 0x300);
160 158
161 /* All defaults */ 159 /* All defaults */
162 if (!ret) 160 if (!ret)
163 /* AEC, AGC on */ 161 /* AEC, AGC on */
164 ret = reg_set(icd, MT9V022_AEC_AGC_ENABLE, 0x3); 162 ret = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x3);
165 if (!ret) 163 if (!ret)
166 ret = reg_write(icd, MT9V022_MAX_TOTAL_SHUTTER_WIDTH, 480); 164 ret = reg_write(client, MT9V022_MAX_TOTAL_SHUTTER_WIDTH, 480);
167 if (!ret) 165 if (!ret)
168 /* default - auto */ 166 /* default - auto */
169 ret = reg_clear(icd, MT9V022_BLACK_LEVEL_CALIB_CTRL, 1); 167 ret = reg_clear(client, MT9V022_BLACK_LEVEL_CALIB_CTRL, 1);
170 if (!ret) 168 if (!ret)
171 ret = reg_write(icd, MT9V022_DIGITAL_TEST_PATTERN, 0); 169 ret = reg_write(client, MT9V022_DIGITAL_TEST_PATTERN, 0);
172 170
173 return ret; 171 return ret;
174} 172}
@@ -186,10 +184,11 @@ static int mt9v022_release(struct soc_camera_device *icd)
186 184
187static int mt9v022_start_capture(struct soc_camera_device *icd) 185static int mt9v022_start_capture(struct soc_camera_device *icd)
188{ 186{
187 struct i2c_client *client = to_i2c_client(icd->control);
189 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 188 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
190 /* Switch to master "normal" mode */ 189 /* Switch to master "normal" mode */
191 mt9v022->chip_control &= ~0x10; 190 mt9v022->chip_control &= ~0x10;
192 if (reg_write(icd, MT9V022_CHIP_CONTROL, 191 if (reg_write(client, MT9V022_CHIP_CONTROL,
193 mt9v022->chip_control) < 0) 192 mt9v022->chip_control) < 0)
194 return -EIO; 193 return -EIO;
195 return 0; 194 return 0;
@@ -197,10 +196,11 @@ static int mt9v022_start_capture(struct soc_camera_device *icd)
197 196
198static int mt9v022_stop_capture(struct soc_camera_device *icd) 197static int mt9v022_stop_capture(struct soc_camera_device *icd)
199{ 198{
199 struct i2c_client *client = to_i2c_client(icd->control);
200 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 200 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
201 /* Switch to snapshot mode */ 201 /* Switch to snapshot mode */
202 mt9v022->chip_control |= 0x10; 202 mt9v022->chip_control |= 0x10;
203 if (reg_write(icd, MT9V022_CHIP_CONTROL, 203 if (reg_write(client, MT9V022_CHIP_CONTROL,
204 mt9v022->chip_control) < 0) 204 mt9v022->chip_control) < 0)
205 return -EIO; 205 return -EIO;
206 return 0; 206 return 0;
@@ -209,8 +209,9 @@ static int mt9v022_stop_capture(struct soc_camera_device *icd)
209static int mt9v022_set_bus_param(struct soc_camera_device *icd, 209static int mt9v022_set_bus_param(struct soc_camera_device *icd,
210 unsigned long flags) 210 unsigned long flags)
211{ 211{
212 struct i2c_client *client = to_i2c_client(icd->control);
212 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 213 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
213 struct soc_camera_link *icl = mt9v022->client->dev.platform_data; 214 struct soc_camera_link *icl = client->dev.platform_data;
214 unsigned int width_flag = flags & SOCAM_DATAWIDTH_MASK; 215 unsigned int width_flag = flags & SOCAM_DATAWIDTH_MASK;
215 int ret; 216 int ret;
216 u16 pixclk = 0; 217 u16 pixclk = 0;
@@ -243,14 +244,14 @@ static int mt9v022_set_bus_param(struct soc_camera_device *icd,
243 if (!(flags & SOCAM_VSYNC_ACTIVE_HIGH)) 244 if (!(flags & SOCAM_VSYNC_ACTIVE_HIGH))
244 pixclk |= 0x2; 245 pixclk |= 0x2;
245 246
246 ret = reg_write(icd, MT9V022_PIXCLK_FV_LV, pixclk); 247 ret = reg_write(client, MT9V022_PIXCLK_FV_LV, pixclk);
247 if (ret < 0) 248 if (ret < 0)
248 return ret; 249 return ret;
249 250
250 if (!(flags & SOCAM_MASTER)) 251 if (!(flags & SOCAM_MASTER))
251 mt9v022->chip_control &= ~0x8; 252 mt9v022->chip_control &= ~0x8;
252 253
253 ret = reg_write(icd, MT9V022_CHIP_CONTROL, mt9v022->chip_control); 254 ret = reg_write(client, MT9V022_CHIP_CONTROL, mt9v022->chip_control);
254 if (ret < 0) 255 if (ret < 0)
255 return ret; 256 return ret;
256 257
@@ -282,35 +283,36 @@ static unsigned long mt9v022_query_bus_param(struct soc_camera_device *icd)
282static int mt9v022_set_crop(struct soc_camera_device *icd, 283static int mt9v022_set_crop(struct soc_camera_device *icd,
283 struct v4l2_rect *rect) 284 struct v4l2_rect *rect)
284{ 285{
286 struct i2c_client *client = to_i2c_client(icd->control);
285 int ret; 287 int ret;
286 288
287 /* Like in example app. Contradicts the datasheet though */ 289 /* Like in example app. Contradicts the datasheet though */
288 ret = reg_read(icd, MT9V022_AEC_AGC_ENABLE); 290 ret = reg_read(client, MT9V022_AEC_AGC_ENABLE);
289 if (ret >= 0) { 291 if (ret >= 0) {
290 if (ret & 1) /* Autoexposure */ 292 if (ret & 1) /* Autoexposure */
291 ret = reg_write(icd, MT9V022_MAX_TOTAL_SHUTTER_WIDTH, 293 ret = reg_write(client, MT9V022_MAX_TOTAL_SHUTTER_WIDTH,
292 rect->height + icd->y_skip_top + 43); 294 rect->height + icd->y_skip_top + 43);
293 else 295 else
294 ret = reg_write(icd, MT9V022_TOTAL_SHUTTER_WIDTH, 296 ret = reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH,
295 rect->height + icd->y_skip_top + 43); 297 rect->height + icd->y_skip_top + 43);
296 } 298 }
297 /* Setup frame format: defaults apart from width and height */ 299 /* Setup frame format: defaults apart from width and height */
298 if (!ret) 300 if (!ret)
299 ret = reg_write(icd, MT9V022_COLUMN_START, rect->left); 301 ret = reg_write(client, MT9V022_COLUMN_START, rect->left);
300 if (!ret) 302 if (!ret)
301 ret = reg_write(icd, MT9V022_ROW_START, rect->top); 303 ret = reg_write(client, MT9V022_ROW_START, rect->top);
302 if (!ret) 304 if (!ret)
303 /* Default 94, Phytec driver says: 305 /* Default 94, Phytec driver says:
304 * "width + horizontal blank >= 660" */ 306 * "width + horizontal blank >= 660" */
305 ret = reg_write(icd, MT9V022_HORIZONTAL_BLANKING, 307 ret = reg_write(client, MT9V022_HORIZONTAL_BLANKING,
306 rect->width > 660 - 43 ? 43 : 308 rect->width > 660 - 43 ? 43 :
307 660 - rect->width); 309 660 - rect->width);
308 if (!ret) 310 if (!ret)
309 ret = reg_write(icd, MT9V022_VERTICAL_BLANKING, 45); 311 ret = reg_write(client, MT9V022_VERTICAL_BLANKING, 45);
310 if (!ret) 312 if (!ret)
311 ret = reg_write(icd, MT9V022_WINDOW_WIDTH, rect->width); 313 ret = reg_write(client, MT9V022_WINDOW_WIDTH, rect->width);
312 if (!ret) 314 if (!ret)
313 ret = reg_write(icd, MT9V022_WINDOW_HEIGHT, 315 ret = reg_write(client, MT9V022_WINDOW_HEIGHT,
314 rect->height + icd->y_skip_top); 316 rect->height + icd->y_skip_top);
315 317
316 if (ret < 0) 318 if (ret < 0)
@@ -396,16 +398,16 @@ static int mt9v022_get_chip_id(struct soc_camera_device *icd,
396static int mt9v022_get_register(struct soc_camera_device *icd, 398static int mt9v022_get_register(struct soc_camera_device *icd,
397 struct v4l2_dbg_register *reg) 399 struct v4l2_dbg_register *reg)
398{ 400{
399 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 401 struct i2c_client *client = to_i2c_client(icd->control);
400 402
401 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) 403 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
402 return -EINVAL; 404 return -EINVAL;
403 405
404 if (reg->match.addr != mt9v022->client->addr) 406 if (reg->match.addr != client->addr)
405 return -ENODEV; 407 return -ENODEV;
406 408
407 reg->size = 2; 409 reg->size = 2;
408 reg->val = reg_read(icd, reg->reg); 410 reg->val = reg_read(client, reg->reg);
409 411
410 if (reg->val > 0xffff) 412 if (reg->val > 0xffff)
411 return -EIO; 413 return -EIO;
@@ -416,15 +418,15 @@ static int mt9v022_get_register(struct soc_camera_device *icd,
416static int mt9v022_set_register(struct soc_camera_device *icd, 418static int mt9v022_set_register(struct soc_camera_device *icd,
417 struct v4l2_dbg_register *reg) 419 struct v4l2_dbg_register *reg)
418{ 420{
419 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 421 struct i2c_client *client = to_i2c_client(icd->control);
420 422
421 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) 423 if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff)
422 return -EINVAL; 424 return -EINVAL;
423 425
424 if (reg->match.addr != mt9v022->client->addr) 426 if (reg->match.addr != client->addr)
425 return -ENODEV; 427 return -ENODEV;
426 428
427 if (reg_write(icd, reg->reg, reg->val) < 0) 429 if (reg_write(client, reg->reg, reg->val) < 0)
428 return -EIO; 430 return -EIO;
429 431
430 return 0; 432 return 0;
@@ -517,29 +519,30 @@ static struct soc_camera_ops mt9v022_ops = {
517static int mt9v022_get_control(struct soc_camera_device *icd, 519static int mt9v022_get_control(struct soc_camera_device *icd,
518 struct v4l2_control *ctrl) 520 struct v4l2_control *ctrl)
519{ 521{
522 struct i2c_client *client = to_i2c_client(icd->control);
520 int data; 523 int data;
521 524
522 switch (ctrl->id) { 525 switch (ctrl->id) {
523 case V4L2_CID_VFLIP: 526 case V4L2_CID_VFLIP:
524 data = reg_read(icd, MT9V022_READ_MODE); 527 data = reg_read(client, MT9V022_READ_MODE);
525 if (data < 0) 528 if (data < 0)
526 return -EIO; 529 return -EIO;
527 ctrl->value = !!(data & 0x10); 530 ctrl->value = !!(data & 0x10);
528 break; 531 break;
529 case V4L2_CID_HFLIP: 532 case V4L2_CID_HFLIP:
530 data = reg_read(icd, MT9V022_READ_MODE); 533 data = reg_read(client, MT9V022_READ_MODE);
531 if (data < 0) 534 if (data < 0)
532 return -EIO; 535 return -EIO;
533 ctrl->value = !!(data & 0x20); 536 ctrl->value = !!(data & 0x20);
534 break; 537 break;
535 case V4L2_CID_EXPOSURE_AUTO: 538 case V4L2_CID_EXPOSURE_AUTO:
536 data = reg_read(icd, MT9V022_AEC_AGC_ENABLE); 539 data = reg_read(client, MT9V022_AEC_AGC_ENABLE);
537 if (data < 0) 540 if (data < 0)
538 return -EIO; 541 return -EIO;
539 ctrl->value = !!(data & 0x1); 542 ctrl->value = !!(data & 0x1);
540 break; 543 break;
541 case V4L2_CID_AUTOGAIN: 544 case V4L2_CID_AUTOGAIN:
542 data = reg_read(icd, MT9V022_AEC_AGC_ENABLE); 545 data = reg_read(client, MT9V022_AEC_AGC_ENABLE);
543 if (data < 0) 546 if (data < 0)
544 return -EIO; 547 return -EIO;
545 ctrl->value = !!(data & 0x2); 548 ctrl->value = !!(data & 0x2);
@@ -552,6 +555,7 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
552 struct v4l2_control *ctrl) 555 struct v4l2_control *ctrl)
553{ 556{
554 int data; 557 int data;
558 struct i2c_client *client = to_i2c_client(icd->control);
555 const struct v4l2_queryctrl *qctrl; 559 const struct v4l2_queryctrl *qctrl;
556 560
557 qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id); 561 qctrl = soc_camera_find_qctrl(&mt9v022_ops, ctrl->id);
@@ -562,17 +566,17 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
562 switch (ctrl->id) { 566 switch (ctrl->id) {
563 case V4L2_CID_VFLIP: 567 case V4L2_CID_VFLIP:
564 if (ctrl->value) 568 if (ctrl->value)
565 data = reg_set(icd, MT9V022_READ_MODE, 0x10); 569 data = reg_set(client, MT9V022_READ_MODE, 0x10);
566 else 570 else
567 data = reg_clear(icd, MT9V022_READ_MODE, 0x10); 571 data = reg_clear(client, MT9V022_READ_MODE, 0x10);
568 if (data < 0) 572 if (data < 0)
569 return -EIO; 573 return -EIO;
570 break; 574 break;
571 case V4L2_CID_HFLIP: 575 case V4L2_CID_HFLIP:
572 if (ctrl->value) 576 if (ctrl->value)
573 data = reg_set(icd, MT9V022_READ_MODE, 0x20); 577 data = reg_set(client, MT9V022_READ_MODE, 0x20);
574 else 578 else
575 data = reg_clear(icd, MT9V022_READ_MODE, 0x20); 579 data = reg_clear(client, MT9V022_READ_MODE, 0x20);
576 if (data < 0) 580 if (data < 0)
577 return -EIO; 581 return -EIO;
578 break; 582 break;
@@ -593,12 +597,12 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
593 /* The user wants to set gain manually, hope, she 597 /* The user wants to set gain manually, hope, she
594 * knows, what she's doing... Switch AGC off. */ 598 * knows, what she's doing... Switch AGC off. */
595 599
596 if (reg_clear(icd, MT9V022_AEC_AGC_ENABLE, 0x2) < 0) 600 if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2) < 0)
597 return -EIO; 601 return -EIO;
598 602
599 dev_info(&icd->dev, "Setting gain from %d to %lu\n", 603 dev_info(&icd->dev, "Setting gain from %d to %lu\n",
600 reg_read(icd, MT9V022_ANALOG_GAIN), gain); 604 reg_read(client, MT9V022_ANALOG_GAIN), gain);
601 if (reg_write(icd, MT9V022_ANALOG_GAIN, gain) < 0) 605 if (reg_write(client, MT9V022_ANALOG_GAIN, gain) < 0)
602 return -EIO; 606 return -EIO;
603 icd->gain = ctrl->value; 607 icd->gain = ctrl->value;
604 } 608 }
@@ -614,13 +618,13 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
614 /* The user wants to set shutter width manually, hope, 618 /* The user wants to set shutter width manually, hope,
615 * she knows, what she's doing... Switch AEC off. */ 619 * she knows, what she's doing... Switch AEC off. */
616 620
617 if (reg_clear(icd, MT9V022_AEC_AGC_ENABLE, 0x1) < 0) 621 if (reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1) < 0)
618 return -EIO; 622 return -EIO;
619 623
620 dev_dbg(&icd->dev, "Shutter width from %d to %lu\n", 624 dev_dbg(&icd->dev, "Shutter width from %d to %lu\n",
621 reg_read(icd, MT9V022_TOTAL_SHUTTER_WIDTH), 625 reg_read(client, MT9V022_TOTAL_SHUTTER_WIDTH),
622 shutter); 626 shutter);
623 if (reg_write(icd, MT9V022_TOTAL_SHUTTER_WIDTH, 627 if (reg_write(client, MT9V022_TOTAL_SHUTTER_WIDTH,
624 shutter) < 0) 628 shutter) < 0)
625 return -EIO; 629 return -EIO;
626 icd->exposure = ctrl->value; 630 icd->exposure = ctrl->value;
@@ -628,17 +632,17 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
628 break; 632 break;
629 case V4L2_CID_AUTOGAIN: 633 case V4L2_CID_AUTOGAIN:
630 if (ctrl->value) 634 if (ctrl->value)
631 data = reg_set(icd, MT9V022_AEC_AGC_ENABLE, 0x2); 635 data = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x2);
632 else 636 else
633 data = reg_clear(icd, MT9V022_AEC_AGC_ENABLE, 0x2); 637 data = reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x2);
634 if (data < 0) 638 if (data < 0)
635 return -EIO; 639 return -EIO;
636 break; 640 break;
637 case V4L2_CID_EXPOSURE_AUTO: 641 case V4L2_CID_EXPOSURE_AUTO:
638 if (ctrl->value) 642 if (ctrl->value)
639 data = reg_set(icd, MT9V022_AEC_AGC_ENABLE, 0x1); 643 data = reg_set(client, MT9V022_AEC_AGC_ENABLE, 0x1);
640 else 644 else
641 data = reg_clear(icd, MT9V022_AEC_AGC_ENABLE, 0x1); 645 data = reg_clear(client, MT9V022_AEC_AGC_ENABLE, 0x1);
642 if (data < 0) 646 if (data < 0)
643 return -EIO; 647 return -EIO;
644 break; 648 break;
@@ -650,8 +654,9 @@ static int mt9v022_set_control(struct soc_camera_device *icd,
650 * this wasn't our capture interface, so, we wait for the right one */ 654 * this wasn't our capture interface, so, we wait for the right one */
651static int mt9v022_video_probe(struct soc_camera_device *icd) 655static int mt9v022_video_probe(struct soc_camera_device *icd)
652{ 656{
657 struct i2c_client *client = to_i2c_client(icd->control);
653 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 658 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
654 struct soc_camera_link *icl = mt9v022->client->dev.platform_data; 659 struct soc_camera_link *icl = client->dev.platform_data;
655 s32 data; 660 s32 data;
656 int ret; 661 int ret;
657 unsigned long flags; 662 unsigned long flags;
@@ -661,7 +666,7 @@ static int mt9v022_video_probe(struct soc_camera_device *icd)
661 return -ENODEV; 666 return -ENODEV;
662 667
663 /* Read out the chip version register */ 668 /* Read out the chip version register */
664 data = reg_read(icd, MT9V022_CHIP_VERSION); 669 data = reg_read(client, MT9V022_CHIP_VERSION);
665 670
666 /* must be 0x1311 or 0x1313 */ 671 /* must be 0x1311 or 0x1313 */
667 if (data != 0x1311 && data != 0x1313) { 672 if (data != 0x1311 && data != 0x1313) {
@@ -672,12 +677,12 @@ static int mt9v022_video_probe(struct soc_camera_device *icd)
672 } 677 }
673 678
674 /* Soft reset */ 679 /* Soft reset */
675 ret = reg_write(icd, MT9V022_RESET, 1); 680 ret = reg_write(client, MT9V022_RESET, 1);
676 if (ret < 0) 681 if (ret < 0)
677 goto ei2c; 682 goto ei2c;
678 /* 15 clock cycles */ 683 /* 15 clock cycles */
679 udelay(200); 684 udelay(200);
680 if (reg_read(icd, MT9V022_RESET)) { 685 if (reg_read(client, MT9V022_RESET)) {
681 dev_err(&icd->dev, "Resetting MT9V022 failed!\n"); 686 dev_err(&icd->dev, "Resetting MT9V022 failed!\n");
682 goto ei2c; 687 goto ei2c;
683 } 688 }
@@ -685,11 +690,11 @@ static int mt9v022_video_probe(struct soc_camera_device *icd)
685 /* Set monochrome or colour sensor type */ 690 /* Set monochrome or colour sensor type */
686 if (sensor_type && (!strcmp("colour", sensor_type) || 691 if (sensor_type && (!strcmp("colour", sensor_type) ||
687 !strcmp("color", sensor_type))) { 692 !strcmp("color", sensor_type))) {
688 ret = reg_write(icd, MT9V022_PIXEL_OPERATION_MODE, 4 | 0x11); 693 ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 4 | 0x11);
689 mt9v022->model = V4L2_IDENT_MT9V022IX7ATC; 694 mt9v022->model = V4L2_IDENT_MT9V022IX7ATC;
690 icd->formats = mt9v022_colour_formats; 695 icd->formats = mt9v022_colour_formats;
691 } else { 696 } else {
692 ret = reg_write(icd, MT9V022_PIXEL_OPERATION_MODE, 0x11); 697 ret = reg_write(client, MT9V022_PIXEL_OPERATION_MODE, 0x11);
693 mt9v022->model = V4L2_IDENT_MT9V022IX7ATM; 698 mt9v022->model = V4L2_IDENT_MT9V022IX7ATM;
694 icd->formats = mt9v022_monochrome_formats; 699 icd->formats = mt9v022_monochrome_formats;
695 } 700 }
@@ -735,10 +740,13 @@ ei2c:
735static void mt9v022_video_remove(struct soc_camera_device *icd) 740static void mt9v022_video_remove(struct soc_camera_device *icd)
736{ 741{
737 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd); 742 struct mt9v022 *mt9v022 = container_of(icd, struct mt9v022, icd);
743 struct soc_camera_link *icl = mt9v022->client->dev.platform_data;
738 744
739 dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9v022->client->addr, 745 dev_dbg(&icd->dev, "Video %x removed: %p, %p\n", mt9v022->client->addr,
740 icd->dev.parent, icd->vdev); 746 icd->dev.parent, icd->vdev);
741 soc_camera_video_stop(icd); 747 soc_camera_video_stop(icd);
748 if (icl->free_bus)
749 icl->free_bus(icl);
742} 750}
743 751
744static int mt9v022_probe(struct i2c_client *client, 752static int mt9v022_probe(struct i2c_client *client,
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 86fab56c5a20..2d075205bdfe 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -102,10 +102,10 @@ struct mx1_buffer {
102 * Interface. If anyone ever builds hardware to enable more than 102 * Interface. If anyone ever builds hardware to enable more than
103 * one camera, they will have to modify this driver too */ 103 * one camera, they will have to modify this driver too */
104struct mx1_camera_dev { 104struct mx1_camera_dev {
105 struct soc_camera_host soc_host;
105 struct soc_camera_device *icd; 106 struct soc_camera_device *icd;
106 struct mx1_camera_pdata *pdata; 107 struct mx1_camera_pdata *pdata;
107 struct mx1_buffer *active; 108 struct mx1_buffer *active;
108 struct device *dev;
109 struct resource *res; 109 struct resource *res;
110 struct clk *clk; 110 struct clk *clk;
111 struct list_head capture; 111 struct list_head capture;
@@ -219,7 +219,7 @@ static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev)
219 int ret; 219 int ret;
220 220
221 if (unlikely(!pcdev->active)) { 221 if (unlikely(!pcdev->active)) {
222 dev_err(pcdev->dev, "DMA End IRQ with no active buffer\n"); 222 dev_err(pcdev->soc_host.dev, "DMA End IRQ with no active buffer\n");
223 return -EFAULT; 223 return -EFAULT;
224 } 224 }
225 225
@@ -229,7 +229,7 @@ static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev)
229 vbuf->size, pcdev->res->start + 229 vbuf->size, pcdev->res->start +
230 CSIRXR, DMA_MODE_READ); 230 CSIRXR, DMA_MODE_READ);
231 if (unlikely(ret)) 231 if (unlikely(ret))
232 dev_err(pcdev->dev, "Failed to setup DMA sg list\n"); 232 dev_err(pcdev->soc_host.dev, "Failed to setup DMA sg list\n");
233 233
234 return ret; 234 return ret;
235} 235}
@@ -338,14 +338,14 @@ static void mx1_camera_dma_irq(int channel, void *data)
338 imx_dma_disable(channel); 338 imx_dma_disable(channel);
339 339
340 if (unlikely(!pcdev->active)) { 340 if (unlikely(!pcdev->active)) {
341 dev_err(pcdev->dev, "DMA End IRQ with no active buffer\n"); 341 dev_err(pcdev->soc_host.dev, "DMA End IRQ with no active buffer\n");
342 goto out; 342 goto out;
343 } 343 }
344 344
345 vb = &pcdev->active->vb; 345 vb = &pcdev->active->vb;
346 buf = container_of(vb, struct mx1_buffer, vb); 346 buf = container_of(vb, struct mx1_buffer, vb);
347 WARN_ON(buf->inwork || list_empty(&vb->queue)); 347 WARN_ON(buf->inwork || list_empty(&vb->queue));
348 dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, 348 dev_dbg(pcdev->soc_host.dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
349 vb, vb->baddr, vb->bsize); 349 vb, vb->baddr, vb->bsize);
350 350
351 mx1_camera_wakeup(pcdev, vb, buf); 351 mx1_camera_wakeup(pcdev, vb, buf);
@@ -366,7 +366,7 @@ static void mx1_camera_init_videobuf(struct videobuf_queue *q,
366 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 366 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
367 struct mx1_camera_dev *pcdev = ici->priv; 367 struct mx1_camera_dev *pcdev = ici->priv;
368 368
369 videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, pcdev->dev, 369 videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, ici->dev,
370 &pcdev->lock, 370 &pcdev->lock,
371 V4L2_BUF_TYPE_VIDEO_CAPTURE, 371 V4L2_BUF_TYPE_VIDEO_CAPTURE,
372 V4L2_FIELD_NONE, 372 V4L2_FIELD_NONE,
@@ -385,7 +385,7 @@ static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
385 * they get a nice Oops */ 385 * they get a nice Oops */
386 div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1; 386 div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
387 387
388 dev_dbg(pcdev->dev, "System clock %lukHz, target freq %dkHz, " 388 dev_dbg(pcdev->soc_host.dev, "System clock %lukHz, target freq %dkHz, "
389 "divisor %lu\n", lcdclk / 1000, mclk / 1000, div); 389 "divisor %lu\n", lcdclk / 1000, mclk / 1000, div);
390 390
391 return div; 391 return div;
@@ -395,7 +395,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
395{ 395{
396 unsigned int csicr1 = CSICR1_EN; 396 unsigned int csicr1 = CSICR1_EN;
397 397
398 dev_dbg(pcdev->dev, "Activate device\n"); 398 dev_dbg(pcdev->soc_host.dev, "Activate device\n");
399 399
400 clk_enable(pcdev->clk); 400 clk_enable(pcdev->clk);
401 401
@@ -411,7 +411,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
411 411
412static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev) 412static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
413{ 413{
414 dev_dbg(pcdev->dev, "Deactivate device\n"); 414 dev_dbg(pcdev->soc_host.dev, "Deactivate device\n");
415 415
416 /* Disable all CSI interface */ 416 /* Disable all CSI interface */
417 __raw_writel(0x00, pcdev->base + CSICR1); 417 __raw_writel(0x00, pcdev->base + CSICR1);
@@ -550,7 +550,7 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd,
550 550
551 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); 551 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
552 if (!xlate) { 552 if (!xlate) {
553 dev_warn(&ici->dev, "Format %x not found\n", pix->pixelformat); 553 dev_warn(ici->dev, "Format %x not found\n", pix->pixelformat);
554 return -EINVAL; 554 return -EINVAL;
555 } 555 }
556 556
@@ -633,12 +633,6 @@ static struct soc_camera_host_ops mx1_soc_camera_host_ops = {
633 .querycap = mx1_camera_querycap, 633 .querycap = mx1_camera_querycap,
634}; 634};
635 635
636/* Should be allocated dynamically too, but we have only one. */
637static struct soc_camera_host mx1_soc_camera_host = {
638 .drv_name = DRIVER_NAME,
639 .ops = &mx1_soc_camera_host_ops,
640};
641
642static struct fiq_handler fh = { 636static struct fiq_handler fh = {
643 .name = "csi_sof" 637 .name = "csi_sof"
644}; 638};
@@ -673,7 +667,6 @@ static int __init mx1_camera_probe(struct platform_device *pdev)
673 goto exit_put_clk; 667 goto exit_put_clk;
674 } 668 }
675 669
676 dev_set_drvdata(&pdev->dev, pcdev);
677 pcdev->res = res; 670 pcdev->res = res;
678 pcdev->clk = clk; 671 pcdev->clk = clk;
679 672
@@ -707,16 +700,15 @@ static int __init mx1_camera_probe(struct platform_device *pdev)
707 } 700 }
708 pcdev->irq = irq; 701 pcdev->irq = irq;
709 pcdev->base = base; 702 pcdev->base = base;
710 pcdev->dev = &pdev->dev;
711 703
712 /* request dma */ 704 /* request dma */
713 pcdev->dma_chan = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_HIGH); 705 pcdev->dma_chan = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_HIGH);
714 if (pcdev->dma_chan < 0) { 706 if (pcdev->dma_chan < 0) {
715 dev_err(pcdev->dev, "Can't request DMA for MX1 CSI\n"); 707 dev_err(&pdev->dev, "Can't request DMA for MX1 CSI\n");
716 err = -EBUSY; 708 err = -EBUSY;
717 goto exit_iounmap; 709 goto exit_iounmap;
718 } 710 }
719 dev_dbg(pcdev->dev, "got DMA channel %d\n", pcdev->dma_chan); 711 dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chan);
720 712
721 imx_dma_setup_handlers(pcdev->dma_chan, mx1_camera_dma_irq, NULL, 713 imx_dma_setup_handlers(pcdev->dma_chan, mx1_camera_dma_irq, NULL,
722 pcdev); 714 pcdev);
@@ -729,7 +721,7 @@ static int __init mx1_camera_probe(struct platform_device *pdev)
729 /* request irq */ 721 /* request irq */
730 err = claim_fiq(&fh); 722 err = claim_fiq(&fh);
731 if (err) { 723 if (err) {
732 dev_err(pcdev->dev, "Camera interrupt register failed \n"); 724 dev_err(&pdev->dev, "Camera interrupt register failed \n");
733 goto exit_free_dma; 725 goto exit_free_dma;
734 } 726 }
735 727
@@ -746,10 +738,12 @@ static int __init mx1_camera_probe(struct platform_device *pdev)
746 mxc_set_irq_fiq(irq, 1); 738 mxc_set_irq_fiq(irq, 1);
747 enable_fiq(irq); 739 enable_fiq(irq);
748 740
749 mx1_soc_camera_host.priv = pcdev; 741 pcdev->soc_host.drv_name = DRIVER_NAME;
750 mx1_soc_camera_host.dev.parent = &pdev->dev; 742 pcdev->soc_host.ops = &mx1_soc_camera_host_ops;
751 mx1_soc_camera_host.nr = pdev->id; 743 pcdev->soc_host.priv = pcdev;
752 err = soc_camera_host_register(&mx1_soc_camera_host); 744 pcdev->soc_host.dev = &pdev->dev;
745 pcdev->soc_host.nr = pdev->id;
746 err = soc_camera_host_register(&pcdev->soc_host);
753 if (err) 747 if (err)
754 goto exit_free_irq; 748 goto exit_free_irq;
755 749
@@ -777,7 +771,9 @@ exit:
777 771
778static int __exit mx1_camera_remove(struct platform_device *pdev) 772static int __exit mx1_camera_remove(struct platform_device *pdev)
779{ 773{
780 struct mx1_camera_dev *pcdev = platform_get_drvdata(pdev); 774 struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
775 struct mx1_camera_dev *pcdev = container_of(soc_host,
776 struct mx1_camera_dev, soc_host);
781 struct resource *res; 777 struct resource *res;
782 778
783 imx_dma_free(pcdev->dma_chan); 779 imx_dma_free(pcdev->dma_chan);
@@ -787,7 +783,7 @@ static int __exit mx1_camera_remove(struct platform_device *pdev)
787 783
788 clk_put(pcdev->clk); 784 clk_put(pcdev->clk);
789 785
790 soc_camera_host_unregister(&mx1_soc_camera_host); 786 soc_camera_host_unregister(soc_host);
791 787
792 iounmap(pcdev->base); 788 iounmap(pcdev->base);
793 789
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 2d0781118eb0..e605c076ed89 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -87,7 +87,6 @@ struct mx3_camera_buffer {
87 * @soc_host: embedded soc_host object 87 * @soc_host: embedded soc_host object
88 */ 88 */
89struct mx3_camera_dev { 89struct mx3_camera_dev {
90 struct device *dev;
91 /* 90 /*
92 * i.MX3x is only supposed to handle one camera on its Camera Sensor 91 * i.MX3x is only supposed to handle one camera on its Camera Sensor
93 * Interface. If anyone ever builds hardware to enable more than one 92 * Interface. If anyone ever builds hardware to enable more than one
@@ -431,7 +430,7 @@ static void mx3_camera_init_videobuf(struct videobuf_queue *q,
431 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 430 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
432 struct mx3_camera_dev *mx3_cam = ici->priv; 431 struct mx3_camera_dev *mx3_cam = ici->priv;
433 432
434 videobuf_queue_dma_contig_init(q, &mx3_videobuf_ops, mx3_cam->dev, 433 videobuf_queue_dma_contig_init(q, &mx3_videobuf_ops, ici->dev,
435 &mx3_cam->lock, 434 &mx3_cam->lock,
436 V4L2_BUF_TYPE_VIDEO_CAPTURE, 435 V4L2_BUF_TYPE_VIDEO_CAPTURE,
437 V4L2_FIELD_NONE, 436 V4L2_FIELD_NONE,
@@ -599,7 +598,8 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam,
599 *flags |= SOCAM_DATAWIDTH_4; 598 *flags |= SOCAM_DATAWIDTH_4;
600 break; 599 break;
601 default: 600 default:
602 dev_info(mx3_cam->dev, "Unsupported bus width %d\n", buswidth); 601 dev_info(mx3_cam->soc_host.dev, "Unsupported bus width %d\n",
602 buswidth);
603 return -EINVAL; 603 return -EINVAL;
604 } 604 }
605 605
@@ -614,7 +614,7 @@ static int mx3_camera_try_bus_param(struct soc_camera_device *icd,
614 unsigned long bus_flags, camera_flags; 614 unsigned long bus_flags, camera_flags;
615 int ret = test_platform_param(mx3_cam, depth, &bus_flags); 615 int ret = test_platform_param(mx3_cam, depth, &bus_flags);
616 616
617 dev_dbg(&ici->dev, "requested bus width %d bit: %d\n", depth, ret); 617 dev_dbg(ici->dev, "requested bus width %d bit: %d\n", depth, ret);
618 618
619 if (ret < 0) 619 if (ret < 0)
620 return ret; 620 return ret;
@@ -637,7 +637,7 @@ static bool chan_filter(struct dma_chan *chan, void *arg)
637 if (!rq) 637 if (!rq)
638 return false; 638 return false;
639 639
640 pdata = rq->mx3_cam->dev->platform_data; 640 pdata = rq->mx3_cam->soc_host.dev->platform_data;
641 641
642 return rq->id == chan->chan_id && 642 return rq->id == chan->chan_id &&
643 pdata->dma_dev == chan->device->dev; 643 pdata->dma_dev == chan->device->dev;
@@ -697,7 +697,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, int idx,
697 xlate->cam_fmt = icd->formats + idx; 697 xlate->cam_fmt = icd->formats + idx;
698 xlate->buswidth = buswidth; 698 xlate->buswidth = buswidth;
699 xlate++; 699 xlate++;
700 dev_dbg(&ici->dev, "Providing format %s using %s\n", 700 dev_dbg(ici->dev, "Providing format %s using %s\n",
701 mx3_camera_formats[0].name, 701 mx3_camera_formats[0].name,
702 icd->formats[idx].name); 702 icd->formats[idx].name);
703 } 703 }
@@ -709,7 +709,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, int idx,
709 xlate->cam_fmt = icd->formats + idx; 709 xlate->cam_fmt = icd->formats + idx;
710 xlate->buswidth = buswidth; 710 xlate->buswidth = buswidth;
711 xlate++; 711 xlate++;
712 dev_dbg(&ici->dev, "Providing format %s using %s\n", 712 dev_dbg(ici->dev, "Providing format %s using %s\n",
713 mx3_camera_formats[0].name, 713 mx3_camera_formats[0].name,
714 icd->formats[idx].name); 714 icd->formats[idx].name);
715 } 715 }
@@ -722,7 +722,7 @@ passthrough:
722 xlate->cam_fmt = icd->formats + idx; 722 xlate->cam_fmt = icd->formats + idx;
723 xlate->buswidth = buswidth; 723 xlate->buswidth = buswidth;
724 xlate++; 724 xlate++;
725 dev_dbg(&ici->dev, 725 dev_dbg(ici->dev,
726 "Providing format %s in pass-through mode\n", 726 "Providing format %s in pass-through mode\n",
727 icd->formats[idx].name); 727 icd->formats[idx].name);
728 } 728 }
@@ -829,7 +829,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd,
829 829
830 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); 830 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
831 if (!xlate) { 831 if (!xlate) {
832 dev_warn(&ici->dev, "Format %x not found\n", pix->pixelformat); 832 dev_warn(ici->dev, "Format %x not found\n", pix->pixelformat);
833 return -EINVAL; 833 return -EINVAL;
834 } 834 }
835 835
@@ -866,7 +866,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
866 866
867 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 867 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
868 if (pixfmt && !xlate) { 868 if (pixfmt && !xlate) {
869 dev_warn(&ici->dev, "Format %x not found\n", pixfmt); 869 dev_warn(ici->dev, "Format %x not found\n", pixfmt);
870 return -EINVAL; 870 return -EINVAL;
871 } 871 }
872 872
@@ -933,11 +933,11 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
933 933
934 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 934 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
935 if (!xlate) { 935 if (!xlate) {
936 dev_warn(&ici->dev, "Format %x not found\n", pixfmt); 936 dev_warn(ici->dev, "Format %x not found\n", pixfmt);
937 return -EINVAL; 937 return -EINVAL;
938 } 938 }
939 939
940 dev_dbg(&ici->dev, "requested bus width %d bit: %d\n", 940 dev_dbg(ici->dev, "requested bus width %d bit: %d\n",
941 icd->buswidth, ret); 941 icd->buswidth, ret);
942 942
943 if (ret < 0) 943 if (ret < 0)
@@ -947,7 +947,7 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
947 947
948 common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags); 948 common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags);
949 if (!common_flags) { 949 if (!common_flags) {
950 dev_dbg(&ici->dev, "no common flags: camera %lx, host %lx\n", 950 dev_dbg(ici->dev, "no common flags: camera %lx, host %lx\n",
951 camera_flags, bus_flags); 951 camera_flags, bus_flags);
952 return -EINVAL; 952 return -EINVAL;
953 } 953 }
@@ -1054,7 +1054,7 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
1054 1054
1055 csi_reg_write(mx3_cam, sens_conf | dw, CSI_SENS_CONF); 1055 csi_reg_write(mx3_cam, sens_conf | dw, CSI_SENS_CONF);
1056 1056
1057 dev_dbg(&ici->dev, "Set SENS_CONF to %x\n", sens_conf | dw); 1057 dev_dbg(ici->dev, "Set SENS_CONF to %x\n", sens_conf | dw);
1058 1058
1059 return 0; 1059 return 0;
1060} 1060}
@@ -1074,7 +1074,7 @@ static struct soc_camera_host_ops mx3_soc_camera_host_ops = {
1074 .set_bus_param = mx3_camera_set_bus_param, 1074 .set_bus_param = mx3_camera_set_bus_param,
1075}; 1075};
1076 1076
1077static int mx3_camera_probe(struct platform_device *pdev) 1077static int __devinit mx3_camera_probe(struct platform_device *pdev)
1078{ 1078{
1079 struct mx3_camera_dev *mx3_cam; 1079 struct mx3_camera_dev *mx3_cam;
1080 struct resource *res; 1080 struct resource *res;
@@ -1102,8 +1102,6 @@ static int mx3_camera_probe(struct platform_device *pdev)
1102 goto eclkget; 1102 goto eclkget;
1103 } 1103 }
1104 1104
1105 dev_set_drvdata(&pdev->dev, mx3_cam);
1106
1107 mx3_cam->pdata = pdev->dev.platform_data; 1105 mx3_cam->pdata = pdev->dev.platform_data;
1108 mx3_cam->platform_flags = mx3_cam->pdata->flags; 1106 mx3_cam->platform_flags = mx3_cam->pdata->flags;
1109 if (!(mx3_cam->platform_flags & (MX3_CAMERA_DATAWIDTH_4 | 1107 if (!(mx3_cam->platform_flags & (MX3_CAMERA_DATAWIDTH_4 |
@@ -1135,14 +1133,14 @@ static int mx3_camera_probe(struct platform_device *pdev)
1135 } 1133 }
1136 1134
1137 mx3_cam->base = base; 1135 mx3_cam->base = base;
1138 mx3_cam->dev = &pdev->dev;
1139 1136
1140 soc_host = &mx3_cam->soc_host; 1137 soc_host = &mx3_cam->soc_host;
1141 soc_host->drv_name = MX3_CAM_DRV_NAME; 1138 soc_host->drv_name = MX3_CAM_DRV_NAME;
1142 soc_host->ops = &mx3_soc_camera_host_ops; 1139 soc_host->ops = &mx3_soc_camera_host_ops;
1143 soc_host->priv = mx3_cam; 1140 soc_host->priv = mx3_cam;
1144 soc_host->dev.parent = &pdev->dev; 1141 soc_host->dev = &pdev->dev;
1145 soc_host->nr = pdev->id; 1142 soc_host->nr = pdev->id;
1143
1146 err = soc_camera_host_register(soc_host); 1144 err = soc_camera_host_register(soc_host);
1147 if (err) 1145 if (err)
1148 goto ecamhostreg; 1146 goto ecamhostreg;
@@ -1165,11 +1163,13 @@ egetres:
1165 1163
1166static int __devexit mx3_camera_remove(struct platform_device *pdev) 1164static int __devexit mx3_camera_remove(struct platform_device *pdev)
1167{ 1165{
1168 struct mx3_camera_dev *mx3_cam = platform_get_drvdata(pdev); 1166 struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
1167 struct mx3_camera_dev *mx3_cam = container_of(soc_host,
1168 struct mx3_camera_dev, soc_host);
1169 1169
1170 clk_put(mx3_cam->clk); 1170 clk_put(mx3_cam->clk);
1171 1171
1172 soc_camera_host_unregister(&mx3_cam->soc_host); 1172 soc_camera_host_unregister(soc_host);
1173 1173
1174 iounmap(mx3_cam->base); 1174 iounmap(mx3_cam->base);
1175 1175
@@ -1194,11 +1194,11 @@ static struct platform_driver mx3_camera_driver = {
1194 .name = MX3_CAM_DRV_NAME, 1194 .name = MX3_CAM_DRV_NAME,
1195 }, 1195 },
1196 .probe = mx3_camera_probe, 1196 .probe = mx3_camera_probe,
1197 .remove = __exit_p(mx3_camera_remove), 1197 .remove = __devexit_p(mx3_camera_remove),
1198}; 1198};
1199 1199
1200 1200
1201static int __devinit mx3_camera_init(void) 1201static int __init mx3_camera_init(void)
1202{ 1202{
1203 return platform_driver_register(&mx3_camera_driver); 1203 return platform_driver_register(&mx3_camera_driver);
1204} 1204}
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index 3be5a71bdac2..35890e8b2431 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -453,7 +453,7 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *vc)
453static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i) 453static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
454{ 454{
455 DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index)); 455 DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));
456 if (i->index < 0 || i->index >= MXB_INPUTS) 456 if (i->index >= MXB_INPUTS)
457 return -EINVAL; 457 return -EINVAL;
458 memcpy(i, &mxb_inputs[i->index], sizeof(struct v4l2_input)); 458 memcpy(i, &mxb_inputs[i->index], sizeof(struct v4l2_input));
459 return 0; 459 return 0;
@@ -616,7 +616,7 @@ static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
616 struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; 616 struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
617 struct mxb *mxb = (struct mxb *)dev->ext_priv; 617 struct mxb *mxb = (struct mxb *)dev->ext_priv;
618 618
619 if (a->index < 0 || a->index > MXB_INPUTS) { 619 if (a->index > MXB_INPUTS) {
620 DEB_D(("VIDIOC_G_AUDIO %d out of range.\n", a->index)); 620 DEB_D(("VIDIOC_G_AUDIO %d out of range.\n", a->index));
621 return -EINVAL; 621 return -EINVAL;
622 } 622 }
diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c
index 9af5532db142..08cfd3e4ae8a 100644
--- a/drivers/media/video/ov511.c
+++ b/drivers/media/video/ov511.c
@@ -112,6 +112,8 @@ static int framedrop = -1;
112static int fastset; 112static int fastset;
113static int force_palette; 113static int force_palette;
114static int backlight; 114static int backlight;
115/* Bitmask marking allocated devices from 0 to OV511_MAX_UNIT_VIDEO */
116static unsigned long ov511_devused;
115static int unit_video[OV511_MAX_UNIT_VIDEO]; 117static int unit_video[OV511_MAX_UNIT_VIDEO];
116static int remove_zeros; 118static int remove_zeros;
117static int mirror; 119static int mirror;
@@ -5720,7 +5722,7 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
5720 struct usb_device *dev = interface_to_usbdev(intf); 5722 struct usb_device *dev = interface_to_usbdev(intf);
5721 struct usb_interface_descriptor *idesc; 5723 struct usb_interface_descriptor *idesc;
5722 struct usb_ov511 *ov; 5724 struct usb_ov511 *ov;
5723 int i; 5725 int i, rc, nr;
5724 5726
5725 PDEBUG(1, "probing for device..."); 5727 PDEBUG(1, "probing for device...");
5726 5728
@@ -5845,33 +5847,41 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
5845 ov->vdev->parent = &intf->dev; 5847 ov->vdev->parent = &intf->dev;
5846 video_set_drvdata(ov->vdev, ov); 5848 video_set_drvdata(ov->vdev, ov);
5847 5849
5848 for (i = 0; i < OV511_MAX_UNIT_VIDEO; i++) { 5850 mutex_lock(&ov->lock);
5849 /* Minor 0 cannot be specified; assume user wants autodetect */
5850 if (unit_video[i] == 0)
5851 break;
5852 5851
5853 if (video_register_device(ov->vdev, VFL_TYPE_GRABBER, 5852 /* Check to see next free device and mark as used */
5854 unit_video[i]) >= 0) { 5853 nr = find_first_zero_bit(&ov511_devused, OV511_MAX_UNIT_VIDEO);
5855 break; 5854
5856 } 5855 /* Registers device */
5857 } 5856 if (unit_video[nr] != 0)
5857 rc = video_register_device(ov->vdev, VFL_TYPE_GRABBER,
5858 unit_video[nr]);
5859 else
5860 rc = video_register_device(ov->vdev, VFL_TYPE_GRABBER, -1);
5858 5861
5859 /* Use the next available one */ 5862 if (rc < 0) {
5860 if ((ov->vdev->minor == -1) &&
5861 video_register_device(ov->vdev, VFL_TYPE_GRABBER, -1) < 0) {
5862 err("video_register_device failed"); 5863 err("video_register_device failed");
5864 mutex_unlock(&ov->lock);
5863 goto error; 5865 goto error;
5864 } 5866 }
5865 5867
5868 /* Mark device as used */
5869 ov511_devused |= 1 << nr;
5870 ov->nr = nr;
5871
5866 dev_info(&intf->dev, "Device at %s registered to minor %d\n", 5872 dev_info(&intf->dev, "Device at %s registered to minor %d\n",
5867 ov->usb_path, ov->vdev->minor); 5873 ov->usb_path, ov->vdev->minor);
5868 5874
5869 usb_set_intfdata(intf, ov); 5875 usb_set_intfdata(intf, ov);
5870 if (ov_create_sysfs(ov->vdev)) { 5876 if (ov_create_sysfs(ov->vdev)) {
5871 err("ov_create_sysfs failed"); 5877 err("ov_create_sysfs failed");
5878 ov511_devused &= ~(1 << nr);
5879 mutex_unlock(&ov->lock);
5872 goto error; 5880 goto error;
5873 } 5881 }
5874 5882
5883 mutex_lock(&ov->lock);
5884
5875 return 0; 5885 return 0;
5876 5886
5877error: 5887error:
@@ -5906,10 +5916,16 @@ ov51x_disconnect(struct usb_interface *intf)
5906 5916
5907 PDEBUG(3, ""); 5917 PDEBUG(3, "");
5908 5918
5919 mutex_lock(&ov->lock);
5909 usb_set_intfdata (intf, NULL); 5920 usb_set_intfdata (intf, NULL);
5910 5921
5911 if (!ov) 5922 if (!ov) {
5923 mutex_unlock(&ov->lock);
5912 return; 5924 return;
5925 }
5926
5927 /* Free device number */
5928 ov511_devused &= ~(1 << ov->nr);
5913 5929
5914 if (ov->vdev) 5930 if (ov->vdev)
5915 video_unregister_device(ov->vdev); 5931 video_unregister_device(ov->vdev);
@@ -5927,6 +5943,7 @@ ov51x_disconnect(struct usb_interface *intf)
5927 5943
5928 ov->streaming = 0; 5944 ov->streaming = 0;
5929 ov51x_unlink_isoc(ov); 5945 ov51x_unlink_isoc(ov);
5946 mutex_unlock(&ov->lock);
5930 5947
5931 ov->dev = NULL; 5948 ov->dev = NULL;
5932 5949
diff --git a/drivers/media/video/ov511.h b/drivers/media/video/ov511.h
index 70d99e52329d..c450c92468da 100644
--- a/drivers/media/video/ov511.h
+++ b/drivers/media/video/ov511.h
@@ -494,6 +494,9 @@ struct usb_ov511 {
494 int has_decoder; /* Device has a video decoder */ 494 int has_decoder; /* Device has a video decoder */
495 int pal; /* Device is designed for PAL resolution */ 495 int pal; /* Device is designed for PAL resolution */
496 496
497 /* ov511 device number ID */
498 int nr; /* Stores a device number */
499
497 /* I2C interface */ 500 /* I2C interface */
498 struct mutex i2c_lock; /* Protect I2C controller regs */ 501 struct mutex i2c_lock; /* Protect I2C controller regs */
499 unsigned char primary_i2c_slave; /* I2C write id of sensor */ 502 unsigned char primary_i2c_slave; /* I2C write id of sensor */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
index 1cb6a260e8b0..336a20eded0f 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
@@ -71,6 +71,7 @@ static const struct pvr2_device_desc pvr2_device_29xxx = {
71 .flag_has_svideo = !0, 71 .flag_has_svideo = !0,
72 .signal_routing_scheme = PVR2_ROUTING_SCHEME_HAUPPAUGE, 72 .signal_routing_scheme = PVR2_ROUTING_SCHEME_HAUPPAUGE,
73 .led_scheme = PVR2_LED_SCHEME_HAUPPAUGE, 73 .led_scheme = PVR2_LED_SCHEME_HAUPPAUGE,
74 .ir_scheme = PVR2_IR_SCHEME_29XXX,
74}; 75};
75 76
76 77
@@ -284,6 +285,11 @@ static struct tda10048_config hauppauge_tda10048_config = {
284 .output_mode = TDA10048_PARALLEL_OUTPUT, 285 .output_mode = TDA10048_PARALLEL_OUTPUT,
285 .fwbulkwritelen = TDA10048_BULKWRITE_50, 286 .fwbulkwritelen = TDA10048_BULKWRITE_50,
286 .inversion = TDA10048_INVERSION_ON, 287 .inversion = TDA10048_INVERSION_ON,
288 .dtv6_if_freq_khz = TDA10048_IF_3300,
289 .dtv7_if_freq_khz = TDA10048_IF_3800,
290 .dtv8_if_freq_khz = TDA10048_IF_4300,
291 .clk_freq_khz = TDA10048_CLK_16000,
292 .disable_gate_access = 1,
287}; 293};
288 294
289static struct tda829x_config tda829x_no_probe = { 295static struct tda829x_config tda829x_no_probe = {
diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.h b/drivers/media/video/pvrusb2/pvrusb2-devattr.h
index 3e553389cbc3..ea04ecf8aa39 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.h
@@ -69,6 +69,7 @@ struct pvr2_string_table {
69#define PVR2_ROUTING_SCHEME_HAUPPAUGE 0 69#define PVR2_ROUTING_SCHEME_HAUPPAUGE 0
70#define PVR2_ROUTING_SCHEME_GOTVIEW 1 70#define PVR2_ROUTING_SCHEME_GOTVIEW 1
71#define PVR2_ROUTING_SCHEME_ONAIR 2 71#define PVR2_ROUTING_SCHEME_ONAIR 2
72#define PVR2_ROUTING_SCHEME_AV400 3
72 73
73#define PVR2_DIGITAL_SCHEME_NONE 0 74#define PVR2_DIGITAL_SCHEME_NONE 0
74#define PVR2_DIGITAL_SCHEME_HAUPPAUGE 1 75#define PVR2_DIGITAL_SCHEME_HAUPPAUGE 1
@@ -78,8 +79,10 @@ struct pvr2_string_table {
78#define PVR2_LED_SCHEME_HAUPPAUGE 1 79#define PVR2_LED_SCHEME_HAUPPAUGE 1
79 80
80#define PVR2_IR_SCHEME_NONE 0 81#define PVR2_IR_SCHEME_NONE 0
81#define PVR2_IR_SCHEME_24XXX 1 82#define PVR2_IR_SCHEME_24XXX 1 /* FX2-controlled IR */
82#define PVR2_IR_SCHEME_ZILOG 2 83#define PVR2_IR_SCHEME_ZILOG 2 /* HVR-1950 style (must be taken out of reset) */
84#define PVR2_IR_SCHEME_24XXX_MCE 3 /* 24xxx MCE device */
85#define PVR2_IR_SCHEME_29XXX 4 /* Original 29xxx device */
83 86
84/* This describes a particular hardware type (except for the USB device ID 87/* This describes a particular hardware type (except for the USB device ID
85 which must live in a separate structure due to environmental 88 which must live in a separate structure due to environmental
@@ -162,19 +165,9 @@ struct pvr2_device_desc {
162 ensure that it is found. */ 165 ensure that it is found. */
163 unsigned int flag_has_wm8775:1; 166 unsigned int flag_has_wm8775:1;
164 167
165 /* Indicate any specialized IR scheme that might need to be 168 /* Indicate IR scheme of hardware. If not set, then it is assumed
166 supported by this driver. If not set, then it is assumed that 169 that IR can work without any help from the driver. */
167 IR can work without help from the driver (which is frequently 170 unsigned int ir_scheme:3;
168 the case). This is otherwise set to one of
169 PVR2_IR_SCHEME_xxxx. For "xxxx", the value "24XXX" indicates a
170 Hauppauge 24xxx class device which has an FPGA-hosted IR
171 receiver that can only be reached via FX2 command codes. In
172 that case the pvrusb2 driver will emulate the behavior of the
173 older 29xxx device's IR receiver (a "virtual" I2C chip) in terms
174 of those command codes. For the value "ZILOG", we're dealing
175 with an IR chip that must be taken out of reset via another FX2
176 command code (which is the case for HVR-1950 devices). */
177 unsigned int ir_scheme:2;
178 171
179 /* These bits define which kinds of sources the device can handle. 172 /* These bits define which kinds of sources the device can handle.
180 Note: Digital tuner presence is inferred by the 173 Note: Digital tuner presence is inferred by the
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
index 5d75eb5211b1..5b152ff20bd0 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -200,6 +200,9 @@ struct pvr2_hdw {
200 int i2c_cx25840_hack_state; 200 int i2c_cx25840_hack_state;
201 int i2c_linked; 201 int i2c_linked;
202 202
203 /* IR related */
204 unsigned int ir_scheme_active; /* IR scheme as seen from the outside */
205
203 /* Frequency table */ 206 /* Frequency table */
204 unsigned int freqTable[FREQTABLE_SIZE]; 207 unsigned int freqTable[FREQTABLE_SIZE];
205 unsigned int freqProgSlot; 208 unsigned int freqProgSlot;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index add3395d3248..0c745b142fb7 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -142,6 +142,15 @@ static const unsigned char *module_i2c_addresses[] = {
142}; 142};
143 143
144 144
145static const char *ir_scheme_names[] = {
146 [PVR2_IR_SCHEME_NONE] = "none",
147 [PVR2_IR_SCHEME_29XXX] = "29xxx",
148 [PVR2_IR_SCHEME_24XXX] = "24xxx (29xxx emulation)",
149 [PVR2_IR_SCHEME_24XXX_MCE] = "24xxx (MCE device)",
150 [PVR2_IR_SCHEME_ZILOG] = "Zilog",
151};
152
153
145/* Define the list of additional controls we'll dynamically construct based 154/* Define the list of additional controls we'll dynamically construct based
146 on query of the cx2341x module. */ 155 on query of the cx2341x module. */
147struct pvr2_mpeg_ids { 156struct pvr2_mpeg_ids {
@@ -2170,7 +2179,7 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
2170 } 2179 }
2171 2180
2172 /* Take the IR chip out of reset, if appropriate */ 2181 /* Take the IR chip out of reset, if appropriate */
2173 if (hdw->hdw_desc->ir_scheme == PVR2_IR_SCHEME_ZILOG) { 2182 if (hdw->ir_scheme_active == PVR2_IR_SCHEME_ZILOG) {
2174 pvr2_issue_simple_cmd(hdw, 2183 pvr2_issue_simple_cmd(hdw,
2175 FX2CMD_HCW_ZILOG_RESET | 2184 FX2CMD_HCW_ZILOG_RESET |
2176 (1 << 8) | 2185 (1 << 8) |
@@ -2451,6 +2460,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
2451 GFP_KERNEL); 2460 GFP_KERNEL);
2452 if (!hdw->controls) goto fail; 2461 if (!hdw->controls) goto fail;
2453 hdw->hdw_desc = hdw_desc; 2462 hdw->hdw_desc = hdw_desc;
2463 hdw->ir_scheme_active = hdw->hdw_desc->ir_scheme;
2454 for (idx = 0; idx < hdw->control_cnt; idx++) { 2464 for (idx = 0; idx < hdw->control_cnt; idx++) {
2455 cptr = hdw->controls + idx; 2465 cptr = hdw->controls + idx;
2456 cptr->hdw = hdw; 2466 cptr->hdw = hdw;
@@ -4809,6 +4819,12 @@ static unsigned int pvr2_hdw_report_unlocked(struct pvr2_hdw *hdw,int which,
4809 stats.buffers_processed, 4819 stats.buffers_processed,
4810 stats.buffers_failed); 4820 stats.buffers_failed);
4811 } 4821 }
4822 case 6: {
4823 unsigned int id = hdw->ir_scheme_active;
4824 return scnprintf(buf, acnt, "ir scheme: id=%d %s", id,
4825 (id >= ARRAY_SIZE(ir_scheme_names) ?
4826 "?" : ir_scheme_names[id]));
4827 }
4812 default: break; 4828 default: break;
4813 } 4829 }
4814 return 0; 4830 return 0;
@@ -4825,65 +4841,35 @@ static unsigned int pvr2_hdw_report_clients(struct pvr2_hdw *hdw,
4825 unsigned int tcnt = 0; 4841 unsigned int tcnt = 0;
4826 unsigned int ccnt; 4842 unsigned int ccnt;
4827 struct i2c_client *client; 4843 struct i2c_client *client;
4828 struct list_head *item;
4829 void *cd;
4830 const char *p; 4844 const char *p;
4831 unsigned int id; 4845 unsigned int id;
4832 4846
4833 ccnt = scnprintf(buf, acnt, "Associated v4l2-subdev drivers:"); 4847 ccnt = scnprintf(buf, acnt, "Associated v4l2-subdev drivers and I2C clients:\n");
4834 tcnt += ccnt; 4848 tcnt += ccnt;
4835 v4l2_device_for_each_subdev(sd, &hdw->v4l2_dev) { 4849 v4l2_device_for_each_subdev(sd, &hdw->v4l2_dev) {
4836 id = sd->grp_id; 4850 id = sd->grp_id;
4837 p = NULL; 4851 p = NULL;
4838 if (id < ARRAY_SIZE(module_names)) p = module_names[id]; 4852 if (id < ARRAY_SIZE(module_names)) p = module_names[id];
4839 if (p) { 4853 if (p) {
4840 ccnt = scnprintf(buf + tcnt, acnt - tcnt, " %s", p); 4854 ccnt = scnprintf(buf + tcnt, acnt - tcnt, " %s:", p);
4841 tcnt += ccnt; 4855 tcnt += ccnt;
4842 } else { 4856 } else {
4843 ccnt = scnprintf(buf + tcnt, acnt - tcnt, 4857 ccnt = scnprintf(buf + tcnt, acnt - tcnt,
4844 " (unknown id=%u)", id); 4858 " (unknown id=%u):", id);
4845 tcnt += ccnt; 4859 tcnt += ccnt;
4846 } 4860 }
4847 } 4861 client = v4l2_get_subdevdata(sd);
4848 ccnt = scnprintf(buf + tcnt, acnt - tcnt, "\n"); 4862 if (client) {
4849 tcnt += ccnt; 4863 ccnt = scnprintf(buf + tcnt, acnt - tcnt,
4850 4864 " %s @ %02x\n", client->name,
4851 ccnt = scnprintf(buf + tcnt, acnt - tcnt, "I2C clients:\n"); 4865 client->addr);
4852 tcnt += ccnt; 4866 tcnt += ccnt;
4853 4867 } else {
4854 mutex_lock(&hdw->i2c_adap.clist_lock); 4868 ccnt = scnprintf(buf + tcnt, acnt - tcnt,
4855 list_for_each(item, &hdw->i2c_adap.clients) { 4869 " no i2c client\n");
4856 client = list_entry(item, struct i2c_client, list); 4870 tcnt += ccnt;
4857 ccnt = scnprintf(buf + tcnt, acnt - tcnt,
4858 " %s: i2c=%02x", client->name, client->addr);
4859 tcnt += ccnt;
4860 cd = i2c_get_clientdata(client);
4861 v4l2_device_for_each_subdev(sd, &hdw->v4l2_dev) {
4862 if (cd == sd) {
4863 id = sd->grp_id;
4864 p = NULL;
4865 if (id < ARRAY_SIZE(module_names)) {
4866 p = module_names[id];
4867 }
4868 if (p) {
4869 ccnt = scnprintf(buf + tcnt,
4870 acnt - tcnt,
4871 " subdev=%s", p);
4872 tcnt += ccnt;
4873 } else {
4874 ccnt = scnprintf(buf + tcnt,
4875 acnt - tcnt,
4876 " subdev= id %u)",
4877 id);
4878 tcnt += ccnt;
4879 }
4880 break;
4881 }
4882 } 4871 }
4883 ccnt = scnprintf(buf + tcnt, acnt - tcnt, "\n");
4884 tcnt += ccnt;
4885 } 4872 }
4886 mutex_unlock(&hdw->i2c_adap.clist_lock);
4887 return tcnt; 4873 return tcnt;
4888} 4874}
4889 4875
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index 9af282f9e765..610bd848df24 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -42,6 +42,18 @@ static int ir_mode[PVR_NUM] = { [0 ... PVR_NUM-1] = 1 };
42module_param_array(ir_mode, int, NULL, 0444); 42module_param_array(ir_mode, int, NULL, 0444);
43MODULE_PARM_DESC(ir_mode,"specify: 0=disable IR reception, 1=normal IR"); 43MODULE_PARM_DESC(ir_mode,"specify: 0=disable IR reception, 1=normal IR");
44 44
45static int pvr2_disable_ir_video;
46module_param_named(disable_autoload_ir_video, pvr2_disable_ir_video,
47 int, S_IRUGO|S_IWUSR);
48MODULE_PARM_DESC(disable_autoload_ir_video,
49 "1=do not try to autoload ir_video IR receiver");
50
51/* Mapping of IR schemes to known I2C addresses - if any */
52static const unsigned char ir_video_addresses[] = {
53 [PVR2_IR_SCHEME_29XXX] = 0x18,
54 [PVR2_IR_SCHEME_24XXX] = 0x18,
55};
56
45static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */ 57static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
46 u8 i2c_addr, /* I2C address we're talking to */ 58 u8 i2c_addr, /* I2C address we're talking to */
47 u8 *data, /* Data to write */ 59 u8 *data, /* Data to write */
@@ -559,6 +571,31 @@ static void do_i2c_scan(struct pvr2_hdw *hdw)
559 printk(KERN_INFO "%s: i2c scan done.\n", hdw->name); 571 printk(KERN_INFO "%s: i2c scan done.\n", hdw->name);
560} 572}
561 573
574static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
575{
576 struct i2c_board_info info;
577 unsigned char addr = 0;
578 if (pvr2_disable_ir_video) {
579 pvr2_trace(PVR2_TRACE_INFO,
580 "Automatic binding of ir_video has been disabled.");
581 return;
582 }
583 if (hdw->ir_scheme_active < ARRAY_SIZE(ir_video_addresses)) {
584 addr = ir_video_addresses[hdw->ir_scheme_active];
585 }
586 if (!addr) {
587 /* The device either doesn't support I2C-based IR or we
588 don't know (yet) how to operate IR on the device. */
589 return;
590 }
591 pvr2_trace(PVR2_TRACE_INFO,
592 "Binding ir_video to i2c address 0x%02x.", addr);
593 memset(&info, 0, sizeof(struct i2c_board_info));
594 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
595 info.addr = addr;
596 i2c_new_device(&hdw->i2c_adap, &info);
597}
598
562void pvr2_i2c_core_init(struct pvr2_hdw *hdw) 599void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
563{ 600{
564 unsigned int idx; 601 unsigned int idx;
@@ -574,7 +611,9 @@ void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
574 printk(KERN_INFO "%s: IR disabled\n",hdw->name); 611 printk(KERN_INFO "%s: IR disabled\n",hdw->name);
575 hdw->i2c_func[0x18] = i2c_black_hole; 612 hdw->i2c_func[0x18] = i2c_black_hole;
576 } else if (ir_mode[hdw->unit_number] == 1) { 613 } else if (ir_mode[hdw->unit_number] == 1) {
577 if (hdw->hdw_desc->ir_scheme == PVR2_IR_SCHEME_24XXX) { 614 if (hdw->ir_scheme_active == PVR2_IR_SCHEME_24XXX) {
615 /* Set up translation so that our IR looks like a
616 29xxx device */
578 hdw->i2c_func[0x18] = i2c_24xxx_ir; 617 hdw->i2c_func[0x18] = i2c_24xxx_ir;
579 } 618 }
580 } 619 }
@@ -597,15 +636,23 @@ void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
597 i2c_add_adapter(&hdw->i2c_adap); 636 i2c_add_adapter(&hdw->i2c_adap);
598 if (hdw->i2c_func[0x18] == i2c_24xxx_ir) { 637 if (hdw->i2c_func[0x18] == i2c_24xxx_ir) {
599 /* Probe for a different type of IR receiver on this 638 /* Probe for a different type of IR receiver on this
600 device. If present, disable the emulated IR receiver. */ 639 device. This is really the only way to differentiate
640 older 24xxx devices from 24xxx variants that include an
641 IR blaster. If the IR blaster is present, the IR
642 receiver is part of that chip and thus we must disable
643 the emulated IR receiver. */
601 if (do_i2c_probe(hdw, 0x71)) { 644 if (do_i2c_probe(hdw, 0x71)) {
602 pvr2_trace(PVR2_TRACE_INFO, 645 pvr2_trace(PVR2_TRACE_INFO,
603 "Device has newer IR hardware;" 646 "Device has newer IR hardware;"
604 " disabling unneeded virtual IR device"); 647 " disabling unneeded virtual IR device");
605 hdw->i2c_func[0x18] = NULL; 648 hdw->i2c_func[0x18] = NULL;
649 /* Remember that this is a different device... */
650 hdw->ir_scheme_active = PVR2_IR_SCHEME_24XXX_MCE;
606 } 651 }
607 } 652 }
608 if (i2c_scan) do_i2c_scan(hdw); 653 if (i2c_scan) do_i2c_scan(hdw);
654
655 pvr2_i2c_register_ir(hdw);
609} 656}
610 657
611void pvr2_i2c_core_done(struct pvr2_hdw *hdw) 658void pvr2_i2c_core_done(struct pvr2_hdw *hdw)
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
index 299c1cbc3832..6c23456e0bda 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
@@ -539,7 +539,7 @@ static void class_dev_destroy(struct pvr2_sysfs *sfp)
539 &sfp->attr_unit_number); 539 &sfp->attr_unit_number);
540 } 540 }
541 pvr2_sysfs_trace("Destroying class_dev id=%p",sfp->class_dev); 541 pvr2_sysfs_trace("Destroying class_dev id=%p",sfp->class_dev);
542 sfp->class_dev->driver_data = NULL; 542 dev_set_drvdata(sfp->class_dev, NULL);
543 device_unregister(sfp->class_dev); 543 device_unregister(sfp->class_dev);
544 sfp->class_dev = NULL; 544 sfp->class_dev = NULL;
545} 545}
@@ -549,7 +549,7 @@ static ssize_t v4l_minor_number_show(struct device *class_dev,
549 struct device_attribute *attr, char *buf) 549 struct device_attribute *attr, char *buf)
550{ 550{
551 struct pvr2_sysfs *sfp; 551 struct pvr2_sysfs *sfp;
552 sfp = (struct pvr2_sysfs *)class_dev->driver_data; 552 sfp = dev_get_drvdata(class_dev);
553 if (!sfp) return -EINVAL; 553 if (!sfp) return -EINVAL;
554 return scnprintf(buf,PAGE_SIZE,"%d\n", 554 return scnprintf(buf,PAGE_SIZE,"%d\n",
555 pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw, 555 pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw,
@@ -561,7 +561,7 @@ static ssize_t bus_info_show(struct device *class_dev,
561 struct device_attribute *attr, char *buf) 561 struct device_attribute *attr, char *buf)
562{ 562{
563 struct pvr2_sysfs *sfp; 563 struct pvr2_sysfs *sfp;
564 sfp = (struct pvr2_sysfs *)class_dev->driver_data; 564 sfp = dev_get_drvdata(class_dev);
565 if (!sfp) return -EINVAL; 565 if (!sfp) return -EINVAL;
566 return scnprintf(buf,PAGE_SIZE,"%s\n", 566 return scnprintf(buf,PAGE_SIZE,"%s\n",
567 pvr2_hdw_get_bus_info(sfp->channel.hdw)); 567 pvr2_hdw_get_bus_info(sfp->channel.hdw));
@@ -572,7 +572,7 @@ static ssize_t hdw_name_show(struct device *class_dev,
572 struct device_attribute *attr, char *buf) 572 struct device_attribute *attr, char *buf)
573{ 573{
574 struct pvr2_sysfs *sfp; 574 struct pvr2_sysfs *sfp;
575 sfp = (struct pvr2_sysfs *)class_dev->driver_data; 575 sfp = dev_get_drvdata(class_dev);
576 if (!sfp) return -EINVAL; 576 if (!sfp) return -EINVAL;
577 return scnprintf(buf,PAGE_SIZE,"%s\n", 577 return scnprintf(buf,PAGE_SIZE,"%s\n",
578 pvr2_hdw_get_type(sfp->channel.hdw)); 578 pvr2_hdw_get_type(sfp->channel.hdw));
@@ -583,7 +583,7 @@ static ssize_t hdw_desc_show(struct device *class_dev,
583 struct device_attribute *attr, char *buf) 583 struct device_attribute *attr, char *buf)
584{ 584{
585 struct pvr2_sysfs *sfp; 585 struct pvr2_sysfs *sfp;
586 sfp = (struct pvr2_sysfs *)class_dev->driver_data; 586 sfp = dev_get_drvdata(class_dev);
587 if (!sfp) return -EINVAL; 587 if (!sfp) return -EINVAL;
588 return scnprintf(buf,PAGE_SIZE,"%s\n", 588 return scnprintf(buf,PAGE_SIZE,"%s\n",
589 pvr2_hdw_get_desc(sfp->channel.hdw)); 589 pvr2_hdw_get_desc(sfp->channel.hdw));
@@ -595,7 +595,7 @@ static ssize_t v4l_radio_minor_number_show(struct device *class_dev,
595 char *buf) 595 char *buf)
596{ 596{
597 struct pvr2_sysfs *sfp; 597 struct pvr2_sysfs *sfp;
598 sfp = (struct pvr2_sysfs *)class_dev->driver_data; 598 sfp = dev_get_drvdata(class_dev);
599 if (!sfp) return -EINVAL; 599 if (!sfp) return -EINVAL;
600 return scnprintf(buf,PAGE_SIZE,"%d\n", 600 return scnprintf(buf,PAGE_SIZE,"%d\n",
601 pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw, 601 pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw,
@@ -607,7 +607,7 @@ static ssize_t unit_number_show(struct device *class_dev,
607 struct device_attribute *attr, char *buf) 607 struct device_attribute *attr, char *buf)
608{ 608{
609 struct pvr2_sysfs *sfp; 609 struct pvr2_sysfs *sfp;
610 sfp = (struct pvr2_sysfs *)class_dev->driver_data; 610 sfp = dev_get_drvdata(class_dev);
611 if (!sfp) return -EINVAL; 611 if (!sfp) return -EINVAL;
612 return scnprintf(buf,PAGE_SIZE,"%d\n", 612 return scnprintf(buf,PAGE_SIZE,"%d\n",
613 pvr2_hdw_get_unit_number(sfp->channel.hdw)); 613 pvr2_hdw_get_unit_number(sfp->channel.hdw));
@@ -635,7 +635,7 @@ static void class_dev_create(struct pvr2_sysfs *sfp,
635 class_dev->parent = &usb_dev->dev; 635 class_dev->parent = &usb_dev->dev;
636 636
637 sfp->class_dev = class_dev; 637 sfp->class_dev = class_dev;
638 class_dev->driver_data = sfp; 638 dev_set_drvdata(class_dev, sfp);
639 ret = device_register(class_dev); 639 ret = device_register(class_dev);
640 if (ret) { 640 if (ret) {
641 pvr2_trace(PVR2_TRACE_ERROR_LEGS, 641 pvr2_trace(PVR2_TRACE_ERROR_LEGS,
@@ -792,7 +792,7 @@ static ssize_t debuginfo_show(struct device *class_dev,
792 struct device_attribute *attr, char *buf) 792 struct device_attribute *attr, char *buf)
793{ 793{
794 struct pvr2_sysfs *sfp; 794 struct pvr2_sysfs *sfp;
795 sfp = (struct pvr2_sysfs *)class_dev->driver_data; 795 sfp = dev_get_drvdata(class_dev);
796 if (!sfp) return -EINVAL; 796 if (!sfp) return -EINVAL;
797 pvr2_hdw_trigger_module_log(sfp->channel.hdw); 797 pvr2_hdw_trigger_module_log(sfp->channel.hdw);
798 return pvr2_debugifc_print_info(sfp->channel.hdw,buf,PAGE_SIZE); 798 return pvr2_debugifc_print_info(sfp->channel.hdw,buf,PAGE_SIZE);
@@ -803,7 +803,7 @@ static ssize_t debugcmd_show(struct device *class_dev,
803 struct device_attribute *attr, char *buf) 803 struct device_attribute *attr, char *buf)
804{ 804{
805 struct pvr2_sysfs *sfp; 805 struct pvr2_sysfs *sfp;
806 sfp = (struct pvr2_sysfs *)class_dev->driver_data; 806 sfp = dev_get_drvdata(class_dev);
807 if (!sfp) return -EINVAL; 807 if (!sfp) return -EINVAL;
808 return pvr2_debugifc_print_status(sfp->channel.hdw,buf,PAGE_SIZE); 808 return pvr2_debugifc_print_status(sfp->channel.hdw,buf,PAGE_SIZE);
809} 809}
@@ -816,7 +816,7 @@ static ssize_t debugcmd_store(struct device *class_dev,
816 struct pvr2_sysfs *sfp; 816 struct pvr2_sysfs *sfp;
817 int ret; 817 int ret;
818 818
819 sfp = (struct pvr2_sysfs *)class_dev->driver_data; 819 sfp = dev_get_drvdata(class_dev);
820 if (!sfp) return -EINVAL; 820 if (!sfp) return -EINVAL;
821 821
822 ret = pvr2_debugifc_docmd(sfp->channel.hdw,buf,count); 822 ret = pvr2_debugifc_docmd(sfp->channel.hdw,buf,count);
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 9e0f2b07b93b..2d8825e5b1be 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -90,7 +90,7 @@ static struct v4l2_capability pvr_capability ={
90 .driver = "pvrusb2", 90 .driver = "pvrusb2",
91 .card = "Hauppauge WinTV pvr-usb2", 91 .card = "Hauppauge WinTV pvr-usb2",
92 .bus_info = "usb", 92 .bus_info = "usb",
93 .version = KERNEL_VERSION(0,8,0), 93 .version = KERNEL_VERSION(0, 9, 0),
94 .capabilities = (V4L2_CAP_VIDEO_CAPTURE | 94 .capabilities = (V4L2_CAP_VIDEO_CAPTURE |
95 V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO | 95 V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
96 V4L2_CAP_READWRITE), 96 V4L2_CAP_READWRITE),
@@ -267,7 +267,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
267 memset(&tmp,0,sizeof(tmp)); 267 memset(&tmp,0,sizeof(tmp));
268 tmp.index = vi->index; 268 tmp.index = vi->index;
269 ret = 0; 269 ret = 0;
270 if ((vi->index < 0) || (vi->index >= fh->input_cnt)) { 270 if (vi->index >= fh->input_cnt) {
271 ret = -EINVAL; 271 ret = -EINVAL;
272 break; 272 break;
273 } 273 }
@@ -331,7 +331,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
331 case VIDIOC_S_INPUT: 331 case VIDIOC_S_INPUT:
332 { 332 {
333 struct v4l2_input *vi = (struct v4l2_input *)arg; 333 struct v4l2_input *vi = (struct v4l2_input *)arg;
334 if ((vi->index < 0) || (vi->index >= fh->input_cnt)) { 334 if (vi->index >= fh->input_cnt) {
335 ret = -ERANGE; 335 ret = -ERANGE;
336 break; 336 break;
337 } 337 }
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 7c542caf248e..db25c3034c11 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -601,7 +601,7 @@ static void pwc_snapshot_button(struct pwc_device *pdev, int down)
601 601
602#ifdef CONFIG_USB_PWC_INPUT_EVDEV 602#ifdef CONFIG_USB_PWC_INPUT_EVDEV
603 if (pdev->button_dev) { 603 if (pdev->button_dev) {
604 input_report_key(pdev->button_dev, BTN_0, down); 604 input_report_key(pdev->button_dev, KEY_CAMERA, down);
605 input_sync(pdev->button_dev); 605 input_sync(pdev->button_dev);
606 } 606 }
607#endif 607#endif
@@ -1783,7 +1783,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1783 return -ENOMEM; 1783 return -ENOMEM;
1784 } 1784 }
1785 memcpy(pdev->vdev, &pwc_template, sizeof(pwc_template)); 1785 memcpy(pdev->vdev, &pwc_template, sizeof(pwc_template));
1786 pdev->vdev->parent = &(udev->dev); 1786 pdev->vdev->parent = &intf->dev;
1787 strcpy(pdev->vdev->name, name); 1787 strcpy(pdev->vdev->name, name);
1788 video_set_drvdata(pdev->vdev, pdev); 1788 video_set_drvdata(pdev->vdev, pdev);
1789 1789
@@ -1847,7 +1847,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1847 usb_to_input_id(pdev->udev, &pdev->button_dev->id); 1847 usb_to_input_id(pdev->udev, &pdev->button_dev->id);
1848 pdev->button_dev->dev.parent = &pdev->udev->dev; 1848 pdev->button_dev->dev.parent = &pdev->udev->dev;
1849 pdev->button_dev->evbit[0] = BIT_MASK(EV_KEY); 1849 pdev->button_dev->evbit[0] = BIT_MASK(EV_KEY);
1850 pdev->button_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0); 1850 pdev->button_dev->keybit[BIT_WORD(KEY_CAMERA)] = BIT_MASK(KEY_CAMERA);
1851 1851
1852 rc = input_register_device(pdev->button_dev); 1852 rc = input_register_device(pdev->button_dev);
1853 if (rc) { 1853 if (rc) {
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index bc0a464295c5..2876ce084510 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -1107,7 +1107,7 @@ long pwc_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
1107 return -EINVAL; 1107 return -EINVAL;
1108 if (buf->memory != V4L2_MEMORY_MMAP) 1108 if (buf->memory != V4L2_MEMORY_MMAP)
1109 return -EINVAL; 1109 return -EINVAL;
1110 if (buf->index < 0 || buf->index >= pwc_mbufs) 1110 if (buf->index >= pwc_mbufs)
1111 return -EINVAL; 1111 return -EINVAL;
1112 1112
1113 buf->flags |= V4L2_BUF_FLAG_QUEUED; 1113 buf->flags |= V4L2_BUF_FLAG_QUEUED;
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index c639845460ff..f60de40fd21f 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -202,7 +202,7 @@ struct pxa_buffer {
202}; 202};
203 203
204struct pxa_camera_dev { 204struct pxa_camera_dev {
205 struct device *dev; 205 struct soc_camera_host soc_host;
206 /* PXA27x is only supposed to handle one camera on its Quick Capture 206 /* PXA27x is only supposed to handle one camera on its Quick Capture
207 * interface. If anyone ever builds hardware to enable more than 207 * interface. If anyone ever builds hardware to enable more than
208 * one camera, they will have to modify this driver too */ 208 * one camera, they will have to modify this driver too */
@@ -261,7 +261,6 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
261{ 261{
262 struct soc_camera_device *icd = vq->priv_data; 262 struct soc_camera_device *icd = vq->priv_data;
263 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); 263 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
264 struct pxa_camera_dev *pcdev = ici->priv;
265 struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb); 264 struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
266 int i; 265 int i;
267 266
@@ -278,7 +277,7 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
278 277
279 for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) { 278 for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) {
280 if (buf->dmas[i].sg_cpu) 279 if (buf->dmas[i].sg_cpu)
281 dma_free_coherent(pcdev->dev, buf->dmas[i].sg_size, 280 dma_free_coherent(ici->dev, buf->dmas[i].sg_size,
282 buf->dmas[i].sg_cpu, 281 buf->dmas[i].sg_cpu,
283 buf->dmas[i].sg_dma); 282 buf->dmas[i].sg_dma);
284 buf->dmas[i].sg_cpu = NULL; 283 buf->dmas[i].sg_cpu = NULL;
@@ -338,14 +337,14 @@ static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
338 int dma_len = 0, xfer_len = 0; 337 int dma_len = 0, xfer_len = 0;
339 338
340 if (pxa_dma->sg_cpu) 339 if (pxa_dma->sg_cpu)
341 dma_free_coherent(pcdev->dev, pxa_dma->sg_size, 340 dma_free_coherent(pcdev->soc_host.dev, pxa_dma->sg_size,
342 pxa_dma->sg_cpu, pxa_dma->sg_dma); 341 pxa_dma->sg_cpu, pxa_dma->sg_dma);
343 342
344 sglen = calculate_dma_sglen(*sg_first, dma->sglen, 343 sglen = calculate_dma_sglen(*sg_first, dma->sglen,
345 *sg_first_ofs, size); 344 *sg_first_ofs, size);
346 345
347 pxa_dma->sg_size = (sglen + 1) * sizeof(struct pxa_dma_desc); 346 pxa_dma->sg_size = (sglen + 1) * sizeof(struct pxa_dma_desc);
348 pxa_dma->sg_cpu = dma_alloc_coherent(pcdev->dev, pxa_dma->sg_size, 347 pxa_dma->sg_cpu = dma_alloc_coherent(pcdev->soc_host.dev, pxa_dma->sg_size,
349 &pxa_dma->sg_dma, GFP_KERNEL); 348 &pxa_dma->sg_dma, GFP_KERNEL);
350 if (!pxa_dma->sg_cpu) 349 if (!pxa_dma->sg_cpu)
351 return -ENOMEM; 350 return -ENOMEM;
@@ -353,7 +352,7 @@ static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
353 pxa_dma->sglen = sglen; 352 pxa_dma->sglen = sglen;
354 offset = *sg_first_ofs; 353 offset = *sg_first_ofs;
355 354
356 dev_dbg(pcdev->dev, "DMA: sg_first=%p, sglen=%d, ofs=%d, dma.desc=%x\n", 355 dev_dbg(pcdev->soc_host.dev, "DMA: sg_first=%p, sglen=%d, ofs=%d, dma.desc=%x\n",
357 *sg_first, sglen, *sg_first_ofs, pxa_dma->sg_dma); 356 *sg_first, sglen, *sg_first_ofs, pxa_dma->sg_dma);
358 357
359 358
@@ -376,7 +375,7 @@ static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
376 pxa_dma->sg_cpu[i].ddadr = 375 pxa_dma->sg_cpu[i].ddadr =
377 pxa_dma->sg_dma + (i + 1) * sizeof(struct pxa_dma_desc); 376 pxa_dma->sg_dma + (i + 1) * sizeof(struct pxa_dma_desc);
378 377
379 dev_vdbg(pcdev->dev, "DMA: desc.%08x->@phys=0x%08x, len=%d\n", 378 dev_vdbg(pcdev->soc_host.dev, "DMA: desc.%08x->@phys=0x%08x, len=%d\n",
380 pxa_dma->sg_dma + i * sizeof(struct pxa_dma_desc), 379 pxa_dma->sg_dma + i * sizeof(struct pxa_dma_desc),
381 sg_dma_address(sg) + offset, xfer_len); 380 sg_dma_address(sg) + offset, xfer_len);
382 offset = 0; 381 offset = 0;
@@ -488,7 +487,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
488 ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0, size_y, 487 ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0, size_y,
489 &sg, &next_ofs); 488 &sg, &next_ofs);
490 if (ret) { 489 if (ret) {
491 dev_err(pcdev->dev, 490 dev_err(pcdev->soc_host.dev,
492 "DMA initialization for Y/RGB failed\n"); 491 "DMA initialization for Y/RGB failed\n");
493 goto fail; 492 goto fail;
494 } 493 }
@@ -498,7 +497,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
498 ret = pxa_init_dma_channel(pcdev, buf, dma, 1, CIBR1, 497 ret = pxa_init_dma_channel(pcdev, buf, dma, 1, CIBR1,
499 size_u, &sg, &next_ofs); 498 size_u, &sg, &next_ofs);
500 if (ret) { 499 if (ret) {
501 dev_err(pcdev->dev, 500 dev_err(pcdev->soc_host.dev,
502 "DMA initialization for U failed\n"); 501 "DMA initialization for U failed\n");
503 goto fail_u; 502 goto fail_u;
504 } 503 }
@@ -508,7 +507,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
508 ret = pxa_init_dma_channel(pcdev, buf, dma, 2, CIBR2, 507 ret = pxa_init_dma_channel(pcdev, buf, dma, 2, CIBR2,
509 size_v, &sg, &next_ofs); 508 size_v, &sg, &next_ofs);
510 if (ret) { 509 if (ret) {
511 dev_err(pcdev->dev, 510 dev_err(pcdev->soc_host.dev,
512 "DMA initialization for V failed\n"); 511 "DMA initialization for V failed\n");
513 goto fail_v; 512 goto fail_v;
514 } 513 }
@@ -522,10 +521,10 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
522 return 0; 521 return 0;
523 522
524fail_v: 523fail_v:
525 dma_free_coherent(pcdev->dev, buf->dmas[1].sg_size, 524 dma_free_coherent(pcdev->soc_host.dev, buf->dmas[1].sg_size,
526 buf->dmas[1].sg_cpu, buf->dmas[1].sg_dma); 525 buf->dmas[1].sg_cpu, buf->dmas[1].sg_dma);
527fail_u: 526fail_u:
528 dma_free_coherent(pcdev->dev, buf->dmas[0].sg_size, 527 dma_free_coherent(pcdev->soc_host.dev, buf->dmas[0].sg_size,
529 buf->dmas[0].sg_cpu, buf->dmas[0].sg_dma); 528 buf->dmas[0].sg_cpu, buf->dmas[0].sg_dma);
530fail: 529fail:
531 free_buffer(vq, buf); 530 free_buffer(vq, buf);
@@ -549,7 +548,7 @@ static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev)
549 active = pcdev->active; 548 active = pcdev->active;
550 549
551 for (i = 0; i < pcdev->channels; i++) { 550 for (i = 0; i < pcdev->channels; i++) {
552 dev_dbg(pcdev->dev, "%s (channel=%d) ddadr=%08x\n", __func__, 551 dev_dbg(pcdev->soc_host.dev, "%s (channel=%d) ddadr=%08x\n", __func__,
553 i, active->dmas[i].sg_dma); 552 i, active->dmas[i].sg_dma);
554 DDADR(pcdev->dma_chans[i]) = active->dmas[i].sg_dma; 553 DDADR(pcdev->dma_chans[i]) = active->dmas[i].sg_dma;
555 DCSR(pcdev->dma_chans[i]) = DCSR_RUN; 554 DCSR(pcdev->dma_chans[i]) = DCSR_RUN;
@@ -561,7 +560,7 @@ static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev)
561 int i; 560 int i;
562 561
563 for (i = 0; i < pcdev->channels; i++) { 562 for (i = 0; i < pcdev->channels; i++) {
564 dev_dbg(pcdev->dev, "%s (channel=%d)\n", __func__, i); 563 dev_dbg(pcdev->soc_host.dev, "%s (channel=%d)\n", __func__, i);
565 DCSR(pcdev->dma_chans[i]) = 0; 564 DCSR(pcdev->dma_chans[i]) = 0;
566 } 565 }
567} 566}
@@ -597,7 +596,7 @@ static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev)
597{ 596{
598 unsigned long cicr0, cifr; 597 unsigned long cicr0, cifr;
599 598
600 dev_dbg(pcdev->dev, "%s\n", __func__); 599 dev_dbg(pcdev->soc_host.dev, "%s\n", __func__);
601 /* Reset the FIFOs */ 600 /* Reset the FIFOs */
602 cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F; 601 cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
603 __raw_writel(cifr, pcdev->base + CIFR); 602 __raw_writel(cifr, pcdev->base + CIFR);
@@ -617,7 +616,7 @@ static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev)
617 __raw_writel(cicr0, pcdev->base + CICR0); 616 __raw_writel(cicr0, pcdev->base + CICR0);
618 617
619 pcdev->active = NULL; 618 pcdev->active = NULL;
620 dev_dbg(pcdev->dev, "%s\n", __func__); 619 dev_dbg(pcdev->soc_host.dev, "%s\n", __func__);
621} 620}
622 621
623static void pxa_videobuf_queue(struct videobuf_queue *vq, 622static void pxa_videobuf_queue(struct videobuf_queue *vq,
@@ -686,7 +685,7 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
686 do_gettimeofday(&vb->ts); 685 do_gettimeofday(&vb->ts);
687 vb->field_count++; 686 vb->field_count++;
688 wake_up(&vb->done); 687 wake_up(&vb->done);
689 dev_dbg(pcdev->dev, "%s dequeud buffer (vb=0x%p)\n", __func__, vb); 688 dev_dbg(pcdev->soc_host.dev, "%s dequeud buffer (vb=0x%p)\n", __func__, vb);
690 689
691 if (list_empty(&pcdev->capture)) { 690 if (list_empty(&pcdev->capture)) {
692 pxa_camera_stop_capture(pcdev); 691 pxa_camera_stop_capture(pcdev);
@@ -722,7 +721,7 @@ static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev)
722 for (i = 0; i < pcdev->channels; i++) 721 for (i = 0; i < pcdev->channels; i++)
723 if (DDADR(pcdev->dma_chans[i]) != DDADR_STOP) 722 if (DDADR(pcdev->dma_chans[i]) != DDADR_STOP)
724 is_dma_stopped = 0; 723 is_dma_stopped = 0;
725 dev_dbg(pcdev->dev, "%s : top queued buffer=%p, dma_stopped=%d\n", 724 dev_dbg(pcdev->soc_host.dev, "%s : top queued buffer=%p, dma_stopped=%d\n",
726 __func__, pcdev->active, is_dma_stopped); 725 __func__, pcdev->active, is_dma_stopped);
727 if (pcdev->active && is_dma_stopped) 726 if (pcdev->active && is_dma_stopped)
728 pxa_camera_start_capture(pcdev); 727 pxa_camera_start_capture(pcdev);
@@ -747,12 +746,12 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
747 overrun |= CISR_IFO_1 | CISR_IFO_2; 746 overrun |= CISR_IFO_1 | CISR_IFO_2;
748 747
749 if (status & DCSR_BUSERR) { 748 if (status & DCSR_BUSERR) {
750 dev_err(pcdev->dev, "DMA Bus Error IRQ!\n"); 749 dev_err(pcdev->soc_host.dev, "DMA Bus Error IRQ!\n");
751 goto out; 750 goto out;
752 } 751 }
753 752
754 if (!(status & (DCSR_ENDINTR | DCSR_STARTINTR))) { 753 if (!(status & (DCSR_ENDINTR | DCSR_STARTINTR))) {
755 dev_err(pcdev->dev, "Unknown DMA IRQ source, " 754 dev_err(pcdev->soc_host.dev, "Unknown DMA IRQ source, "
756 "status: 0x%08x\n", status); 755 "status: 0x%08x\n", status);
757 goto out; 756 goto out;
758 } 757 }
@@ -776,7 +775,7 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
776 buf = container_of(vb, struct pxa_buffer, vb); 775 buf = container_of(vb, struct pxa_buffer, vb);
777 WARN_ON(buf->inwork || list_empty(&vb->queue)); 776 WARN_ON(buf->inwork || list_empty(&vb->queue));
778 777
779 dev_dbg(pcdev->dev, "%s channel=%d %s%s(vb=0x%p) dma.desc=%x\n", 778 dev_dbg(pcdev->soc_host.dev, "%s channel=%d %s%s(vb=0x%p) dma.desc=%x\n",
780 __func__, channel, status & DCSR_STARTINTR ? "SOF " : "", 779 __func__, channel, status & DCSR_STARTINTR ? "SOF " : "",
781 status & DCSR_ENDINTR ? "EOF " : "", vb, DDADR(channel)); 780 status & DCSR_ENDINTR ? "EOF " : "", vb, DDADR(channel));
782 781
@@ -787,7 +786,7 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
787 */ 786 */
788 if (camera_status & overrun && 787 if (camera_status & overrun &&
789 !list_is_last(pcdev->capture.next, &pcdev->capture)) { 788 !list_is_last(pcdev->capture.next, &pcdev->capture)) {
790 dev_dbg(pcdev->dev, "FIFO overrun! CISR: %x\n", 789 dev_dbg(pcdev->soc_host.dev, "FIFO overrun! CISR: %x\n",
791 camera_status); 790 camera_status);
792 pxa_camera_stop_capture(pcdev); 791 pxa_camera_stop_capture(pcdev);
793 pxa_camera_start_capture(pcdev); 792 pxa_camera_start_capture(pcdev);
@@ -854,7 +853,7 @@ static u32 mclk_get_divisor(struct pxa_camera_dev *pcdev)
854 /* mclk <= ciclk / 4 (27.4.2) */ 853 /* mclk <= ciclk / 4 (27.4.2) */
855 if (mclk > lcdclk / 4) { 854 if (mclk > lcdclk / 4) {
856 mclk = lcdclk / 4; 855 mclk = lcdclk / 4;
857 dev_warn(pcdev->dev, "Limiting master clock to %lu\n", mclk); 856 dev_warn(pcdev->soc_host.dev, "Limiting master clock to %lu\n", mclk);
858 } 857 }
859 858
860 /* We verify mclk != 0, so if anyone breaks it, here comes their Oops */ 859 /* We verify mclk != 0, so if anyone breaks it, here comes their Oops */
@@ -864,7 +863,7 @@ static u32 mclk_get_divisor(struct pxa_camera_dev *pcdev)
864 if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN) 863 if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
865 pcdev->mclk = lcdclk / (2 * (div + 1)); 864 pcdev->mclk = lcdclk / (2 * (div + 1));
866 865
867 dev_dbg(pcdev->dev, "LCD clock %luHz, target freq %luHz, " 866 dev_dbg(pcdev->soc_host.dev, "LCD clock %luHz, target freq %luHz, "
868 "divisor %u\n", lcdclk, mclk, div); 867 "divisor %u\n", lcdclk, mclk, div);
869 868
870 return div; 869 return div;
@@ -884,12 +883,12 @@ static void pxa_camera_activate(struct pxa_camera_dev *pcdev)
884 struct pxacamera_platform_data *pdata = pcdev->pdata; 883 struct pxacamera_platform_data *pdata = pcdev->pdata;
885 u32 cicr4 = 0; 884 u32 cicr4 = 0;
886 885
887 dev_dbg(pcdev->dev, "Registered platform device at %p data %p\n", 886 dev_dbg(pcdev->soc_host.dev, "Registered platform device at %p data %p\n",
888 pcdev, pdata); 887 pcdev, pdata);
889 888
890 if (pdata && pdata->init) { 889 if (pdata && pdata->init) {
891 dev_dbg(pcdev->dev, "%s: Init gpios\n", __func__); 890 dev_dbg(pcdev->soc_host.dev, "%s: Init gpios\n", __func__);
892 pdata->init(pcdev->dev); 891 pdata->init(pcdev->soc_host.dev);
893 } 892 }
894 893
895 /* disable all interrupts */ 894 /* disable all interrupts */
@@ -931,7 +930,7 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
931 struct videobuf_buffer *vb; 930 struct videobuf_buffer *vb;
932 931
933 status = __raw_readl(pcdev->base + CISR); 932 status = __raw_readl(pcdev->base + CISR);
934 dev_dbg(pcdev->dev, "Camera interrupt status 0x%lx\n", status); 933 dev_dbg(pcdev->soc_host.dev, "Camera interrupt status 0x%lx\n", status);
935 934
936 if (!status) 935 if (!status)
937 return IRQ_NONE; 936 return IRQ_NONE;
@@ -1259,7 +1258,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx,
1259 xlate->cam_fmt = icd->formats + idx; 1258 xlate->cam_fmt = icd->formats + idx;
1260 xlate->buswidth = buswidth; 1259 xlate->buswidth = buswidth;
1261 xlate++; 1260 xlate++;
1262 dev_dbg(&ici->dev, "Providing format %s using %s\n", 1261 dev_dbg(ici->dev, "Providing format %s using %s\n",
1263 pxa_camera_formats[0].name, 1262 pxa_camera_formats[0].name,
1264 icd->formats[idx].name); 1263 icd->formats[idx].name);
1265 } 1264 }
@@ -1274,7 +1273,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx,
1274 xlate->cam_fmt = icd->formats + idx; 1273 xlate->cam_fmt = icd->formats + idx;
1275 xlate->buswidth = buswidth; 1274 xlate->buswidth = buswidth;
1276 xlate++; 1275 xlate++;
1277 dev_dbg(&ici->dev, "Providing format %s packed\n", 1276 dev_dbg(ici->dev, "Providing format %s packed\n",
1278 icd->formats[idx].name); 1277 icd->formats[idx].name);
1279 } 1278 }
1280 break; 1279 break;
@@ -1286,7 +1285,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, int idx,
1286 xlate->cam_fmt = icd->formats + idx; 1285 xlate->cam_fmt = icd->formats + idx;
1287 xlate->buswidth = icd->formats[idx].depth; 1286 xlate->buswidth = icd->formats[idx].depth;
1288 xlate++; 1287 xlate++;
1289 dev_dbg(&ici->dev, 1288 dev_dbg(ici->dev,
1290 "Providing format %s in pass-through mode\n", 1289 "Providing format %s in pass-through mode\n",
1291 icd->formats[idx].name); 1290 icd->formats[idx].name);
1292 } 1291 }
@@ -1315,11 +1314,11 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
1315 icd->sense = NULL; 1314 icd->sense = NULL;
1316 1315
1317 if (ret < 0) { 1316 if (ret < 0) {
1318 dev_warn(&ici->dev, "Failed to crop to %ux%u@%u:%u\n", 1317 dev_warn(ici->dev, "Failed to crop to %ux%u@%u:%u\n",
1319 rect->width, rect->height, rect->left, rect->top); 1318 rect->width, rect->height, rect->left, rect->top);
1320 } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { 1319 } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
1321 if (sense.pixel_clock > sense.pixel_clock_max) { 1320 if (sense.pixel_clock > sense.pixel_clock_max) {
1322 dev_err(&ici->dev, 1321 dev_err(ici->dev,
1323 "pixel clock %lu set by the camera too high!", 1322 "pixel clock %lu set by the camera too high!",
1324 sense.pixel_clock); 1323 sense.pixel_clock);
1325 return -EIO; 1324 return -EIO;
@@ -1347,7 +1346,7 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
1347 1346
1348 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); 1347 xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
1349 if (!xlate) { 1348 if (!xlate) {
1350 dev_warn(&ici->dev, "Format %x not found\n", pix->pixelformat); 1349 dev_warn(ici->dev, "Format %x not found\n", pix->pixelformat);
1351 return -EINVAL; 1350 return -EINVAL;
1352 } 1351 }
1353 1352
@@ -1363,11 +1362,11 @@ static int pxa_camera_set_fmt(struct soc_camera_device *icd,
1363 icd->sense = NULL; 1362 icd->sense = NULL;
1364 1363
1365 if (ret < 0) { 1364 if (ret < 0) {
1366 dev_warn(&ici->dev, "Failed to configure for format %x\n", 1365 dev_warn(ici->dev, "Failed to configure for format %x\n",
1367 pix->pixelformat); 1366 pix->pixelformat);
1368 } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { 1367 } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) {
1369 if (sense.pixel_clock > sense.pixel_clock_max) { 1368 if (sense.pixel_clock > sense.pixel_clock_max) {
1370 dev_err(&ici->dev, 1369 dev_err(ici->dev,
1371 "pixel clock %lu set by the camera too high!", 1370 "pixel clock %lu set by the camera too high!",
1372 sense.pixel_clock); 1371 sense.pixel_clock);
1373 return -EIO; 1372 return -EIO;
@@ -1395,7 +1394,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
1395 1394
1396 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 1395 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
1397 if (!xlate) { 1396 if (!xlate) {
1398 dev_warn(&ici->dev, "Format %x not found\n", pixfmt); 1397 dev_warn(ici->dev, "Format %x not found\n", pixfmt);
1399 return -EINVAL; 1398 return -EINVAL;
1400 } 1399 }
1401 1400
@@ -1552,13 +1551,7 @@ static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
1552 .set_bus_param = pxa_camera_set_bus_param, 1551 .set_bus_param = pxa_camera_set_bus_param,
1553}; 1552};
1554 1553
1555/* Should be allocated dynamically too, but we have only one. */ 1554static int __devinit pxa_camera_probe(struct platform_device *pdev)
1556static struct soc_camera_host pxa_soc_camera_host = {
1557 .drv_name = PXA_CAM_DRV_NAME,
1558 .ops = &pxa_soc_camera_host_ops,
1559};
1560
1561static int pxa_camera_probe(struct platform_device *pdev)
1562{ 1555{
1563 struct pxa_camera_dev *pcdev; 1556 struct pxa_camera_dev *pcdev;
1564 struct resource *res; 1557 struct resource *res;
@@ -1586,7 +1579,6 @@ static int pxa_camera_probe(struct platform_device *pdev)
1586 goto exit_kfree; 1579 goto exit_kfree;
1587 } 1580 }
1588 1581
1589 dev_set_drvdata(&pdev->dev, pcdev);
1590 pcdev->res = res; 1582 pcdev->res = res;
1591 1583
1592 pcdev->pdata = pdev->dev.platform_data; 1584 pcdev->pdata = pdev->dev.platform_data;
@@ -1607,7 +1599,6 @@ static int pxa_camera_probe(struct platform_device *pdev)
1607 pcdev->mclk = 20000000; 1599 pcdev->mclk = 20000000;
1608 } 1600 }
1609 1601
1610 pcdev->dev = &pdev->dev;
1611 pcdev->mclk_divisor = mclk_get_divisor(pcdev); 1602 pcdev->mclk_divisor = mclk_get_divisor(pcdev);
1612 1603
1613 INIT_LIST_HEAD(&pcdev->capture); 1604 INIT_LIST_HEAD(&pcdev->capture);
@@ -1616,13 +1607,13 @@ static int pxa_camera_probe(struct platform_device *pdev)
1616 /* 1607 /*
1617 * Request the regions. 1608 * Request the regions.
1618 */ 1609 */
1619 if (!request_mem_region(res->start, res->end - res->start + 1, 1610 if (!request_mem_region(res->start, resource_size(res),
1620 PXA_CAM_DRV_NAME)) { 1611 PXA_CAM_DRV_NAME)) {
1621 err = -EBUSY; 1612 err = -EBUSY;
1622 goto exit_clk; 1613 goto exit_clk;
1623 } 1614 }
1624 1615
1625 base = ioremap(res->start, res->end - res->start + 1); 1616 base = ioremap(res->start, resource_size(res));
1626 if (!base) { 1617 if (!base) {
1627 err = -ENOMEM; 1618 err = -ENOMEM;
1628 goto exit_release; 1619 goto exit_release;
@@ -1634,29 +1625,29 @@ static int pxa_camera_probe(struct platform_device *pdev)
1634 err = pxa_request_dma("CI_Y", DMA_PRIO_HIGH, 1625 err = pxa_request_dma("CI_Y", DMA_PRIO_HIGH,
1635 pxa_camera_dma_irq_y, pcdev); 1626 pxa_camera_dma_irq_y, pcdev);
1636 if (err < 0) { 1627 if (err < 0) {
1637 dev_err(pcdev->dev, "Can't request DMA for Y\n"); 1628 dev_err(&pdev->dev, "Can't request DMA for Y\n");
1638 goto exit_iounmap; 1629 goto exit_iounmap;
1639 } 1630 }
1640 pcdev->dma_chans[0] = err; 1631 pcdev->dma_chans[0] = err;
1641 dev_dbg(pcdev->dev, "got DMA channel %d\n", pcdev->dma_chans[0]); 1632 dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chans[0]);
1642 1633
1643 err = pxa_request_dma("CI_U", DMA_PRIO_HIGH, 1634 err = pxa_request_dma("CI_U", DMA_PRIO_HIGH,
1644 pxa_camera_dma_irq_u, pcdev); 1635 pxa_camera_dma_irq_u, pcdev);
1645 if (err < 0) { 1636 if (err < 0) {
1646 dev_err(pcdev->dev, "Can't request DMA for U\n"); 1637 dev_err(&pdev->dev, "Can't request DMA for U\n");
1647 goto exit_free_dma_y; 1638 goto exit_free_dma_y;
1648 } 1639 }
1649 pcdev->dma_chans[1] = err; 1640 pcdev->dma_chans[1] = err;
1650 dev_dbg(pcdev->dev, "got DMA channel (U) %d\n", pcdev->dma_chans[1]); 1641 dev_dbg(&pdev->dev, "got DMA channel (U) %d\n", pcdev->dma_chans[1]);
1651 1642
1652 err = pxa_request_dma("CI_V", DMA_PRIO_HIGH, 1643 err = pxa_request_dma("CI_V", DMA_PRIO_HIGH,
1653 pxa_camera_dma_irq_v, pcdev); 1644 pxa_camera_dma_irq_v, pcdev);
1654 if (err < 0) { 1645 if (err < 0) {
1655 dev_err(pcdev->dev, "Can't request DMA for V\n"); 1646 dev_err(&pdev->dev, "Can't request DMA for V\n");
1656 goto exit_free_dma_u; 1647 goto exit_free_dma_u;
1657 } 1648 }
1658 pcdev->dma_chans[2] = err; 1649 pcdev->dma_chans[2] = err;
1659 dev_dbg(pcdev->dev, "got DMA channel (V) %d\n", pcdev->dma_chans[2]); 1650 dev_dbg(&pdev->dev, "got DMA channel (V) %d\n", pcdev->dma_chans[2]);
1660 1651
1661 DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD; 1652 DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD;
1662 DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD; 1653 DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD;
@@ -1666,14 +1657,17 @@ static int pxa_camera_probe(struct platform_device *pdev)
1666 err = request_irq(pcdev->irq, pxa_camera_irq, 0, PXA_CAM_DRV_NAME, 1657 err = request_irq(pcdev->irq, pxa_camera_irq, 0, PXA_CAM_DRV_NAME,
1667 pcdev); 1658 pcdev);
1668 if (err) { 1659 if (err) {
1669 dev_err(pcdev->dev, "Camera interrupt register failed \n"); 1660 dev_err(&pdev->dev, "Camera interrupt register failed \n");
1670 goto exit_free_dma; 1661 goto exit_free_dma;
1671 } 1662 }
1672 1663
1673 pxa_soc_camera_host.priv = pcdev; 1664 pcdev->soc_host.drv_name = PXA_CAM_DRV_NAME;
1674 pxa_soc_camera_host.dev.parent = &pdev->dev; 1665 pcdev->soc_host.ops = &pxa_soc_camera_host_ops;
1675 pxa_soc_camera_host.nr = pdev->id; 1666 pcdev->soc_host.priv = pcdev;
1676 err = soc_camera_host_register(&pxa_soc_camera_host); 1667 pcdev->soc_host.dev = &pdev->dev;
1668 pcdev->soc_host.nr = pdev->id;
1669
1670 err = soc_camera_host_register(&pcdev->soc_host);
1677 if (err) 1671 if (err)
1678 goto exit_free_irq; 1672 goto exit_free_irq;
1679 1673
@@ -1690,7 +1684,7 @@ exit_free_dma_y:
1690exit_iounmap: 1684exit_iounmap:
1691 iounmap(base); 1685 iounmap(base);
1692exit_release: 1686exit_release:
1693 release_mem_region(res->start, res->end - res->start + 1); 1687 release_mem_region(res->start, resource_size(res));
1694exit_clk: 1688exit_clk:
1695 clk_put(pcdev->clk); 1689 clk_put(pcdev->clk);
1696exit_kfree: 1690exit_kfree:
@@ -1701,7 +1695,9 @@ exit:
1701 1695
1702static int __devexit pxa_camera_remove(struct platform_device *pdev) 1696static int __devexit pxa_camera_remove(struct platform_device *pdev)
1703{ 1697{
1704 struct pxa_camera_dev *pcdev = platform_get_drvdata(pdev); 1698 struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
1699 struct pxa_camera_dev *pcdev = container_of(soc_host,
1700 struct pxa_camera_dev, soc_host);
1705 struct resource *res; 1701 struct resource *res;
1706 1702
1707 clk_put(pcdev->clk); 1703 clk_put(pcdev->clk);
@@ -1711,12 +1707,12 @@ static int __devexit pxa_camera_remove(struct platform_device *pdev)
1711 pxa_free_dma(pcdev->dma_chans[2]); 1707 pxa_free_dma(pcdev->dma_chans[2]);
1712 free_irq(pcdev->irq, pcdev); 1708 free_irq(pcdev->irq, pcdev);
1713 1709
1714 soc_camera_host_unregister(&pxa_soc_camera_host); 1710 soc_camera_host_unregister(soc_host);
1715 1711
1716 iounmap(pcdev->base); 1712 iounmap(pcdev->base);
1717 1713
1718 res = pcdev->res; 1714 res = pcdev->res;
1719 release_mem_region(res->start, res->end - res->start + 1); 1715 release_mem_region(res->start, resource_size(res));
1720 1716
1721 kfree(pcdev); 1717 kfree(pcdev);
1722 1718
@@ -1730,11 +1726,11 @@ static struct platform_driver pxa_camera_driver = {
1730 .name = PXA_CAM_DRV_NAME, 1726 .name = PXA_CAM_DRV_NAME,
1731 }, 1727 },
1732 .probe = pxa_camera_probe, 1728 .probe = pxa_camera_probe,
1733 .remove = __exit_p(pxa_camera_remove), 1729 .remove = __devexit_p(pxa_camera_remove),
1734}; 1730};
1735 1731
1736 1732
1737static int __devinit pxa_camera_init(void) 1733static int __init pxa_camera_init(void)
1738{ 1734{
1739 return platform_driver_register(&pxa_camera_driver); 1735 return platform_driver_register(&pxa_camera_driver);
1740} 1736}
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 30f4698be90a..6be845ccc7d7 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -77,6 +77,8 @@
77#define MAX_CHANNELS 4 77#define MAX_CHANNELS 4
78#define S2255_MARKER_FRAME 0x2255DA4AL 78#define S2255_MARKER_FRAME 0x2255DA4AL
79#define S2255_MARKER_RESPONSE 0x2255ACACL 79#define S2255_MARKER_RESPONSE 0x2255ACACL
80#define S2255_RESPONSE_SETMODE 0x01
81#define S2255_RESPONSE_FW 0x10
80#define S2255_USB_XFER_SIZE (16 * 1024) 82#define S2255_USB_XFER_SIZE (16 * 1024)
81#define MAX_CHANNELS 4 83#define MAX_CHANNELS 4
82#define MAX_PIPE_BUFFERS 1 84#define MAX_PIPE_BUFFERS 1
@@ -107,6 +109,8 @@
107#define SCALE_4CIFS 1 /* 640x480(NTSC) or 704x576(PAL) */ 109#define SCALE_4CIFS 1 /* 640x480(NTSC) or 704x576(PAL) */
108#define SCALE_2CIFS 2 /* 640x240(NTSC) or 704x288(PAL) */ 110#define SCALE_2CIFS 2 /* 640x240(NTSC) or 704x288(PAL) */
109#define SCALE_1CIFS 3 /* 320x240(NTSC) or 352x288(PAL) */ 111#define SCALE_1CIFS 3 /* 320x240(NTSC) or 352x288(PAL) */
112/* SCALE_4CIFSI is the 2 fields interpolated into one */
113#define SCALE_4CIFSI 4 /* 640x480(NTSC) or 704x576(PAL) high quality */
110 114
111#define COLOR_YUVPL 1 /* YUV planar */ 115#define COLOR_YUVPL 1 /* YUV planar */
112#define COLOR_YUVPK 2 /* YUV packed */ 116#define COLOR_YUVPK 2 /* YUV packed */
@@ -178,9 +182,6 @@ struct s2255_bufferi {
178 182
179struct s2255_dmaqueue { 183struct s2255_dmaqueue {
180 struct list_head active; 184 struct list_head active;
181 /* thread for acquisition */
182 struct task_struct *kthread;
183 int frame;
184 struct s2255_dev *dev; 185 struct s2255_dev *dev;
185 int channel; 186 int channel;
186}; 187};
@@ -210,16 +211,11 @@ struct s2255_pipeinfo {
210 u32 max_transfer_size; 211 u32 max_transfer_size;
211 u32 cur_transfer_size; 212 u32 cur_transfer_size;
212 u8 *transfer_buffer; 213 u8 *transfer_buffer;
213 u32 transfer_flags;;
214 u32 state; 214 u32 state;
215 u32 prev_state;
216 u32 urb_size;
217 void *stream_urb; 215 void *stream_urb;
218 void *dev; /* back pointer to s2255_dev struct*/ 216 void *dev; /* back pointer to s2255_dev struct*/
219 u32 err_count; 217 u32 err_count;
220 u32 buf_index;
221 u32 idx; 218 u32 idx;
222 u32 priority_set;
223}; 219};
224 220
225struct s2255_fmt; /*forward declaration */ 221struct s2255_fmt; /*forward declaration */
@@ -239,13 +235,13 @@ struct s2255_dev {
239 struct list_head s2255_devlist; 235 struct list_head s2255_devlist;
240 struct timer_list timer; 236 struct timer_list timer;
241 struct s2255_fw *fw_data; 237 struct s2255_fw *fw_data;
242 int board_num;
243 int is_open;
244 struct s2255_pipeinfo pipes[MAX_PIPE_BUFFERS]; 238 struct s2255_pipeinfo pipes[MAX_PIPE_BUFFERS];
245 struct s2255_bufferi buffer[MAX_CHANNELS]; 239 struct s2255_bufferi buffer[MAX_CHANNELS];
246 struct s2255_mode mode[MAX_CHANNELS]; 240 struct s2255_mode mode[MAX_CHANNELS];
247 /* jpeg compression */ 241 /* jpeg compression */
248 struct v4l2_jpegcompression jc[MAX_CHANNELS]; 242 struct v4l2_jpegcompression jc[MAX_CHANNELS];
243 /* capture parameters (for high quality mode full size) */
244 struct v4l2_captureparm cap_parm[MAX_CHANNELS];
249 const struct s2255_fmt *cur_fmt[MAX_CHANNELS]; 245 const struct s2255_fmt *cur_fmt[MAX_CHANNELS];
250 int cur_frame[MAX_CHANNELS]; 246 int cur_frame[MAX_CHANNELS];
251 int last_frame[MAX_CHANNELS]; 247 int last_frame[MAX_CHANNELS];
@@ -297,9 +293,10 @@ struct s2255_fh {
297 int resources[MAX_CHANNELS]; 293 int resources[MAX_CHANNELS];
298}; 294};
299 295
300#define CUR_USB_FWVER 774 /* current cypress EEPROM firmware version */ 296/* current cypress EEPROM firmware version */
297#define S2255_CUR_USB_FWVER ((3 << 8) | 6)
301#define S2255_MAJOR_VERSION 1 298#define S2255_MAJOR_VERSION 1
302#define S2255_MINOR_VERSION 13 299#define S2255_MINOR_VERSION 14
303#define S2255_RELEASE 0 300#define S2255_RELEASE 0
304#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \ 301#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
305 S2255_MINOR_VERSION, \ 302 S2255_MINOR_VERSION, \
@@ -1027,9 +1024,16 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
1027 fh->type = f->type; 1024 fh->type = f->type;
1028 norm = norm_minw(fh->dev->vdev[fh->channel]); 1025 norm = norm_minw(fh->dev->vdev[fh->channel]);
1029 if (fh->width > norm_minw(fh->dev->vdev[fh->channel])) { 1026 if (fh->width > norm_minw(fh->dev->vdev[fh->channel])) {
1030 if (fh->height > norm_minh(fh->dev->vdev[fh->channel])) 1027 if (fh->height > norm_minh(fh->dev->vdev[fh->channel])) {
1031 fh->mode.scale = SCALE_4CIFS; 1028 if (fh->dev->cap_parm[fh->channel].capturemode &
1032 else 1029 V4L2_MODE_HIGHQUALITY) {
1030 fh->mode.scale = SCALE_4CIFSI;
1031 dprintk(2, "scale 4CIFSI\n");
1032 } else {
1033 fh->mode.scale = SCALE_4CIFS;
1034 dprintk(2, "scale 4CIFS\n");
1035 }
1036 } else
1033 fh->mode.scale = SCALE_2CIFS; 1037 fh->mode.scale = SCALE_2CIFS;
1034 1038
1035 } else { 1039 } else {
@@ -1130,6 +1134,7 @@ static u32 get_transfer_size(struct s2255_mode *mode)
1130 if (mode->format == FORMAT_NTSC) { 1134 if (mode->format == FORMAT_NTSC) {
1131 switch (mode->scale) { 1135 switch (mode->scale) {
1132 case SCALE_4CIFS: 1136 case SCALE_4CIFS:
1137 case SCALE_4CIFSI:
1133 linesPerFrame = NUM_LINES_4CIFS_NTSC * 2; 1138 linesPerFrame = NUM_LINES_4CIFS_NTSC * 2;
1134 pixelsPerLine = LINE_SZ_4CIFS_NTSC; 1139 pixelsPerLine = LINE_SZ_4CIFS_NTSC;
1135 break; 1140 break;
@@ -1147,6 +1152,7 @@ static u32 get_transfer_size(struct s2255_mode *mode)
1147 } else if (mode->format == FORMAT_PAL) { 1152 } else if (mode->format == FORMAT_PAL) {
1148 switch (mode->scale) { 1153 switch (mode->scale) {
1149 case SCALE_4CIFS: 1154 case SCALE_4CIFS:
1155 case SCALE_4CIFSI:
1150 linesPerFrame = NUM_LINES_4CIFS_PAL * 2; 1156 linesPerFrame = NUM_LINES_4CIFS_PAL * 2;
1151 pixelsPerLine = LINE_SZ_4CIFS_PAL; 1157 pixelsPerLine = LINE_SZ_4CIFS_PAL;
1152 break; 1158 break;
@@ -1502,6 +1508,33 @@ static int vidioc_s_jpegcomp(struct file *file, void *priv,
1502 dprintk(2, "setting jpeg quality %d\n", jc->quality); 1508 dprintk(2, "setting jpeg quality %d\n", jc->quality);
1503 return 0; 1509 return 0;
1504} 1510}
1511
1512static int vidioc_g_parm(struct file *file, void *priv,
1513 struct v4l2_streamparm *sp)
1514{
1515 struct s2255_fh *fh = priv;
1516 struct s2255_dev *dev = fh->dev;
1517 if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1518 return -EINVAL;
1519 sp->parm.capture.capturemode = dev->cap_parm[fh->channel].capturemode;
1520 dprintk(2, "getting parm %d\n", sp->parm.capture.capturemode);
1521 return 0;
1522}
1523
1524static int vidioc_s_parm(struct file *file, void *priv,
1525 struct v4l2_streamparm *sp)
1526{
1527 struct s2255_fh *fh = priv;
1528 struct s2255_dev *dev = fh->dev;
1529
1530 if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1531 return -EINVAL;
1532
1533 dev->cap_parm[fh->channel].capturemode = sp->parm.capture.capturemode;
1534 dprintk(2, "setting param capture mode %d\n",
1535 sp->parm.capture.capturemode);
1536 return 0;
1537}
1505static int s2255_open(struct file *file) 1538static int s2255_open(struct file *file)
1506{ 1539{
1507 int minor = video_devdata(file)->minor; 1540 int minor = video_devdata(file)->minor;
@@ -1793,6 +1826,8 @@ static const struct v4l2_ioctl_ops s2255_ioctl_ops = {
1793#endif 1826#endif
1794 .vidioc_s_jpegcomp = vidioc_s_jpegcomp, 1827 .vidioc_s_jpegcomp = vidioc_s_jpegcomp,
1795 .vidioc_g_jpegcomp = vidioc_g_jpegcomp, 1828 .vidioc_g_jpegcomp = vidioc_g_jpegcomp,
1829 .vidioc_s_parm = vidioc_s_parm,
1830 .vidioc_g_parm = vidioc_g_parm,
1796}; 1831};
1797 1832
1798static struct video_device template = { 1833static struct video_device template = {
@@ -1818,7 +1853,6 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
1818 INIT_LIST_HEAD(&dev->vidq[i].active); 1853 INIT_LIST_HEAD(&dev->vidq[i].active);
1819 dev->vidq[i].dev = dev; 1854 dev->vidq[i].dev = dev;
1820 dev->vidq[i].channel = i; 1855 dev->vidq[i].channel = i;
1821 dev->vidq[i].kthread = NULL;
1822 /* register 4 video devices */ 1856 /* register 4 video devices */
1823 dev->vdev[i] = video_device_alloc(); 1857 dev->vdev[i] = video_device_alloc();
1824 memcpy(dev->vdev[i], &template, sizeof(struct video_device)); 1858 memcpy(dev->vdev[i], &template, sizeof(struct video_device));
@@ -1839,7 +1873,9 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
1839 return ret; 1873 return ret;
1840 } 1874 }
1841 } 1875 }
1842 printk(KERN_INFO "Sensoray 2255 V4L driver\n"); 1876 printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %d.%d\n",
1877 S2255_MAJOR_VERSION,
1878 S2255_MINOR_VERSION);
1843 return ret; 1879 return ret;
1844} 1880}
1845 1881
@@ -1929,14 +1965,14 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
1929 if (!(cc >= 0 && cc < MAX_CHANNELS)) 1965 if (!(cc >= 0 && cc < MAX_CHANNELS))
1930 break; 1966 break;
1931 switch (pdword[2]) { 1967 switch (pdword[2]) {
1932 case 0x01: 1968 case S2255_RESPONSE_SETMODE:
1933 /* check if channel valid */ 1969 /* check if channel valid */
1934 /* set mode ready */ 1970 /* set mode ready */
1935 dev->setmode_ready[cc] = 1; 1971 dev->setmode_ready[cc] = 1;
1936 wake_up(&dev->wait_setmode[cc]); 1972 wake_up(&dev->wait_setmode[cc]);
1937 dprintk(5, "setmode ready %d\n", cc); 1973 dprintk(5, "setmode ready %d\n", cc);
1938 break; 1974 break;
1939 case 0x10: 1975 case S2255_RESPONSE_FW:
1940 1976
1941 dev->chn_ready |= (1 << cc); 1977 dev->chn_ready |= (1 << cc);
1942 if ((dev->chn_ready & 0x0f) != 0x0f) 1978 if ((dev->chn_ready & 0x0f) != 0x0f)
@@ -2172,10 +2208,15 @@ static int s2255_board_init(struct s2255_dev *dev)
2172 /* query the firmware */ 2208 /* query the firmware */
2173 fw_ver = s2255_get_fx2fw(dev); 2209 fw_ver = s2255_get_fx2fw(dev);
2174 2210
2175 printk(KERN_INFO "2255 usb firmware version %d \n", fw_ver); 2211 printk(KERN_INFO "2255 usb firmware version %d.%d\n",
2176 if (fw_ver < CUR_USB_FWVER) 2212 (fw_ver >> 8) & 0xff,
2213 fw_ver & 0xff);
2214
2215 if (fw_ver < S2255_CUR_USB_FWVER)
2177 dev_err(&dev->udev->dev, 2216 dev_err(&dev->udev->dev,
2178 "usb firmware not up to date %d\n", fw_ver); 2217 "usb firmware not up to date %d.%d\n",
2218 (fw_ver >> 8) & 0xff,
2219 fw_ver & 0xff);
2179 2220
2180 for (j = 0; j < MAX_CHANNELS; j++) { 2221 for (j = 0; j < MAX_CHANNELS; j++) {
2181 dev->b_acquire[j] = 0; 2222 dev->b_acquire[j] = 0;
@@ -2240,8 +2281,10 @@ static void read_pipe_completion(struct urb *purb)
2240 return; 2281 return;
2241 } 2282 }
2242 status = purb->status; 2283 status = purb->status;
2243 if (status != 0) { 2284 /* if shutting down, do not resubmit, exit immediately */
2244 dprintk(2, "read_pipe_completion: err\n"); 2285 if (status == -ESHUTDOWN) {
2286 dprintk(2, "read_pipe_completion: err shutdown\n");
2287 pipe_info->err_count++;
2245 return; 2288 return;
2246 } 2289 }
2247 2290
@@ -2250,9 +2293,13 @@ static void read_pipe_completion(struct urb *purb)
2250 return; 2293 return;
2251 } 2294 }
2252 2295
2253 s2255_read_video_callback(dev, pipe_info); 2296 if (status == 0)
2297 s2255_read_video_callback(dev, pipe_info);
2298 else {
2299 pipe_info->err_count++;
2300 dprintk(1, "s2255drv: failed URB %d\n", status);
2301 }
2254 2302
2255 pipe_info->err_count = 0;
2256 pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint); 2303 pipe = usb_rcvbulkpipe(dev->udev, dev->read_endpoint);
2257 /* reuse urb */ 2304 /* reuse urb */
2258 usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev, 2305 usb_fill_bulk_urb(pipe_info->stream_urb, dev->udev,
@@ -2264,7 +2311,6 @@ static void read_pipe_completion(struct urb *purb)
2264 if (pipe_info->state != 0) { 2311 if (pipe_info->state != 0) {
2265 if (usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL)) { 2312 if (usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL)) {
2266 dev_err(&dev->udev->dev, "error submitting urb\n"); 2313 dev_err(&dev->udev->dev, "error submitting urb\n");
2267 usb_free_urb(pipe_info->stream_urb);
2268 } 2314 }
2269 } else { 2315 } else {
2270 dprintk(2, "read pipe complete state 0\n"); 2316 dprintk(2, "read pipe complete state 0\n");
@@ -2283,8 +2329,7 @@ static int s2255_start_readpipe(struct s2255_dev *dev)
2283 2329
2284 for (i = 0; i < MAX_PIPE_BUFFERS; i++) { 2330 for (i = 0; i < MAX_PIPE_BUFFERS; i++) {
2285 pipe_info->state = 1; 2331 pipe_info->state = 1;
2286 pipe_info->buf_index = (u32) i; 2332 pipe_info->err_count = 0;
2287 pipe_info->priority_set = 0;
2288 pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL); 2333 pipe_info->stream_urb = usb_alloc_urb(0, GFP_KERNEL);
2289 if (!pipe_info->stream_urb) { 2334 if (!pipe_info->stream_urb) {
2290 dev_err(&dev->udev->dev, 2335 dev_err(&dev->udev->dev,
@@ -2298,7 +2343,6 @@ static int s2255_start_readpipe(struct s2255_dev *dev)
2298 pipe_info->cur_transfer_size, 2343 pipe_info->cur_transfer_size,
2299 read_pipe_completion, pipe_info); 2344 read_pipe_completion, pipe_info);
2300 2345
2301 pipe_info->urb_size = sizeof(pipe_info->stream_urb);
2302 dprintk(4, "submitting URB %p\n", pipe_info->stream_urb); 2346 dprintk(4, "submitting URB %p\n", pipe_info->stream_urb);
2303 retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL); 2347 retval = usb_submit_urb(pipe_info->stream_urb, GFP_KERNEL);
2304 if (retval) { 2348 if (retval) {
@@ -2403,8 +2447,6 @@ static void s2255_stop_readpipe(struct s2255_dev *dev)
2403 if (pipe_info->state == 0) 2447 if (pipe_info->state == 0)
2404 continue; 2448 continue;
2405 pipe_info->state = 0; 2449 pipe_info->state = 0;
2406 pipe_info->prev_state = 1;
2407
2408 } 2450 }
2409 } 2451 }
2410 2452
@@ -2542,7 +2584,9 @@ static int s2255_probe(struct usb_interface *interface,
2542 s2255_probe_v4l(dev); 2584 s2255_probe_v4l(dev);
2543 usb_reset_device(dev->udev); 2585 usb_reset_device(dev->udev);
2544 /* load 2255 board specific */ 2586 /* load 2255 board specific */
2545 s2255_board_init(dev); 2587 retval = s2255_board_init(dev);
2588 if (retval)
2589 goto error;
2546 2590
2547 dprintk(4, "before probe done %p\n", dev); 2591 dprintk(4, "before probe done %p\n", dev);
2548 spin_lock_init(&dev->slock); 2592 spin_lock_init(&dev->slock);
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 0ba68987bfce..5bcce092e804 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -44,6 +44,7 @@ config VIDEO_SAA7134_DVB
44 select DVB_LNBP21 if !DVB_FE_CUSTOMISE 44 select DVB_LNBP21 if !DVB_FE_CUSTOMISE
45 select DVB_ZL10353 if !DVB_FE_CUSTOMISE 45 select DVB_ZL10353 if !DVB_FE_CUSTOMISE
46 select DVB_LGDT3305 if !DVB_FE_CUSTOMISE 46 select DVB_LGDT3305 if !DVB_FE_CUSTOMISE
47 select DVB_TDA10048 if !DVB_FE_CUSTOMISE
47 select MEDIA_TUNER_TDA18271 if !MEDIA_TUNER_CUSTOMISE 48 select MEDIA_TUNER_TDA18271 if !MEDIA_TUNER_CUSTOMISE
48 select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE 49 select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE
49 ---help--- 50 ---help---
diff --git a/drivers/media/video/saa7134/Makefile b/drivers/media/video/saa7134/Makefile
index 3dbaa19a6d00..604158a8c235 100644
--- a/drivers/media/video/saa7134/Makefile
+++ b/drivers/media/video/saa7134/Makefile
@@ -3,8 +3,7 @@ saa7134-objs := saa7134-cards.o saa7134-core.o saa7134-i2c.o \
3 saa7134-ts.o saa7134-tvaudio.o saa7134-vbi.o \ 3 saa7134-ts.o saa7134-tvaudio.o saa7134-vbi.o \
4 saa7134-video.o saa7134-input.o 4 saa7134-video.o saa7134-input.o
5 5
6obj-$(CONFIG_VIDEO_SAA7134) += saa7134.o saa7134-empress.o \ 6obj-$(CONFIG_VIDEO_SAA7134) += saa6752hs.o saa7134.o saa7134-empress.o
7 saa6752hs.o
8 7
9obj-$(CONFIG_VIDEO_SAA7134_ALSA) += saa7134-alsa.o 8obj-$(CONFIG_VIDEO_SAA7134_ALSA) += saa7134-alsa.o
10 9
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index fdb19449d269..06861b782b95 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -1669,6 +1669,39 @@ struct saa7134_board saa7134_boards[] = {
1669 .amux = LINE1, 1669 .amux = LINE1,
1670 }, 1670 },
1671 }, 1671 },
1672 [SAA7134_BOARD_AVERMEDIA_CARDBUS_501] = {
1673 /* Oldrich Jedlicka <oldium.pro@seznam.cz> */
1674 .name = "AVerMedia Cardbus TV/Radio (E501R)",
1675 .audio_clock = 0x187de7,
1676 .tuner_type = TUNER_ALPS_TSBE5_PAL,
1677 .radio_type = TUNER_TEA5767,
1678 .tuner_addr = 0x61,
1679 .radio_addr = 0x60,
1680 .tda9887_conf = TDA9887_PRESENT,
1681 .gpiomask = 0x08000000,
1682 .inputs = { {
1683 .name = name_tv,
1684 .vmux = 1,
1685 .amux = TV,
1686 .tv = 1,
1687 .gpio = 0x08000000,
1688 }, {
1689 .name = name_comp1,
1690 .vmux = 3,
1691 .amux = LINE1,
1692 .gpio = 0x08000000,
1693 }, {
1694 .name = name_svideo,
1695 .vmux = 8,
1696 .amux = LINE1,
1697 .gpio = 0x08000000,
1698 } },
1699 .radio = {
1700 .name = name_radio,
1701 .amux = LINE2,
1702 .gpio = 0x00000000,
1703 },
1704 },
1672 [SAA7134_BOARD_CINERGY400_CARDBUS] = { 1705 [SAA7134_BOARD_CINERGY400_CARDBUS] = {
1673 .name = "Terratec Cinergy 400 mobile", 1706 .name = "Terratec Cinergy 400 mobile",
1674 .audio_clock = 0x187de7, 1707 .audio_clock = 0x187de7,
@@ -3331,13 +3364,15 @@ struct saa7134_board saa7134_boards[] = {
3331 }, 3364 },
3332 }, 3365 },
3333 [SAA7134_BOARD_HAUPPAUGE_HVR1110R3] = { 3366 [SAA7134_BOARD_HAUPPAUGE_HVR1110R3] = {
3334 .name = "Hauppauge WinTV-HVR1110r3", 3367 .name = "Hauppauge WinTV-HVR1110r3 DVB-T/Hybrid",
3335 .audio_clock = 0x00187de7, 3368 .audio_clock = 0x00187de7,
3336 .tuner_type = TUNER_PHILIPS_TDA8290, 3369 .tuner_type = TUNER_PHILIPS_TDA8290,
3337 .radio_type = UNSET, 3370 .radio_type = UNSET,
3338 .tuner_addr = ADDR_UNSET, 3371 .tuner_addr = ADDR_UNSET,
3339 .radio_addr = ADDR_UNSET, 3372 .radio_addr = ADDR_UNSET,
3340 .tuner_config = 3, 3373 .tuner_config = 3,
3374 .mpeg = SAA7134_MPEG_DVB,
3375 .ts_type = SAA7134_MPEG_TS_SERIAL,
3341 .gpiomask = 0x0800100, /* GPIO 21 is an INPUT */ 3376 .gpiomask = 0x0800100, /* GPIO 21 is an INPUT */
3342 .inputs = {{ 3377 .inputs = {{
3343 .name = name_tv, 3378 .name = name_tv,
@@ -4006,7 +4041,7 @@ struct saa7134_board saa7134_boards[] = {
4006 [SAA7134_BOARD_BEHOLD_505FM] = { 4041 [SAA7134_BOARD_BEHOLD_505FM] = {
4007 /* Beholder Intl. Ltd. 2008 */ 4042 /* Beholder Intl. Ltd. 2008 */
4008 /*Dmitry Belimov <d.belimov@gmail.com> */ 4043 /*Dmitry Belimov <d.belimov@gmail.com> */
4009 .name = "Beholder BeholdTV 505 FM/RDS", 4044 .name = "Beholder BeholdTV 505 FM",
4010 .audio_clock = 0x00200000, 4045 .audio_clock = 0x00200000,
4011 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, 4046 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
4012 .radio_type = UNSET, 4047 .radio_type = UNSET,
@@ -4019,6 +4054,40 @@ struct saa7134_board saa7134_boards[] = {
4019 .vmux = 3, 4054 .vmux = 3,
4020 .amux = LINE2, 4055 .amux = LINE2,
4021 .tv = 1, 4056 .tv = 1,
4057 }, {
4058 .name = name_comp1,
4059 .vmux = 1,
4060 .amux = LINE1,
4061 }, {
4062 .name = name_svideo,
4063 .vmux = 8,
4064 .amux = LINE1,
4065 } },
4066 .mute = {
4067 .name = name_mute,
4068 .amux = LINE1,
4069 },
4070 .radio = {
4071 .name = name_radio,
4072 .amux = LINE2,
4073 },
4074 },
4075 [SAA7134_BOARD_BEHOLD_505RDS] = {
4076 /* Beholder Intl. Ltd. 2008 */
4077 /*Dmitry Belimov <d.belimov@gmail.com> */
4078 .name = "Beholder BeholdTV 505 RDS",
4079 .audio_clock = 0x00200000,
4080 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
4081 .radio_type = UNSET,
4082 .tuner_addr = ADDR_UNSET,
4083 .radio_addr = ADDR_UNSET,
4084 .tda9887_conf = TDA9887_PRESENT,
4085 .gpiomask = 0x00008000,
4086 .inputs = {{
4087 .name = name_tv,
4088 .vmux = 3,
4089 .amux = LINE2,
4090 .tv = 1,
4022 },{ 4091 },{
4023 .name = name_comp1, 4092 .name = name_comp1,
4024 .vmux = 1, 4093 .vmux = 1,
@@ -4040,7 +4109,7 @@ struct saa7134_board saa7134_boards[] = {
4040 [SAA7134_BOARD_BEHOLD_507_9FM] = { 4109 [SAA7134_BOARD_BEHOLD_507_9FM] = {
4041 /* Beholder Intl. Ltd. 2008 */ 4110 /* Beholder Intl. Ltd. 2008 */
4042 /*Dmitry Belimov <d.belimov@gmail.com> */ 4111 /*Dmitry Belimov <d.belimov@gmail.com> */
4043 .name = "Beholder BeholdTV 507 FM/RDS / BeholdTV 509 FM", 4112 .name = "Beholder BeholdTV 507 FM / BeholdTV 509 FM",
4044 .audio_clock = 0x00187de7, 4113 .audio_clock = 0x00187de7,
4045 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, 4114 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
4046 .radio_type = UNSET, 4115 .radio_type = UNSET,
@@ -4067,6 +4136,66 @@ struct saa7134_board saa7134_boards[] = {
4067 .amux = LINE2, 4136 .amux = LINE2,
4068 }, 4137 },
4069 }, 4138 },
4139 [SAA7134_BOARD_BEHOLD_507RDS_MK5] = {
4140 /* Beholder Intl. Ltd. 2008 */
4141 /*Dmitry Belimov <d.belimov@gmail.com> */
4142 .name = "Beholder BeholdTV 507 RDS",
4143 .audio_clock = 0x00187de7,
4144 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
4145 .radio_type = UNSET,
4146 .tuner_addr = ADDR_UNSET,
4147 .radio_addr = ADDR_UNSET,
4148 .tda9887_conf = TDA9887_PRESENT,
4149 .gpiomask = 0x00008000,
4150 .inputs = {{
4151 .name = name_tv,
4152 .vmux = 3,
4153 .amux = TV,
4154 .tv = 1,
4155 }, {
4156 .name = name_comp1,
4157 .vmux = 1,
4158 .amux = LINE1,
4159 }, {
4160 .name = name_svideo,
4161 .vmux = 8,
4162 .amux = LINE1,
4163 } },
4164 .radio = {
4165 .name = name_radio,
4166 .amux = LINE2,
4167 },
4168 },
4169 [SAA7134_BOARD_BEHOLD_507RDS_MK3] = {
4170 /* Beholder Intl. Ltd. 2008 */
4171 /*Dmitry Belimov <d.belimov@gmail.com> */
4172 .name = "Beholder BeholdTV 507 RDS",
4173 .audio_clock = 0x00187de7,
4174 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
4175 .radio_type = UNSET,
4176 .tuner_addr = ADDR_UNSET,
4177 .radio_addr = ADDR_UNSET,
4178 .tda9887_conf = TDA9887_PRESENT,
4179 .gpiomask = 0x00008000,
4180 .inputs = {{
4181 .name = name_tv,
4182 .vmux = 3,
4183 .amux = TV,
4184 .tv = 1,
4185 }, {
4186 .name = name_comp1,
4187 .vmux = 1,
4188 .amux = LINE1,
4189 }, {
4190 .name = name_svideo,
4191 .vmux = 8,
4192 .amux = LINE1,
4193 } },
4194 .radio = {
4195 .name = name_radio,
4196 .amux = LINE2,
4197 },
4198 },
4070 [SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM] = { 4199 [SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM] = {
4071 /* Beholder Intl. Ltd. 2008 */ 4200 /* Beholder Intl. Ltd. 2008 */
4072 /*Dmitry Belimov <d.belimov@gmail.com> */ 4201 /*Dmitry Belimov <d.belimov@gmail.com> */
@@ -4101,9 +4230,121 @@ struct saa7134_board saa7134_boards[] = {
4101 .gpio = 0x000A8000, 4230 .gpio = 0x000A8000,
4102 }, 4231 },
4103 }, 4232 },
4104 [SAA7134_BOARD_BEHOLD_607_9FM] = { 4233 [SAA7134_BOARD_BEHOLD_607FM_MK3] = {
4234 /* Andrey Melnikoff <temnota@kmv.ru> */
4235 .name = "Beholder BeholdTV 607 FM",
4236 .audio_clock = 0x00187de7,
4237 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
4238 .radio_type = UNSET,
4239 .tuner_addr = ADDR_UNSET,
4240 .radio_addr = ADDR_UNSET,
4241 .tda9887_conf = TDA9887_PRESENT,
4242 .inputs = {{
4243 .name = name_tv,
4244 .vmux = 3,
4245 .amux = TV,
4246 .tv = 1,
4247 }, {
4248 .name = name_comp1,
4249 .vmux = 1,
4250 .amux = LINE1,
4251 }, {
4252 .name = name_svideo,
4253 .vmux = 8,
4254 .amux = LINE1,
4255 } },
4256 .radio = {
4257 .name = name_radio,
4258 .amux = LINE2,
4259 },
4260 },
4261 [SAA7134_BOARD_BEHOLD_609FM_MK3] = {
4262 /* Andrey Melnikoff <temnota@kmv.ru> */
4263 .name = "Beholder BeholdTV 609 FM",
4264 .audio_clock = 0x00187de7,
4265 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
4266 .radio_type = UNSET,
4267 .tuner_addr = ADDR_UNSET,
4268 .radio_addr = ADDR_UNSET,
4269 .tda9887_conf = TDA9887_PRESENT,
4270 .inputs = {{
4271 .name = name_tv,
4272 .vmux = 3,
4273 .amux = TV,
4274 .tv = 1,
4275 }, {
4276 .name = name_comp1,
4277 .vmux = 1,
4278 .amux = LINE1,
4279 }, {
4280 .name = name_svideo,
4281 .vmux = 8,
4282 .amux = LINE1,
4283 } },
4284 .radio = {
4285 .name = name_radio,
4286 .amux = LINE2,
4287 },
4288 },
4289 [SAA7134_BOARD_BEHOLD_607FM_MK5] = {
4290 /* Andrey Melnikoff <temnota@kmv.ru> */
4291 .name = "Beholder BeholdTV 607 FM",
4292 .audio_clock = 0x00187de7,
4293 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
4294 .radio_type = UNSET,
4295 .tuner_addr = ADDR_UNSET,
4296 .radio_addr = ADDR_UNSET,
4297 .tda9887_conf = TDA9887_PRESENT,
4298 .inputs = {{
4299 .name = name_tv,
4300 .vmux = 3,
4301 .amux = TV,
4302 .tv = 1,
4303 }, {
4304 .name = name_comp1,
4305 .vmux = 1,
4306 .amux = LINE1,
4307 }, {
4308 .name = name_svideo,
4309 .vmux = 8,
4310 .amux = LINE1,
4311 } },
4312 .radio = {
4313 .name = name_radio,
4314 .amux = LINE2,
4315 },
4316 },
4317 [SAA7134_BOARD_BEHOLD_609FM_MK5] = {
4105 /* Andrey Melnikoff <temnota@kmv.ru> */ 4318 /* Andrey Melnikoff <temnota@kmv.ru> */
4106 .name = "Beholder BeholdTV 607 / BeholdTV 609", 4319 .name = "Beholder BeholdTV 609 FM",
4320 .audio_clock = 0x00187de7,
4321 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
4322 .radio_type = UNSET,
4323 .tuner_addr = ADDR_UNSET,
4324 .radio_addr = ADDR_UNSET,
4325 .tda9887_conf = TDA9887_PRESENT,
4326 .inputs = {{
4327 .name = name_tv,
4328 .vmux = 3,
4329 .amux = TV,
4330 .tv = 1,
4331 }, {
4332 .name = name_comp1,
4333 .vmux = 1,
4334 .amux = LINE1,
4335 }, {
4336 .name = name_svideo,
4337 .vmux = 8,
4338 .amux = LINE1,
4339 } },
4340 .radio = {
4341 .name = name_radio,
4342 .amux = LINE2,
4343 },
4344 },
4345 [SAA7134_BOARD_BEHOLD_607RDS_MK3] = {
4346 /* Andrey Melnikoff <temnota@kmv.ru> */
4347 .name = "Beholder BeholdTV 607 RDS",
4107 .audio_clock = 0x00187de7, 4348 .audio_clock = 0x00187de7,
4108 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, 4349 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
4109 .radio_type = UNSET, 4350 .radio_type = UNSET,
@@ -4115,6 +4356,90 @@ struct saa7134_board saa7134_boards[] = {
4115 .vmux = 3, 4356 .vmux = 3,
4116 .amux = TV, 4357 .amux = TV,
4117 .tv = 1, 4358 .tv = 1,
4359 }, {
4360 .name = name_comp1,
4361 .vmux = 1,
4362 .amux = LINE1,
4363 }, {
4364 .name = name_svideo,
4365 .vmux = 8,
4366 .amux = LINE1,
4367 } },
4368 .radio = {
4369 .name = name_radio,
4370 .amux = LINE2,
4371 },
4372 },
4373 [SAA7134_BOARD_BEHOLD_609RDS_MK3] = {
4374 /* Andrey Melnikoff <temnota@kmv.ru> */
4375 .name = "Beholder BeholdTV 609 RDS",
4376 .audio_clock = 0x00187de7,
4377 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
4378 .radio_type = UNSET,
4379 .tuner_addr = ADDR_UNSET,
4380 .radio_addr = ADDR_UNSET,
4381 .tda9887_conf = TDA9887_PRESENT,
4382 .inputs = {{
4383 .name = name_tv,
4384 .vmux = 3,
4385 .amux = TV,
4386 .tv = 1,
4387 }, {
4388 .name = name_comp1,
4389 .vmux = 1,
4390 .amux = LINE1,
4391 }, {
4392 .name = name_svideo,
4393 .vmux = 8,
4394 .amux = LINE1,
4395 } },
4396 .radio = {
4397 .name = name_radio,
4398 .amux = LINE2,
4399 },
4400 },
4401 [SAA7134_BOARD_BEHOLD_607RDS_MK5] = {
4402 /* Andrey Melnikoff <temnota@kmv.ru> */
4403 .name = "Beholder BeholdTV 607 RDS",
4404 .audio_clock = 0x00187de7,
4405 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
4406 .radio_type = UNSET,
4407 .tuner_addr = ADDR_UNSET,
4408 .radio_addr = ADDR_UNSET,
4409 .tda9887_conf = TDA9887_PRESENT,
4410 .inputs = {{
4411 .name = name_tv,
4412 .vmux = 3,
4413 .amux = TV,
4414 .tv = 1,
4415 }, {
4416 .name = name_comp1,
4417 .vmux = 1,
4418 .amux = LINE1,
4419 }, {
4420 .name = name_svideo,
4421 .vmux = 8,
4422 .amux = LINE1,
4423 } },
4424 .radio = {
4425 .name = name_radio,
4426 .amux = LINE2,
4427 },
4428 },
4429 [SAA7134_BOARD_BEHOLD_609RDS_MK5] = {
4430 /* Andrey Melnikoff <temnota@kmv.ru> */
4431 .name = "Beholder BeholdTV 609 RDS",
4432 .audio_clock = 0x00187de7,
4433 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
4434 .radio_type = UNSET,
4435 .tuner_addr = ADDR_UNSET,
4436 .radio_addr = ADDR_UNSET,
4437 .tda9887_conf = TDA9887_PRESENT,
4438 .inputs = {{
4439 .name = name_tv,
4440 .vmux = 3,
4441 .amux = TV,
4442 .tv = 1,
4118 },{ 4443 },{
4119 .name = name_comp1, 4444 .name = name_comp1,
4120 .vmux = 1, 4445 .vmux = 1,
@@ -4133,6 +4458,7 @@ struct saa7134_board saa7134_boards[] = {
4133 /* Igor Kuznetsov <igk@igk.ru> */ 4458 /* Igor Kuznetsov <igk@igk.ru> */
4134 /* Andrey Melnikoff <temnota@kmv.ru> */ 4459 /* Andrey Melnikoff <temnota@kmv.ru> */
4135 /* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */ 4460 /* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */
4461 /* Alexey Osipov <lion-simba@pridelands.ru> */
4136 .name = "Beholder BeholdTV M6", 4462 .name = "Beholder BeholdTV M6",
4137 .audio_clock = 0x00187de7, 4463 .audio_clock = 0x00187de7,
4138 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, 4464 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
@@ -4207,10 +4533,10 @@ struct saa7134_board saa7134_boards[] = {
4207 /* Igor Kuznetsov <igk@igk.ru> */ 4533 /* Igor Kuznetsov <igk@igk.ru> */
4208 /* Andrey Melnikoff <temnota@kmv.ru> */ 4534 /* Andrey Melnikoff <temnota@kmv.ru> */
4209 /* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */ 4535 /* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */
4536 /* Alexey Osipov <lion-simba@pridelands.ru> */
4210 .name = "Beholder BeholdTV M6 Extra", 4537 .name = "Beholder BeholdTV M6 Extra",
4211 .audio_clock = 0x00187de7, 4538 .audio_clock = 0x00187de7,
4212 /* FIXME: Must be PHILIPS_FM1216ME_MK5*/ 4539 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* FIXME to MK5 */
4213 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
4214 .radio_type = UNSET, 4540 .radio_type = UNSET,
4215 .tuner_addr = ADDR_UNSET, 4541 .tuner_addr = ADDR_UNSET,
4216 .radio_addr = ADDR_UNSET, 4542 .radio_addr = ADDR_UNSET,
@@ -4465,7 +4791,6 @@ struct saa7134_board saa7134_boards[] = {
4465 .radio_type = UNSET, 4791 .radio_type = UNSET,
4466 .tuner_addr = ADDR_UNSET, 4792 .tuner_addr = ADDR_UNSET,
4467 .radio_addr = ADDR_UNSET, 4793 .radio_addr = ADDR_UNSET,
4468 .mpeg = SAA7134_MPEG_DVB,
4469 .inputs = {{ 4794 .inputs = {{
4470 .name = name_tv, 4795 .name = name_tv,
4471 .vmux = 3, 4796 .vmux = 3,
@@ -4753,6 +5078,44 @@ struct saa7134_board saa7134_boards[] = {
4753 .gpio = 0x01, 5078 .gpio = 0x01,
4754 }, 5079 },
4755 }, 5080 },
5081 [SAA7134_BOARD_AVERMEDIA_STUDIO_507UA] = {
5082 /* Andy Shevchenko <andy@smile.org.ua> */
5083 .name = "Avermedia AVerTV Studio 507UA",
5084 .audio_clock = 0x00187de7,
5085 .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* Should be MK5 */
5086 .radio_type = UNSET,
5087 .tuner_addr = ADDR_UNSET,
5088 .radio_addr = ADDR_UNSET,
5089 .tda9887_conf = TDA9887_PRESENT,
5090 .gpiomask = 0x03,
5091 .inputs = { {
5092 .name = name_tv,
5093 .vmux = 1,
5094 .amux = TV,
5095 .tv = 1,
5096 .gpio = 0x00,
5097 }, {
5098 .name = name_comp1,
5099 .vmux = 3,
5100 .amux = LINE1,
5101 .gpio = 0x00,
5102 }, {
5103 .name = name_svideo,
5104 .vmux = 8,
5105 .amux = LINE1,
5106 .gpio = 0x00,
5107 } },
5108 .radio = {
5109 .name = name_radio,
5110 .amux = LINE2,
5111 .gpio = 0x01,
5112 },
5113 .mute = {
5114 .name = name_mute,
5115 .amux = LINE1,
5116 .gpio = 0x00,
5117 },
5118 },
4756}; 5119};
4757 5120
4758const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards); 5121const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -5027,6 +5390,13 @@ struct pci_device_id saa7134_pci_tbl[] = {
5027 .subdevice = 0xd6ee, 5390 .subdevice = 0xd6ee,
5028 .driver_data = SAA7134_BOARD_AVERMEDIA_CARDBUS, 5391 .driver_data = SAA7134_BOARD_AVERMEDIA_CARDBUS,
5029 },{ 5392 },{
5393 /* AVerMedia CardBus */
5394 .vendor = PCI_VENDOR_ID_PHILIPS,
5395 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
5396 .subvendor = 0x1461, /* Avermedia Technologies Inc */
5397 .subdevice = 0xb7e9,
5398 .driver_data = SAA7134_BOARD_AVERMEDIA_CARDBUS_501,
5399 }, {
5030 /* TransGear 3000TV */ 5400 /* TransGear 3000TV */
5031 .vendor = PCI_VENDOR_ID_PHILIPS, 5401 .vendor = PCI_VENDOR_ID_PHILIPS,
5032 .device = PCI_DEVICE_ID_PHILIPS_SAA7130, 5402 .device = PCI_DEVICE_ID_PHILIPS_SAA7130,
@@ -5441,6 +5811,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
5441 .driver_data = SAA7134_BOARD_AVERMEDIA_STUDIO_507, 5811 .driver_data = SAA7134_BOARD_AVERMEDIA_STUDIO_507,
5442 },{ 5812 },{
5443 .vendor = PCI_VENDOR_ID_PHILIPS, 5813 .vendor = PCI_VENDOR_ID_PHILIPS,
5814 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
5815 .subvendor = 0x1461, /* Avermedia Technologies Inc */
5816 .subdevice = 0xa11b,
5817 .driver_data = SAA7134_BOARD_AVERMEDIA_STUDIO_507UA,
5818 }, {
5819 .vendor = PCI_VENDOR_ID_PHILIPS,
5444 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 5820 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
5445 .subvendor = 0x1043, 5821 .subvendor = 0x1043,
5446 .subdevice = 0x4876, 5822 .subdevice = 0x4876,
@@ -5647,14 +6023,8 @@ struct pci_device_id saa7134_pci_tbl[] = {
5647 .vendor = PCI_VENDOR_ID_PHILIPS, 6023 .vendor = PCI_VENDOR_ID_PHILIPS,
5648 .device = PCI_DEVICE_ID_PHILIPS_SAA7130, 6024 .device = PCI_DEVICE_ID_PHILIPS_SAA7130,
5649 .subvendor = 0x0000, 6025 .subvendor = 0x0000,
5650 .subdevice = 0x5051,
5651 .driver_data = SAA7134_BOARD_BEHOLD_505FM,
5652 },{
5653 .vendor = PCI_VENDOR_ID_PHILIPS,
5654 .device = PCI_DEVICE_ID_PHILIPS_SAA7130,
5655 .subvendor = 0x0000,
5656 .subdevice = 0x505B, 6026 .subdevice = 0x505B,
5657 .driver_data = SAA7134_BOARD_BEHOLD_505FM, 6027 .driver_data = SAA7134_BOARD_BEHOLD_505RDS,
5658 },{ 6028 },{
5659 .vendor = PCI_VENDOR_ID_PHILIPS, 6029 .vendor = PCI_VENDOR_ID_PHILIPS,
5660 .device = PCI_DEVICE_ID_PHILIPS_SAA7130, 6030 .device = PCI_DEVICE_ID_PHILIPS_SAA7130,
@@ -5666,13 +6036,13 @@ struct pci_device_id saa7134_pci_tbl[] = {
5666 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 6036 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
5667 .subvendor = 0x0000, 6037 .subvendor = 0x0000,
5668 .subdevice = 0x5071, 6038 .subdevice = 0x5071,
5669 .driver_data = SAA7134_BOARD_BEHOLD_507_9FM, 6039 .driver_data = SAA7134_BOARD_BEHOLD_507RDS_MK3,
5670 },{ 6040 },{
5671 .vendor = PCI_VENDOR_ID_PHILIPS, 6041 .vendor = PCI_VENDOR_ID_PHILIPS,
5672 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 6042 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
5673 .subvendor = 0x0000, 6043 .subvendor = 0x0000,
5674 .subdevice = 0x507B, 6044 .subdevice = 0x507B,
5675 .driver_data = SAA7134_BOARD_BEHOLD_507_9FM, 6045 .driver_data = SAA7134_BOARD_BEHOLD_507RDS_MK5,
5676 },{ 6046 },{
5677 .vendor = PCI_VENDOR_ID_PHILIPS, 6047 .vendor = PCI_VENDOR_ID_PHILIPS,
5678 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 6048 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -5696,49 +6066,49 @@ struct pci_device_id saa7134_pci_tbl[] = {
5696 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 6066 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
5697 .subvendor = 0x5ace, 6067 .subvendor = 0x5ace,
5698 .subdevice = 0x6070, 6068 .subdevice = 0x6070,
5699 .driver_data = SAA7134_BOARD_BEHOLD_607_9FM, 6069 .driver_data = SAA7134_BOARD_BEHOLD_607FM_MK3,
5700 },{ 6070 },{
5701 .vendor = PCI_VENDOR_ID_PHILIPS, 6071 .vendor = PCI_VENDOR_ID_PHILIPS,
5702 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 6072 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
5703 .subvendor = 0x5ace, 6073 .subvendor = 0x5ace,
5704 .subdevice = 0x6071, 6074 .subdevice = 0x6071,
5705 .driver_data = SAA7134_BOARD_BEHOLD_607_9FM, 6075 .driver_data = SAA7134_BOARD_BEHOLD_607FM_MK5,
5706 },{ 6076 },{
5707 .vendor = PCI_VENDOR_ID_PHILIPS, 6077 .vendor = PCI_VENDOR_ID_PHILIPS,
5708 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 6078 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
5709 .subvendor = 0x5ace, 6079 .subvendor = 0x5ace,
5710 .subdevice = 0x6072, 6080 .subdevice = 0x6072,
5711 .driver_data = SAA7134_BOARD_BEHOLD_607_9FM, 6081 .driver_data = SAA7134_BOARD_BEHOLD_607RDS_MK3,
5712 },{ 6082 },{
5713 .vendor = PCI_VENDOR_ID_PHILIPS, 6083 .vendor = PCI_VENDOR_ID_PHILIPS,
5714 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 6084 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
5715 .subvendor = 0x5ace, 6085 .subvendor = 0x5ace,
5716 .subdevice = 0x6073, 6086 .subdevice = 0x6073,
5717 .driver_data = SAA7134_BOARD_BEHOLD_607_9FM, 6087 .driver_data = SAA7134_BOARD_BEHOLD_607RDS_MK5,
5718 },{ 6088 },{
5719 .vendor = PCI_VENDOR_ID_PHILIPS, 6089 .vendor = PCI_VENDOR_ID_PHILIPS,
5720 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 6090 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
5721 .subvendor = 0x5ace, 6091 .subvendor = 0x5ace,
5722 .subdevice = 0x6090, 6092 .subdevice = 0x6090,
5723 .driver_data = SAA7134_BOARD_BEHOLD_607_9FM, 6093 .driver_data = SAA7134_BOARD_BEHOLD_609FM_MK3,
5724 },{ 6094 },{
5725 .vendor = PCI_VENDOR_ID_PHILIPS, 6095 .vendor = PCI_VENDOR_ID_PHILIPS,
5726 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 6096 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
5727 .subvendor = 0x5ace, 6097 .subvendor = 0x5ace,
5728 .subdevice = 0x6091, 6098 .subdevice = 0x6091,
5729 .driver_data = SAA7134_BOARD_BEHOLD_607_9FM, 6099 .driver_data = SAA7134_BOARD_BEHOLD_609FM_MK5,
5730 },{ 6100 },{
5731 .vendor = PCI_VENDOR_ID_PHILIPS, 6101 .vendor = PCI_VENDOR_ID_PHILIPS,
5732 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 6102 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
5733 .subvendor = 0x5ace, 6103 .subvendor = 0x5ace,
5734 .subdevice = 0x6092, 6104 .subdevice = 0x6092,
5735 .driver_data = SAA7134_BOARD_BEHOLD_607_9FM, 6105 .driver_data = SAA7134_BOARD_BEHOLD_609RDS_MK3,
5736 },{ 6106 },{
5737 .vendor = PCI_VENDOR_ID_PHILIPS, 6107 .vendor = PCI_VENDOR_ID_PHILIPS,
5738 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 6108 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
5739 .subvendor = 0x5ace, 6109 .subvendor = 0x5ace,
5740 .subdevice = 0x6093, 6110 .subdevice = 0x6093,
5741 .driver_data = SAA7134_BOARD_BEHOLD_607_9FM, 6111 .driver_data = SAA7134_BOARD_BEHOLD_609RDS_MK5,
5742 },{ 6112 },{
5743 .vendor = PCI_VENDOR_ID_PHILIPS, 6113 .vendor = PCI_VENDOR_ID_PHILIPS,
5744 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 6114 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
@@ -5832,6 +6202,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
5832 }, { 6202 }, {
5833 .vendor = PCI_VENDOR_ID_PHILIPS, 6203 .vendor = PCI_VENDOR_ID_PHILIPS,
5834 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 6204 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
6205 .subvendor = 0x1461, /* Avermedia Technologies Inc */
6206 .subdevice = 0xf736,
6207 .driver_data = SAA7134_BOARD_AVERMEDIA_M103,
6208 }, {
6209 .vendor = PCI_VENDOR_ID_PHILIPS,
6210 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
5835 .subvendor = 0x1043, 6211 .subvendor = 0x1043,
5836 .subdevice = 0x4878, /* REV:1.02G */ 6212 .subdevice = 0x4878, /* REV:1.02G */
5837 .driver_data = SAA7134_BOARD_ASUSTeK_TIGER_3IN1, 6213 .driver_data = SAA7134_BOARD_ASUSTeK_TIGER_3IN1,
@@ -6114,7 +6490,6 @@ int saa7134_board_init1(struct saa7134_dev *dev)
6114 case SAA7134_BOARD_VIDEOMATE_DVBT_300: 6490 case SAA7134_BOARD_VIDEOMATE_DVBT_300:
6115 case SAA7134_BOARD_VIDEOMATE_DVBT_200: 6491 case SAA7134_BOARD_VIDEOMATE_DVBT_200:
6116 case SAA7134_BOARD_VIDEOMATE_DVBT_200A: 6492 case SAA7134_BOARD_VIDEOMATE_DVBT_200A:
6117 case SAA7134_BOARD_VIDEOMATE_T750:
6118 case SAA7134_BOARD_MANLI_MTV001: 6493 case SAA7134_BOARD_MANLI_MTV001:
6119 case SAA7134_BOARD_MANLI_MTV002: 6494 case SAA7134_BOARD_MANLI_MTV002:
6120 case SAA7134_BOARD_BEHOLD_409FM: 6495 case SAA7134_BOARD_BEHOLD_409FM:
@@ -6142,7 +6517,10 @@ int saa7134_board_init1(struct saa7134_dev *dev)
6142 case SAA7134_BOARD_BEHOLD_407FM: 6517 case SAA7134_BOARD_BEHOLD_407FM:
6143 case SAA7134_BOARD_BEHOLD_409: 6518 case SAA7134_BOARD_BEHOLD_409:
6144 case SAA7134_BOARD_BEHOLD_505FM: 6519 case SAA7134_BOARD_BEHOLD_505FM:
6520 case SAA7134_BOARD_BEHOLD_505RDS:
6145 case SAA7134_BOARD_BEHOLD_507_9FM: 6521 case SAA7134_BOARD_BEHOLD_507_9FM:
6522 case SAA7134_BOARD_BEHOLD_507RDS_MK3:
6523 case SAA7134_BOARD_BEHOLD_507RDS_MK5:
6146 case SAA7134_BOARD_GENIUS_TVGO_A11MCE: 6524 case SAA7134_BOARD_GENIUS_TVGO_A11MCE:
6147 case SAA7134_BOARD_REAL_ANGEL_220: 6525 case SAA7134_BOARD_REAL_ANGEL_220:
6148 case SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG: 6526 case SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG:
@@ -6196,6 +6574,16 @@ int saa7134_board_init1(struct saa7134_dev *dev)
6196 saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0xffffffff, 0xffffffff); 6574 saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0xffffffff, 0xffffffff);
6197 msleep(10); 6575 msleep(10);
6198 break; 6576 break;
6577 case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
6578 /* power-down tuner chip */
6579 saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x08400000, 0x08400000);
6580 saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x08400000, 0);
6581 msleep(10);
6582 saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x08400000, 0x08400000);
6583 saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x08400000, 0x08400000);
6584 msleep(10);
6585 dev->has_remote = SAA7134_REMOTE_I2C;
6586 break;
6199 case SAA7134_BOARD_AVERMEDIA_CARDBUS_506: 6587 case SAA7134_BOARD_AVERMEDIA_CARDBUS_506:
6200 saa7134_set_gpio(dev, 23, 0); 6588 saa7134_set_gpio(dev, 23, 0);
6201 msleep(10); 6589 msleep(10);
@@ -6253,7 +6641,14 @@ int saa7134_board_init1(struct saa7134_dev *dev)
6253 case SAA7134_BOARD_UPMOST_PURPLE_TV: 6641 case SAA7134_BOARD_UPMOST_PURPLE_TV:
6254 case SAA7134_BOARD_MSI_TVATANYWHERE_PLUS: 6642 case SAA7134_BOARD_MSI_TVATANYWHERE_PLUS:
6255 case SAA7134_BOARD_HAUPPAUGE_HVR1110: 6643 case SAA7134_BOARD_HAUPPAUGE_HVR1110:
6256 case SAA7134_BOARD_BEHOLD_607_9FM: 6644 case SAA7134_BOARD_BEHOLD_607FM_MK3:
6645 case SAA7134_BOARD_BEHOLD_607FM_MK5:
6646 case SAA7134_BOARD_BEHOLD_609FM_MK3:
6647 case SAA7134_BOARD_BEHOLD_609FM_MK5:
6648 case SAA7134_BOARD_BEHOLD_607RDS_MK3:
6649 case SAA7134_BOARD_BEHOLD_607RDS_MK5:
6650 case SAA7134_BOARD_BEHOLD_609RDS_MK3:
6651 case SAA7134_BOARD_BEHOLD_609RDS_MK5:
6257 case SAA7134_BOARD_BEHOLD_M6: 6652 case SAA7134_BOARD_BEHOLD_M6:
6258 case SAA7134_BOARD_BEHOLD_M63: 6653 case SAA7134_BOARD_BEHOLD_M63:
6259 case SAA7134_BOARD_BEHOLD_M6_EXTRA: 6654 case SAA7134_BOARD_BEHOLD_M6_EXTRA:
@@ -6635,6 +7030,7 @@ int saa7134_board_init2(struct saa7134_dev *dev)
6635 7030
6636 switch (dev->board) { 7031 switch (dev->board) {
6637 case SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM: 7032 case SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM:
7033 case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
6638 { 7034 {
6639 struct v4l2_priv_tun_config tea5767_cfg; 7035 struct v4l2_priv_tun_config tea5767_cfg;
6640 struct tea5767_ctrl ctl; 7036 struct tea5767_ctrl ctl;
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 2def6fec814b..94a023a14bbc 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -331,6 +331,10 @@ void saa7134_buffer_next(struct saa7134_dev *dev,
331 dprintk("buffer_next %p\n",NULL); 331 dprintk("buffer_next %p\n",NULL);
332 saa7134_set_dmabits(dev); 332 saa7134_set_dmabits(dev);
333 del_timer(&q->timeout); 333 del_timer(&q->timeout);
334
335 if (card_has_mpeg(dev))
336 if (dev->ts_started)
337 saa7134_ts_stop(dev);
334 } 338 }
335} 339}
336 340
@@ -416,6 +420,19 @@ int saa7134_set_dmabits(struct saa7134_dev *dev)
416 ctrl |= SAA7134_MAIN_CTRL_TE5; 420 ctrl |= SAA7134_MAIN_CTRL_TE5;
417 irq |= SAA7134_IRQ1_INTE_RA2_1 | 421 irq |= SAA7134_IRQ1_INTE_RA2_1 |
418 SAA7134_IRQ1_INTE_RA2_0; 422 SAA7134_IRQ1_INTE_RA2_0;
423
424 /* dma: setup channel 5 (= TS) */
425
426 saa_writeb(SAA7134_TS_DMA0, (dev->ts.nr_packets - 1) & 0xff);
427 saa_writeb(SAA7134_TS_DMA1,
428 ((dev->ts.nr_packets - 1) >> 8) & 0xff);
429 /* TSNOPIT=0, TSCOLAP=0 */
430 saa_writeb(SAA7134_TS_DMA2,
431 (((dev->ts.nr_packets - 1) >> 16) & 0x3f) | 0x00);
432 saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
433 saa_writel(SAA7134_RS_CONTROL(5), SAA7134_RS_CONTROL_BURST_16 |
434 SAA7134_RS_CONTROL_ME |
435 (dev->ts.pt_ts.dma >> 12));
419 } 436 }
420 437
421 /* set task conditions + field handling */ 438 /* set task conditions + field handling */
@@ -775,7 +792,6 @@ static struct video_device *vdev_init(struct saa7134_dev *dev,
775 if (NULL == vfd) 792 if (NULL == vfd)
776 return NULL; 793 return NULL;
777 *vfd = *template; 794 *vfd = *template;
778 vfd->minor = -1;
779 vfd->v4l2_dev = &dev->v4l2_dev; 795 vfd->v4l2_dev = &dev->v4l2_dev;
780 vfd->release = video_device_release; 796 vfd->release = video_device_release;
781 vfd->debug = video_debug; 797 vfd->debug = video_debug;
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 4eff1ca8593c..31930f26ffc7 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -48,6 +48,7 @@
48#include "isl6405.h" 48#include "isl6405.h"
49#include "lnbp21.h" 49#include "lnbp21.h"
50#include "tuner-simple.h" 50#include "tuner-simple.h"
51#include "tda10048.h"
51#include "tda18271.h" 52#include "tda18271.h"
52#include "lgdt3305.h" 53#include "lgdt3305.h"
53#include "tda8290.h" 54#include "tda8290.h"
@@ -978,6 +979,18 @@ static struct lgdt3305_config hcw_lgdt3305_config = {
978 .vsb_if_khz = 3250, 979 .vsb_if_khz = 3250,
979}; 980};
980 981
982static struct tda10048_config hcw_tda10048_config = {
983 .demod_address = 0x10 >> 1,
984 .output_mode = TDA10048_SERIAL_OUTPUT,
985 .fwbulkwritelen = TDA10048_BULKWRITE_200,
986 .inversion = TDA10048_INVERSION_ON,
987 .dtv6_if_freq_khz = TDA10048_IF_3300,
988 .dtv7_if_freq_khz = TDA10048_IF_3500,
989 .dtv8_if_freq_khz = TDA10048_IF_4000,
990 .clk_freq_khz = TDA10048_CLK_16000,
991 .disable_gate_access = 1,
992};
993
981static struct tda18271_std_map hauppauge_tda18271_std_map = { 994static struct tda18271_std_map hauppauge_tda18271_std_map = {
982 .atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 4, 995 .atsc_6 = { .if_freq = 3250, .agc_mode = 3, .std = 4,
983 .if_lvl = 1, .rfagc_top = 0x58, }, 996 .if_lvl = 1, .rfagc_top = 0x58, },
@@ -1106,6 +1119,19 @@ static int dvb_init(struct saa7134_dev *dev)
1106 &tda827x_cfg_2) < 0) 1119 &tda827x_cfg_2) < 0)
1107 goto dettach_frontend; 1120 goto dettach_frontend;
1108 break; 1121 break;
1122 case SAA7134_BOARD_HAUPPAUGE_HVR1110R3:
1123 fe0->dvb.frontend = dvb_attach(tda10048_attach,
1124 &hcw_tda10048_config,
1125 &dev->i2c_adap);
1126 if (fe0->dvb.frontend != NULL) {
1127 dvb_attach(tda829x_attach, fe0->dvb.frontend,
1128 &dev->i2c_adap, 0x4b,
1129 &tda829x_no_probe);
1130 dvb_attach(tda18271_attach, fe0->dvb.frontend,
1131 0x60, &dev->i2c_adap,
1132 &hcw_tda18271_config);
1133 }
1134 break;
1109 case SAA7134_BOARD_PHILIPS_TIGER: 1135 case SAA7134_BOARD_PHILIPS_TIGER:
1110 if (configure_tda827x_fe(dev, &philips_tiger_config, 1136 if (configure_tda827x_fe(dev, &philips_tiger_config,
1111 &tda827x_cfg_0) < 0) 1137 &tda827x_cfg_0) < 0)
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 9db3472667e5..add1757f8930 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -255,6 +255,16 @@ static int empress_s_fmt_vid_cap(struct file *file, void *priv,
255 return 0; 255 return 0;
256} 256}
257 257
258static int empress_try_fmt_vid_cap(struct file *file, void *priv,
259 struct v4l2_format *f)
260{
261 struct saa7134_dev *dev = file->private_data;
262
263 f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
264 f->fmt.pix.sizeimage = TS_PACKET_SIZE * dev->ts.nr_packets;
265
266 return 0;
267}
258 268
259static int empress_reqbufs(struct file *file, void *priv, 269static int empress_reqbufs(struct file *file, void *priv,
260 struct v4l2_requestbuffers *p) 270 struct v4l2_requestbuffers *p)
@@ -450,6 +460,7 @@ static const struct v4l2_file_operations ts_fops =
450static const struct v4l2_ioctl_ops ts_ioctl_ops = { 460static const struct v4l2_ioctl_ops ts_ioctl_ops = {
451 .vidioc_querycap = empress_querycap, 461 .vidioc_querycap = empress_querycap,
452 .vidioc_enum_fmt_vid_cap = empress_enum_fmt_vid_cap, 462 .vidioc_enum_fmt_vid_cap = empress_enum_fmt_vid_cap,
463 .vidioc_try_fmt_vid_cap = empress_try_fmt_vid_cap,
453 .vidioc_s_fmt_vid_cap = empress_s_fmt_vid_cap, 464 .vidioc_s_fmt_vid_cap = empress_s_fmt_vid_cap,
454 .vidioc_g_fmt_vid_cap = empress_g_fmt_vid_cap, 465 .vidioc_g_fmt_vid_cap = empress_g_fmt_vid_cap,
455 .vidioc_reqbufs = empress_reqbufs, 466 .vidioc_reqbufs = empress_reqbufs,
@@ -491,11 +502,8 @@ static void empress_signal_update(struct work_struct *work)
491 502
492 if (dev->nosignal) { 503 if (dev->nosignal) {
493 dprintk("no video signal\n"); 504 dprintk("no video signal\n");
494 ts_reset_encoder(dev);
495 } else { 505 } else {
496 dprintk("video signal acquired\n"); 506 dprintk("video signal acquired\n");
497 if (atomic_read(&dev->empress_users))
498 ts_init_encoder(dev);
499 } 507 }
500} 508}
501 509
diff --git a/drivers/media/video/saa7134/saa7134-i2c.c b/drivers/media/video/saa7134/saa7134-i2c.c
index f3e285aa2fb4..8096dace5f6c 100644
--- a/drivers/media/video/saa7134/saa7134-i2c.c
+++ b/drivers/media/video/saa7134/saa7134-i2c.c
@@ -259,7 +259,7 @@ static int saa7134_i2c_xfer(struct i2c_adapter *i2c_adap,
259 /* workaround for a saa7134 i2c bug 259 /* workaround for a saa7134 i2c bug
260 * needed to talk to the mt352 demux 260 * needed to talk to the mt352 demux
261 * thanks to pinnacle for the hint */ 261 * thanks to pinnacle for the hint */
262 int quirk = 0xfd; 262 int quirk = 0xfe;
263 d1printk(" [%02x quirk]",quirk); 263 d1printk(" [%02x quirk]",quirk);
264 i2c_send_byte(dev,START,quirk); 264 i2c_send_byte(dev,START,quirk);
265 i2c_recv_byte(dev); 265 i2c_recv_byte(dev);
@@ -321,33 +321,6 @@ static u32 functionality(struct i2c_adapter *adap)
321 return I2C_FUNC_SMBUS_EMUL; 321 return I2C_FUNC_SMBUS_EMUL;
322} 322}
323 323
324static int attach_inform(struct i2c_client *client)
325{
326 struct saa7134_dev *dev = client->adapter->algo_data;
327
328 d1printk( "%s i2c attach [addr=0x%x,client=%s]\n",
329 client->driver->driver.name, client->addr, client->name);
330
331 /* Am I an i2c remote control? */
332
333 switch (client->addr) {
334 case 0x7a:
335 case 0x47:
336 case 0x71:
337 case 0x2d:
338 case 0x30:
339 {
340 struct IR_i2c *ir = i2c_get_clientdata(client);
341 d1printk("%s i2c IR detected (%s).\n",
342 client->driver->driver.name, ir->phys);
343 saa7134_set_i2c_ir(dev,ir);
344 break;
345 }
346 }
347
348 return 0;
349}
350
351static struct i2c_algorithm saa7134_algo = { 324static struct i2c_algorithm saa7134_algo = {
352 .master_xfer = saa7134_i2c_xfer, 325 .master_xfer = saa7134_i2c_xfer,
353 .functionality = functionality, 326 .functionality = functionality,
@@ -358,7 +331,6 @@ static struct i2c_adapter saa7134_adap_template = {
358 .name = "saa7134", 331 .name = "saa7134",
359 .id = I2C_HW_SAA7134, 332 .id = I2C_HW_SAA7134,
360 .algo = &saa7134_algo, 333 .algo = &saa7134_algo,
361 .client_register = attach_inform,
362}; 334};
363 335
364static struct i2c_client saa7134_client_template = { 336static struct i2c_client saa7134_client_template = {
@@ -433,6 +405,9 @@ int saa7134_i2c_register(struct saa7134_dev *dev)
433 saa7134_i2c_eeprom(dev,dev->eedata,sizeof(dev->eedata)); 405 saa7134_i2c_eeprom(dev,dev->eedata,sizeof(dev->eedata));
434 if (i2c_scan) 406 if (i2c_scan)
435 do_i2c_scan(dev->name,&dev->i2c_client); 407 do_i2c_scan(dev->name,&dev->i2c_client);
408
409 /* Instantiate the IR receiver device, if present */
410 saa7134_probe_i2c_ir(dev);
436 return 0; 411 return 0;
437} 412}
438 413
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 8a106d36e723..6e219c2db841 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -60,7 +60,7 @@ MODULE_PARM_DESC(disable_other_ir, "disable full codes of "
60#define dprintk(fmt, arg...) if (ir_debug) \ 60#define dprintk(fmt, arg...) if (ir_debug) \
61 printk(KERN_DEBUG "%s/ir: " fmt, dev->name , ## arg) 61 printk(KERN_DEBUG "%s/ir: " fmt, dev->name , ## arg)
62#define i2cdprintk(fmt, arg...) if (ir_debug) \ 62#define i2cdprintk(fmt, arg...) if (ir_debug) \
63 printk(KERN_DEBUG "%s/ir: " fmt, ir->c.name , ## arg) 63 printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg)
64 64
65/* Helper functions for RC5 and NEC decoding at GPIO16 or GPIO18 */ 65/* Helper functions for RC5 and NEC decoding at GPIO16 or GPIO18 */
66static int saa7134_rc5_irq(struct saa7134_dev *dev); 66static int saa7134_rc5_irq(struct saa7134_dev *dev);
@@ -134,10 +134,10 @@ static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, u32 *ir_key,
134 int gpio; 134 int gpio;
135 135
136 /* <dev> is needed to access GPIO. Used by the saa_readl macro. */ 136 /* <dev> is needed to access GPIO. Used by the saa_readl macro. */
137 struct saa7134_dev *dev = ir->c.adapter->algo_data; 137 struct saa7134_dev *dev = ir->c->adapter->algo_data;
138 if (dev == NULL) { 138 if (dev == NULL) {
139 dprintk("get_key_msi_tvanywhere_plus: " 139 dprintk("get_key_msi_tvanywhere_plus: "
140 "gir->c.adapter->algo_data is NULL!\n"); 140 "gir->c->adapter->algo_data is NULL!\n");
141 return -EIO; 141 return -EIO;
142 } 142 }
143 143
@@ -156,7 +156,7 @@ static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, u32 *ir_key,
156 156
157 /* GPIO says there is a button press. Get it. */ 157 /* GPIO says there is a button press. Get it. */
158 158
159 if (1 != i2c_master_recv(&ir->c, &b, 1)) { 159 if (1 != i2c_master_recv(ir->c, &b, 1)) {
160 i2cdprintk("read error\n"); 160 i2cdprintk("read error\n");
161 return -EIO; 161 return -EIO;
162 } 162 }
@@ -179,7 +179,7 @@ static int get_key_purpletv(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
179 unsigned char b; 179 unsigned char b;
180 180
181 /* poll IR chip */ 181 /* poll IR chip */
182 if (1 != i2c_master_recv(&ir->c,&b,1)) { 182 if (1 != i2c_master_recv(ir->c, &b, 1)) {
183 i2cdprintk("read error\n"); 183 i2cdprintk("read error\n");
184 return -EIO; 184 return -EIO;
185 } 185 }
@@ -202,7 +202,7 @@ static int get_key_hvr1110(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
202 unsigned char buf[5], cod4, code3, code4; 202 unsigned char buf[5], cod4, code3, code4;
203 203
204 /* poll IR chip */ 204 /* poll IR chip */
205 if (5 != i2c_master_recv(&ir->c,buf,5)) 205 if (5 != i2c_master_recv(ir->c, buf, 5))
206 return -EIO; 206 return -EIO;
207 207
208 cod4 = buf[4]; 208 cod4 = buf[4];
@@ -224,7 +224,7 @@ static int get_key_beholdm6xx(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
224 unsigned char data[12]; 224 unsigned char data[12];
225 u32 gpio; 225 u32 gpio;
226 226
227 struct saa7134_dev *dev = ir->c.adapter->algo_data; 227 struct saa7134_dev *dev = ir->c->adapter->algo_data;
228 228
229 /* rising SAA7134_GPIO_GPRESCAN reads the status */ 229 /* rising SAA7134_GPIO_GPRESCAN reads the status */
230 saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN); 230 saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
@@ -235,9 +235,9 @@ static int get_key_beholdm6xx(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
235 if (0x400000 & ~gpio) 235 if (0x400000 & ~gpio)
236 return 0; /* No button press */ 236 return 0; /* No button press */
237 237
238 ir->c.addr = 0x5a >> 1; 238 ir->c->addr = 0x5a >> 1;
239 239
240 if (12 != i2c_master_recv(&ir->c, data, 12)) { 240 if (12 != i2c_master_recv(ir->c, data, 12)) {
241 i2cdprintk("read error\n"); 241 i2cdprintk("read error\n");
242 return -EIO; 242 return -EIO;
243 } 243 }
@@ -267,7 +267,7 @@ static int get_key_pinnacle(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw,
267 unsigned int start = 0,parity = 0,code = 0; 267 unsigned int start = 0,parity = 0,code = 0;
268 268
269 /* poll IR chip */ 269 /* poll IR chip */
270 if (4 != i2c_master_recv(&ir->c, b, 4)) { 270 if (4 != i2c_master_recv(ir->c, b, 4)) {
271 i2cdprintk("read error\n"); 271 i2cdprintk("read error\n");
272 return -EIO; 272 return -EIO;
273 } 273 }
@@ -447,6 +447,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
447 case SAA7134_BOARD_AVERMEDIA_STUDIO_305: 447 case SAA7134_BOARD_AVERMEDIA_STUDIO_305:
448 case SAA7134_BOARD_AVERMEDIA_STUDIO_307: 448 case SAA7134_BOARD_AVERMEDIA_STUDIO_307:
449 case SAA7134_BOARD_AVERMEDIA_STUDIO_507: 449 case SAA7134_BOARD_AVERMEDIA_STUDIO_507:
450 case SAA7134_BOARD_AVERMEDIA_STUDIO_507UA:
450 case SAA7134_BOARD_AVERMEDIA_GO_007_FM: 451 case SAA7134_BOARD_AVERMEDIA_GO_007_FM:
451 case SAA7134_BOARD_AVERMEDIA_M102: 452 case SAA7134_BOARD_AVERMEDIA_M102:
452 case SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS: 453 case SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS:
@@ -506,7 +507,10 @@ int saa7134_input_init1(struct saa7134_dev *dev)
506 case SAA7134_BOARD_BEHOLD_407FM: 507 case SAA7134_BOARD_BEHOLD_407FM:
507 case SAA7134_BOARD_BEHOLD_409: 508 case SAA7134_BOARD_BEHOLD_409:
508 case SAA7134_BOARD_BEHOLD_505FM: 509 case SAA7134_BOARD_BEHOLD_505FM:
510 case SAA7134_BOARD_BEHOLD_505RDS:
509 case SAA7134_BOARD_BEHOLD_507_9FM: 511 case SAA7134_BOARD_BEHOLD_507_9FM:
512 case SAA7134_BOARD_BEHOLD_507RDS_MK3:
513 case SAA7134_BOARD_BEHOLD_507RDS_MK5:
510 ir_codes = ir_codes_manli; 514 ir_codes = ir_codes_manli;
511 mask_keycode = 0x003f00; 515 mask_keycode = 0x003f00;
512 mask_keyup = 0x004000; 516 mask_keyup = 0x004000;
@@ -678,55 +682,101 @@ void saa7134_input_fini(struct saa7134_dev *dev)
678 dev->remote = NULL; 682 dev->remote = NULL;
679} 683}
680 684
681void saa7134_set_i2c_ir(struct saa7134_dev *dev, struct IR_i2c *ir) 685void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
682{ 686{
687 struct i2c_board_info info;
688 struct IR_i2c_init_data init_data;
689 const unsigned short addr_list[] = {
690 0x7a, 0x47, 0x71, 0x2d,
691 I2C_CLIENT_END
692 };
693
694 struct i2c_msg msg_msi = {
695 .addr = 0x50,
696 .flags = I2C_M_RD,
697 .len = 0,
698 .buf = NULL,
699 };
700
701 int rc;
702
683 if (disable_ir) { 703 if (disable_ir) {
684 dprintk("Found supported i2c remote, but IR has been disabled\n"); 704 dprintk("IR has been disabled, not probing for i2c remote\n");
685 ir->get_key=NULL;
686 return; 705 return;
687 } 706 }
688 707
708 memset(&info, 0, sizeof(struct i2c_board_info));
709 memset(&init_data, 0, sizeof(struct IR_i2c_init_data));
710 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
711
689 switch (dev->board) { 712 switch (dev->board) {
690 case SAA7134_BOARD_PINNACLE_PCTV_110i: 713 case SAA7134_BOARD_PINNACLE_PCTV_110i:
691 case SAA7134_BOARD_PINNACLE_PCTV_310i: 714 case SAA7134_BOARD_PINNACLE_PCTV_310i:
692 snprintf(ir->c.name, sizeof(ir->c.name), "Pinnacle PCTV"); 715 init_data.name = "Pinnacle PCTV";
693 if (pinnacle_remote == 0) { 716 if (pinnacle_remote == 0) {
694 ir->get_key = get_key_pinnacle_color; 717 init_data.get_key = get_key_pinnacle_color;
695 ir->ir_codes = ir_codes_pinnacle_color; 718 init_data.ir_codes = ir_codes_pinnacle_color;
696 } else { 719 } else {
697 ir->get_key = get_key_pinnacle_grey; 720 init_data.get_key = get_key_pinnacle_grey;
698 ir->ir_codes = ir_codes_pinnacle_grey; 721 init_data.ir_codes = ir_codes_pinnacle_grey;
699 } 722 }
700 break; 723 break;
701 case SAA7134_BOARD_UPMOST_PURPLE_TV: 724 case SAA7134_BOARD_UPMOST_PURPLE_TV:
702 snprintf(ir->c.name, sizeof(ir->c.name), "Purple TV"); 725 init_data.name = "Purple TV";
703 ir->get_key = get_key_purpletv; 726 init_data.get_key = get_key_purpletv;
704 ir->ir_codes = ir_codes_purpletv; 727 init_data.ir_codes = ir_codes_purpletv;
705 break; 728 break;
706 case SAA7134_BOARD_MSI_TVATANYWHERE_PLUS: 729 case SAA7134_BOARD_MSI_TVATANYWHERE_PLUS:
707 snprintf(ir->c.name, sizeof(ir->c.name), "MSI TV@nywhere Plus"); 730 init_data.name = "MSI TV@nywhere Plus";
708 ir->get_key = get_key_msi_tvanywhere_plus; 731 init_data.get_key = get_key_msi_tvanywhere_plus;
709 ir->ir_codes = ir_codes_msi_tvanywhere_plus; 732 init_data.ir_codes = ir_codes_msi_tvanywhere_plus;
733 info.addr = 0x30;
734 /* MSI TV@nywhere Plus controller doesn't seem to
735 respond to probes unless we read something from
736 an existing device. Weird...
737 REVISIT: might no longer be needed */
738 rc = i2c_transfer(&dev->i2c_adap, &msg_msi, 1);
739 dprintk(KERN_DEBUG "probe 0x%02x @ %s: %s\n",
740 msg_msi.addr, dev->i2c_adap.name,
741 (1 == rc) ? "yes" : "no");
710 break; 742 break;
711 case SAA7134_BOARD_HAUPPAUGE_HVR1110: 743 case SAA7134_BOARD_HAUPPAUGE_HVR1110:
712 snprintf(ir->c.name, sizeof(ir->c.name), "HVR 1110"); 744 init_data.name = "HVR 1110";
713 ir->get_key = get_key_hvr1110; 745 init_data.get_key = get_key_hvr1110;
714 ir->ir_codes = ir_codes_hauppauge_new; 746 init_data.ir_codes = ir_codes_hauppauge_new;
715 break; 747 break;
716 case SAA7134_BOARD_BEHOLD_607_9FM: 748 case SAA7134_BOARD_BEHOLD_607FM_MK3:
749 case SAA7134_BOARD_BEHOLD_607FM_MK5:
750 case SAA7134_BOARD_BEHOLD_609FM_MK3:
751 case SAA7134_BOARD_BEHOLD_609FM_MK5:
752 case SAA7134_BOARD_BEHOLD_607RDS_MK3:
753 case SAA7134_BOARD_BEHOLD_607RDS_MK5:
754 case SAA7134_BOARD_BEHOLD_609RDS_MK3:
755 case SAA7134_BOARD_BEHOLD_609RDS_MK5:
717 case SAA7134_BOARD_BEHOLD_M6: 756 case SAA7134_BOARD_BEHOLD_M6:
718 case SAA7134_BOARD_BEHOLD_M63: 757 case SAA7134_BOARD_BEHOLD_M63:
719 case SAA7134_BOARD_BEHOLD_M6_EXTRA: 758 case SAA7134_BOARD_BEHOLD_M6_EXTRA:
720 case SAA7134_BOARD_BEHOLD_H6: 759 case SAA7134_BOARD_BEHOLD_H6:
721 snprintf(ir->c.name, sizeof(ir->c.name), "BeholdTV"); 760 init_data.name = "BeholdTV";
722 ir->get_key = get_key_beholdm6xx; 761 init_data.get_key = get_key_beholdm6xx;
723 ir->ir_codes = ir_codes_behold; 762 init_data.ir_codes = ir_codes_behold;
724 break; 763 break;
725 default: 764 case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
726 dprintk("Shouldn't get here: Unknown board %x for I2C IR?\n",dev->board); 765 case SAA7134_BOARD_AVERMEDIA_CARDBUS_506:
766 info.addr = 0x40;
727 break; 767 break;
728 } 768 }
729 769
770 if (init_data.name)
771 info.platform_data = &init_data;
772 /* No need to probe if address is known */
773 if (info.addr) {
774 i2c_new_device(&dev->i2c_adap, &info);
775 return;
776 }
777
778 /* Address not known, fallback to probing */
779 i2c_new_probed_device(&dev->i2c_adap, &info, addr_list);
730} 780}
731 781
732static int saa7134_rc5_irq(struct saa7134_dev *dev) 782static int saa7134_rc5_irq(struct saa7134_dev *dev)
diff --git a/drivers/media/video/saa7134/saa7134-ts.c b/drivers/media/video/saa7134/saa7134-ts.c
index cc8b923afbc0..3fa652279ac0 100644
--- a/drivers/media/video/saa7134/saa7134-ts.c
+++ b/drivers/media/video/saa7134/saa7134-ts.c
@@ -65,35 +65,10 @@ static int buffer_activate(struct saa7134_dev *dev,
65 /* start DMA */ 65 /* start DMA */
66 saa7134_set_dmabits(dev); 66 saa7134_set_dmabits(dev);
67 67
68 mod_timer(&dev->ts_q.timeout, jiffies+BUFFER_TIMEOUT); 68 mod_timer(&dev->ts_q.timeout, jiffies+TS_BUFFER_TIMEOUT);
69
70 if (dev->ts_state == SAA7134_TS_BUFF_DONE) {
71 /* Clear TS cache */
72 dev->buff_cnt = 0;
73 saa_writeb(SAA7134_TS_SERIAL1, 0x00);
74 saa_writeb(SAA7134_TS_SERIAL1, 0x03);
75 saa_writeb(SAA7134_TS_SERIAL1, 0x00);
76 saa_writeb(SAA7134_TS_SERIAL1, 0x01);
77
78 /* TS clock non-inverted */
79 saa_writeb(SAA7134_TS_SERIAL1, 0x00);
80
81 /* Start TS stream */
82 switch (saa7134_boards[dev->board].ts_type) {
83 case SAA7134_MPEG_TS_PARALLEL:
84 saa_writeb(SAA7134_TS_SERIAL0, 0x40);
85 saa_writeb(SAA7134_TS_PARALLEL, 0xec);
86 break;
87 case SAA7134_MPEG_TS_SERIAL:
88 saa_writeb(SAA7134_TS_SERIAL0, 0xd8);
89 saa_writeb(SAA7134_TS_PARALLEL, 0x6c);
90 saa_writeb(SAA7134_TS_PARALLEL_SERIAL, 0xbc);
91 saa_writeb(SAA7134_TS_SERIAL1, 0x02);
92 break;
93 }
94 69
95 dev->ts_state = SAA7134_TS_STARTED; 70 if (!dev->ts_started)
96 } 71 saa7134_ts_start(dev);
97 72
98 return 0; 73 return 0;
99} 74}
@@ -104,7 +79,6 @@ static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
104 struct saa7134_dev *dev = q->priv_data; 79 struct saa7134_dev *dev = q->priv_data;
105 struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb); 80 struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
106 unsigned int lines, llength, size; 81 unsigned int lines, llength, size;
107 u32 control;
108 int err; 82 int err;
109 83
110 dprintk("buffer_prepare [%p,%s]\n",buf,v4l2_field_names[field]); 84 dprintk("buffer_prepare [%p,%s]\n",buf,v4l2_field_names[field]);
@@ -121,8 +95,11 @@ static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
121 } 95 }
122 96
123 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { 97 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
98
124 struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); 99 struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
125 100
101 dprintk("buffer_prepare: needs_init\n");
102
126 buf->vb.width = llength; 103 buf->vb.width = llength;
127 buf->vb.height = lines; 104 buf->vb.height = lines;
128 buf->vb.size = size; 105 buf->vb.size = size;
@@ -139,23 +116,6 @@ static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
139 goto oops; 116 goto oops;
140 } 117 }
141 118
142 dev->buff_cnt++;
143
144 if (dev->buff_cnt == dev->ts.nr_bufs) {
145 dev->ts_state = SAA7134_TS_BUFF_DONE;
146 /* dma: setup channel 5 (= TS) */
147 control = SAA7134_RS_CONTROL_BURST_16 |
148 SAA7134_RS_CONTROL_ME |
149 (buf->pt->dma >> 12);
150
151 saa_writeb(SAA7134_TS_DMA0, (lines - 1) & 0xff);
152 saa_writeb(SAA7134_TS_DMA1, ((lines - 1) >> 8) & 0xff);
153 /* TSNOPIT=0, TSCOLAP=0 */
154 saa_writeb(SAA7134_TS_DMA2, (((lines - 1) >> 16) & 0x3f) | 0x00);
155 saa_writel(SAA7134_RS_PITCH(5), TS_PACKET_SIZE);
156 saa_writel(SAA7134_RS_CONTROL(5), control);
157 }
158
159 buf->vb.state = VIDEOBUF_PREPARED; 119 buf->vb.state = VIDEOBUF_PREPARED;
160 buf->activate = buffer_activate; 120 buf->activate = buffer_activate;
161 buf->vb.field = field; 121 buf->vb.field = field;
@@ -175,8 +135,7 @@ buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
175 if (0 == *count) 135 if (0 == *count)
176 *count = dev->ts.nr_bufs; 136 *count = dev->ts.nr_bufs;
177 *count = saa7134_buffer_count(*size,*count); 137 *count = saa7134_buffer_count(*size,*count);
178 dev->buff_cnt = 0; 138
179 dev->ts_state = SAA7134_TS_STOPPED;
180 return 0; 139 return 0;
181} 140}
182 141
@@ -193,11 +152,9 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
193 struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb); 152 struct saa7134_buf *buf = container_of(vb,struct saa7134_buf,vb);
194 struct saa7134_dev *dev = q->priv_data; 153 struct saa7134_dev *dev = q->priv_data;
195 154
196 if (dev->ts_state == SAA7134_TS_STARTED) { 155 if (dev->ts_started)
197 /* Stop TS transport */ 156 saa7134_ts_stop(dev);
198 saa_writeb(SAA7134_TS_PARALLEL, 0x6c); 157
199 dev->ts_state = SAA7134_TS_STOPPED;
200 }
201 saa7134_dma_free(q,buf); 158 saa7134_dma_free(q,buf);
202} 159}
203 160
@@ -214,7 +171,7 @@ EXPORT_SYMBOL_GPL(saa7134_ts_qops);
214 171
215static unsigned int tsbufs = 8; 172static unsigned int tsbufs = 8;
216module_param(tsbufs, int, 0444); 173module_param(tsbufs, int, 0444);
217MODULE_PARM_DESC(tsbufs,"number of ts buffers, range 2-32"); 174MODULE_PARM_DESC(tsbufs, "number of ts buffers for read/write IO, range 2-32");
218 175
219static unsigned int ts_nr_packets = 64; 176static unsigned int ts_nr_packets = 64;
220module_param(ts_nr_packets, int, 0444); 177module_param(ts_nr_packets, int, 0444);
@@ -256,6 +213,7 @@ int saa7134_ts_init1(struct saa7134_dev *dev)
256 dev->ts_q.timeout.data = (unsigned long)(&dev->ts_q); 213 dev->ts_q.timeout.data = (unsigned long)(&dev->ts_q);
257 dev->ts_q.dev = dev; 214 dev->ts_q.dev = dev;
258 dev->ts_q.need_two = 1; 215 dev->ts_q.need_two = 1;
216 dev->ts_started = 0;
259 saa7134_pgtable_alloc(dev->pci,&dev->ts.pt_ts); 217 saa7134_pgtable_alloc(dev->pci,&dev->ts.pt_ts);
260 218
261 /* init TS hw */ 219 /* init TS hw */
@@ -264,13 +222,67 @@ int saa7134_ts_init1(struct saa7134_dev *dev)
264 return 0; 222 return 0;
265} 223}
266 224
225/* Function for stop TS */
226int saa7134_ts_stop(struct saa7134_dev *dev)
227{
228 dprintk("TS stop\n");
229
230 BUG_ON(!dev->ts_started);
231
232 /* Stop TS stream */
233 switch (saa7134_boards[dev->board].ts_type) {
234 case SAA7134_MPEG_TS_PARALLEL:
235 saa_writeb(SAA7134_TS_PARALLEL, 0x6c);
236 dev->ts_started = 0;
237 break;
238 case SAA7134_MPEG_TS_SERIAL:
239 saa_writeb(SAA7134_TS_SERIAL0, 0x40);
240 dev->ts_started = 0;
241 break;
242 }
243 return 0;
244}
245
246/* Function for start TS */
247int saa7134_ts_start(struct saa7134_dev *dev)
248{
249 dprintk("TS start\n");
250
251 BUG_ON(dev->ts_started);
252
253 saa_writeb(SAA7134_TS_SERIAL1, 0x00);
254 saa_writeb(SAA7134_TS_SERIAL1, 0x03);
255 saa_writeb(SAA7134_TS_SERIAL1, 0x00);
256 saa_writeb(SAA7134_TS_SERIAL1, 0x01);
257
258 /* TS clock non-inverted */
259 saa_writeb(SAA7134_TS_SERIAL1, 0x00);
260
261 /* Start TS stream */
262 switch (saa7134_boards[dev->board].ts_type) {
263 case SAA7134_MPEG_TS_PARALLEL:
264 saa_writeb(SAA7134_TS_SERIAL0, 0x40);
265 saa_writeb(SAA7134_TS_PARALLEL, 0xec);
266 break;
267 case SAA7134_MPEG_TS_SERIAL:
268 saa_writeb(SAA7134_TS_SERIAL0, 0xd8);
269 saa_writeb(SAA7134_TS_PARALLEL, 0x6c);
270 saa_writeb(SAA7134_TS_PARALLEL_SERIAL, 0xbc);
271 saa_writeb(SAA7134_TS_SERIAL1, 0x02);
272 break;
273 }
274
275 dev->ts_started = 1;
276
277 return 0;
278}
279
267int saa7134_ts_fini(struct saa7134_dev *dev) 280int saa7134_ts_fini(struct saa7134_dev *dev)
268{ 281{
269 saa7134_pgtable_free(dev->pci,&dev->ts.pt_ts); 282 saa7134_pgtable_free(dev->pci,&dev->ts.pt_ts);
270 return 0; 283 return 0;
271} 284}
272 285
273
274void saa7134_irq_ts_done(struct saa7134_dev *dev, unsigned long status) 286void saa7134_irq_ts_done(struct saa7134_dev *dev, unsigned long status)
275{ 287{
276 enum v4l2_field field; 288 enum v4l2_field field;
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 493cad941460..e305c1674cee 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1057,6 +1057,7 @@ static int buffer_prepare(struct videobuf_queue *q,
1057 buf->vb.field = field; 1057 buf->vb.field = field;
1058 buf->fmt = fh->fmt; 1058 buf->fmt = fh->fmt;
1059 buf->pt = &fh->pt_cap; 1059 buf->pt = &fh->pt_cap;
1060 dev->video_q.curr = NULL;
1060 1061
1061 err = videobuf_iolock(q,&buf->vb,&dev->ovbuf); 1062 err = videobuf_iolock(q,&buf->vb,&dev->ovbuf);
1062 if (err) 1063 if (err)
@@ -1423,11 +1424,13 @@ video_poll(struct file *file, struct poll_table_struct *wait)
1423{ 1424{
1424 struct saa7134_fh *fh = file->private_data; 1425 struct saa7134_fh *fh = file->private_data;
1425 struct videobuf_buffer *buf = NULL; 1426 struct videobuf_buffer *buf = NULL;
1427 unsigned int rc = 0;
1426 1428
1427 if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) 1429 if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type)
1428 return videobuf_poll_stream(file, &fh->vbi, wait); 1430 return videobuf_poll_stream(file, &fh->vbi, wait);
1429 1431
1430 if (res_check(fh,RESOURCE_VIDEO)) { 1432 if (res_check(fh,RESOURCE_VIDEO)) {
1433 mutex_lock(&fh->cap.vb_lock);
1431 if (!list_empty(&fh->cap.stream)) 1434 if (!list_empty(&fh->cap.stream))
1432 buf = list_entry(fh->cap.stream.next, struct videobuf_buffer, stream); 1435 buf = list_entry(fh->cap.stream.next, struct videobuf_buffer, stream);
1433 } else { 1436 } else {
@@ -1446,13 +1449,14 @@ video_poll(struct file *file, struct poll_table_struct *wait)
1446 } 1449 }
1447 1450
1448 if (!buf) 1451 if (!buf)
1449 return POLLERR; 1452 goto err;
1450 1453
1451 poll_wait(file, &buf->done, wait); 1454 poll_wait(file, &buf->done, wait);
1452 if (buf->state == VIDEOBUF_DONE || 1455 if (buf->state == VIDEOBUF_DONE ||
1453 buf->state == VIDEOBUF_ERROR) 1456 buf->state == VIDEOBUF_ERROR)
1454 return POLLIN|POLLRDNORM; 1457 rc = POLLIN|POLLRDNORM;
1455 return 0; 1458 mutex_unlock(&fh->cap.vb_lock);
1459 return rc;
1456 1460
1457err: 1461err:
1458 mutex_unlock(&fh->cap.vb_lock); 1462 mutex_unlock(&fh->cap.vb_lock);
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 0cbaf90d4874..82268848f26a 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -252,7 +252,7 @@ struct saa7134_format {
252#define SAA7134_BOARD_BEHOLD_505FM 126 252#define SAA7134_BOARD_BEHOLD_505FM 126
253#define SAA7134_BOARD_BEHOLD_507_9FM 127 253#define SAA7134_BOARD_BEHOLD_507_9FM 127
254#define SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM 128 254#define SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM 128
255#define SAA7134_BOARD_BEHOLD_607_9FM 129 255#define SAA7134_BOARD_BEHOLD_607FM_MK3 129
256#define SAA7134_BOARD_BEHOLD_M6 130 256#define SAA7134_BOARD_BEHOLD_M6 130
257#define SAA7134_BOARD_TWINHAN_DTV_DVB_3056 131 257#define SAA7134_BOARD_TWINHAN_DTV_DVB_3056 131
258#define SAA7134_BOARD_GENIUS_TVGO_A11MCE 132 258#define SAA7134_BOARD_GENIUS_TVGO_A11MCE 132
@@ -280,6 +280,18 @@ struct saa7134_format {
280#define SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS 154 280#define SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS 154
281#define SAA7134_BOARD_HAUPPAUGE_HVR1120 155 281#define SAA7134_BOARD_HAUPPAUGE_HVR1120 155
282#define SAA7134_BOARD_HAUPPAUGE_HVR1110R3 156 282#define SAA7134_BOARD_HAUPPAUGE_HVR1110R3 156
283#define SAA7134_BOARD_AVERMEDIA_STUDIO_507UA 157
284#define SAA7134_BOARD_AVERMEDIA_CARDBUS_501 158
285#define SAA7134_BOARD_BEHOLD_505RDS 159
286#define SAA7134_BOARD_BEHOLD_507RDS_MK3 160
287#define SAA7134_BOARD_BEHOLD_507RDS_MK5 161
288#define SAA7134_BOARD_BEHOLD_607FM_MK5 162
289#define SAA7134_BOARD_BEHOLD_609FM_MK3 163
290#define SAA7134_BOARD_BEHOLD_609FM_MK5 164
291#define SAA7134_BOARD_BEHOLD_607RDS_MK3 165
292#define SAA7134_BOARD_BEHOLD_607RDS_MK5 166
293#define SAA7134_BOARD_BEHOLD_609RDS_MK3 167
294#define SAA7134_BOARD_BEHOLD_609RDS_MK5 168
283 295
284#define SAA7134_MAXBOARDS 32 296#define SAA7134_MAXBOARDS 32
285#define SAA7134_INPUT_MAX 8 297#define SAA7134_INPUT_MAX 8
@@ -364,6 +376,7 @@ struct saa7134_board {
364#define INTERLACE_OFF 2 376#define INTERLACE_OFF 2
365 377
366#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */ 378#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
379#define TS_BUFFER_TIMEOUT msecs_to_jiffies(1000) /* 1 second */
367 380
368struct saa7134_dev; 381struct saa7134_dev;
369struct saa7134_dma; 382struct saa7134_dma;
@@ -480,12 +493,6 @@ struct saa7134_mpeg_ops {
480 void (*signal_change)(struct saa7134_dev *dev); 493 void (*signal_change)(struct saa7134_dev *dev);
481}; 494};
482 495
483enum saa7134_ts_status {
484 SAA7134_TS_STOPPED,
485 SAA7134_TS_BUFF_DONE,
486 SAA7134_TS_STARTED,
487};
488
489/* global device status */ 496/* global device status */
490struct saa7134_dev { 497struct saa7134_dev {
491 struct list_head devlist; 498 struct list_head devlist;
@@ -580,8 +587,7 @@ struct saa7134_dev {
580 /* SAA7134_MPEG_* */ 587 /* SAA7134_MPEG_* */
581 struct saa7134_ts ts; 588 struct saa7134_ts ts;
582 struct saa7134_dmaqueue ts_q; 589 struct saa7134_dmaqueue ts_q;
583 enum saa7134_ts_status ts_state; 590 int ts_started;
584 unsigned int buff_cnt;
585 struct saa7134_mpeg_ops *mops; 591 struct saa7134_mpeg_ops *mops;
586 592
587 /* SAA7134_MPEG_EMPRESS only */ 593 /* SAA7134_MPEG_EMPRESS only */
@@ -739,6 +745,9 @@ void saa7134_ts_unregister(struct saa7134_mpeg_ops *ops);
739 745
740int saa7134_ts_init_hw(struct saa7134_dev *dev); 746int saa7134_ts_init_hw(struct saa7134_dev *dev);
741 747
748int saa7134_ts_start(struct saa7134_dev *dev);
749int saa7134_ts_stop(struct saa7134_dev *dev);
750
742/* ----------------------------------------------------------- */ 751/* ----------------------------------------------------------- */
743/* saa7134-vbi.c */ 752/* saa7134-vbi.c */
744 753
@@ -786,7 +795,7 @@ void saa7134_irq_oss_done(struct saa7134_dev *dev, unsigned long status);
786int saa7134_input_init1(struct saa7134_dev *dev); 795int saa7134_input_init1(struct saa7134_dev *dev);
787void saa7134_input_fini(struct saa7134_dev *dev); 796void saa7134_input_fini(struct saa7134_dev *dev);
788void saa7134_input_irq(struct saa7134_dev *dev); 797void saa7134_input_irq(struct saa7134_dev *dev);
789void saa7134_set_i2c_ir(struct saa7134_dev *dev, struct IR_i2c *ir); 798void saa7134_probe_i2c_ir(struct saa7134_dev *dev);
790void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir); 799void saa7134_ir_start(struct saa7134_dev *dev, struct card_ir *ir);
791void saa7134_ir_stop(struct saa7134_dev *dev); 800void saa7134_ir_stop(struct saa7134_dev *dev);
792 801
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index 5990ab38a124..c8f05297d0f0 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -38,7 +38,7 @@ static const char version[] = "0.24";
38static int flickerless; 38static int flickerless;
39static int video_nr = -1; 39static int video_nr = -1;
40 40
41static struct usb_device_id device_table [] = { 41static struct usb_device_id device_table[] = {
42 { USB_DEVICE(0x03e8, 0x0004) },/* Endpoints/Aox SE401 */ 42 { USB_DEVICE(0x03e8, 0x0004) },/* Endpoints/Aox SE401 */
43 { USB_DEVICE(0x0471, 0x030b) },/* Philips PCVC665K */ 43 { USB_DEVICE(0x0471, 0x030b) },/* Philips PCVC665K */
44 { USB_DEVICE(0x047d, 0x5001) },/* Kensington 67014 */ 44 { USB_DEVICE(0x047d, 0x5001) },/* Kensington 67014 */
@@ -53,7 +53,8 @@ MODULE_AUTHOR("Jeroen Vreeken <pe1rxq@amsat.org>");
53MODULE_DESCRIPTION("SE401 USB Camera Driver"); 53MODULE_DESCRIPTION("SE401 USB Camera Driver");
54MODULE_LICENSE("GPL"); 54MODULE_LICENSE("GPL");
55module_param(flickerless, int, 0); 55module_param(flickerless, int, 0);
56MODULE_PARM_DESC(flickerless, "Net frequency to adjust exposure time to (0/50/60)"); 56MODULE_PARM_DESC(flickerless,
57 "Net frequency to adjust exposure time to (0/50/60)");
57module_param(video_nr, int, 0); 58module_param(video_nr, int, 0);
58 59
59static struct usb_driver se401_driver; 60static struct usb_driver se401_driver;
@@ -78,8 +79,8 @@ static void *rvmalloc(unsigned long size)
78 adr = (unsigned long) mem; 79 adr = (unsigned long) mem;
79 while (size > 0) { 80 while (size > 0) {
80 SetPageReserved(vmalloc_to_page((void *)adr)); 81 SetPageReserved(vmalloc_to_page((void *)adr));
81 adr += PAGE_SIZE; 82 adr += PAGE_SIZE;
82 size -= PAGE_SIZE; 83 size -= PAGE_SIZE;
83 } 84 }
84 85
85 return mem; 86 return mem;
@@ -95,8 +96,8 @@ static void rvfree(void *mem, unsigned long size)
95 adr = (unsigned long) mem; 96 adr = (unsigned long) mem;
96 while ((long) size > 0) { 97 while ((long) size > 0) {
97 ClearPageReserved(vmalloc_to_page((void *)adr)); 98 ClearPageReserved(vmalloc_to_page((void *)adr));
98 adr += PAGE_SIZE; 99 adr += PAGE_SIZE;
99 size -= PAGE_SIZE; 100 size -= PAGE_SIZE;
100 } 101 }
101 vfree(mem); 102 vfree(mem);
102} 103}
@@ -112,7 +113,7 @@ static void rvfree(void *mem, unsigned long size)
112static int se401_sndctrl(int set, struct usb_se401 *se401, unsigned short req, 113static int se401_sndctrl(int set, struct usb_se401 *se401, unsigned short req,
113 unsigned short value, unsigned char *cp, int size) 114 unsigned short value, unsigned char *cp, int size)
114{ 115{
115 return usb_control_msg ( 116 return usb_control_msg(
116 se401->dev, 117 se401->dev,
117 set ? usb_sndctrlpipe(se401->dev, 0) : usb_rcvctrlpipe(se401->dev, 0), 118 set ? usb_sndctrlpipe(se401->dev, 0) : usb_rcvctrlpipe(se401->dev, 0),
118 req, 119 req,
@@ -132,7 +133,7 @@ static int se401_set_feature(struct usb_se401 *se401, unsigned short selector,
132 and the param in index, but in the logs of the windows driver they do 133 and the param in index, but in the logs of the windows driver they do
133 this the other way around... 134 this the other way around...
134 */ 135 */
135 return usb_control_msg ( 136 return usb_control_msg(
136 se401->dev, 137 se401->dev,
137 usb_sndctrlpipe(se401->dev, 0), 138 usb_sndctrlpipe(se401->dev, 0),
138 SE401_REQ_SET_EXT_FEATURE, 139 SE401_REQ_SET_EXT_FEATURE,
@@ -152,7 +153,7 @@ static unsigned short se401_get_feature(struct usb_se401 *se401,
152 wrong here to.... 153 wrong here to....
153 */ 154 */
154 unsigned char cp[2]; 155 unsigned char cp[2];
155 usb_control_msg ( 156 usb_control_msg(
156 se401->dev, 157 se401->dev,
157 usb_rcvctrlpipe(se401->dev, 0), 158 usb_rcvctrlpipe(se401->dev, 0),
158 SE401_REQ_GET_EXT_FEATURE, 159 SE401_REQ_GET_EXT_FEATURE,
@@ -175,46 +176,51 @@ static unsigned short se401_get_feature(struct usb_se401 *se401,
175 176
176static int se401_send_pict(struct usb_se401 *se401) 177static int se401_send_pict(struct usb_se401 *se401)
177{ 178{
178 se401_set_feature(se401, HV7131_REG_TITL, se401->expose_l);/* integration time low */ 179 /* integration time low */
179 se401_set_feature(se401, HV7131_REG_TITM, se401->expose_m);/* integration time mid */ 180 se401_set_feature(se401, HV7131_REG_TITL, se401->expose_l);
180 se401_set_feature(se401, HV7131_REG_TITU, se401->expose_h);/* integration time mid */ 181 /* integration time mid */
181 se401_set_feature(se401, HV7131_REG_ARLV, se401->resetlevel);/* reset level value */ 182 se401_set_feature(se401, HV7131_REG_TITM, se401->expose_m);
182 se401_set_feature(se401, HV7131_REG_ARCG, se401->rgain);/* red color gain */ 183 /* integration time mid */
183 se401_set_feature(se401, HV7131_REG_AGCG, se401->ggain);/* green color gain */ 184 se401_set_feature(se401, HV7131_REG_TITU, se401->expose_h);
184 se401_set_feature(se401, HV7131_REG_ABCG, se401->bgain);/* blue color gain */ 185 /* reset level value */
186 se401_set_feature(se401, HV7131_REG_ARLV, se401->resetlevel);
187 /* red color gain */
188 se401_set_feature(se401, HV7131_REG_ARCG, se401->rgain);
189 /* green color gain */
190 se401_set_feature(se401, HV7131_REG_AGCG, se401->ggain);
191 /* blue color gain */
192 se401_set_feature(se401, HV7131_REG_ABCG, se401->bgain);
185 193
186 return 0; 194 return 0;
187} 195}
188 196
189static void se401_set_exposure(struct usb_se401 *se401, int brightness) 197static void se401_set_exposure(struct usb_se401 *se401, int brightness)
190{ 198{
191 int integration=brightness<<5; 199 int integration = brightness << 5;
192 200
193 if (flickerless==50) { 201 if (flickerless == 50)
194 integration=integration-integration%106667; 202 integration = integration-integration % 106667;
195 } 203 if (flickerless == 60)
196 if (flickerless==60) { 204 integration = integration-integration % 88889;
197 integration=integration-integration%88889; 205 se401->brightness = integration >> 5;
198 } 206 se401->expose_h = (integration >> 16) & 0xff;
199 se401->brightness=integration>>5; 207 se401->expose_m = (integration >> 8) & 0xff;
200 se401->expose_h=(integration>>16)&0xff; 208 se401->expose_l = integration & 0xff;
201 se401->expose_m=(integration>>8)&0xff;
202 se401->expose_l=integration&0xff;
203} 209}
204 210
205static int se401_get_pict(struct usb_se401 *se401, struct video_picture *p) 211static int se401_get_pict(struct usb_se401 *se401, struct video_picture *p)
206{ 212{
207 p->brightness=se401->brightness; 213 p->brightness = se401->brightness;
208 if (se401->enhance) { 214 if (se401->enhance)
209 p->whiteness=32768; 215 p->whiteness = 32768;
210 } else { 216 else
211 p->whiteness=0; 217 p->whiteness = 0;
212 } 218
213 p->colour=65535; 219 p->colour = 65535;
214 p->contrast=65535; 220 p->contrast = 65535;
215 p->hue=se401->rgain<<10; 221 p->hue = se401->rgain << 10;
216 p->palette=se401->palette; 222 p->palette = se401->palette;
217 p->depth=3; /* rgb24 */ 223 p->depth = 3; /* rgb24 */
218 return 0; 224 return 0;
219} 225}
220 226
@@ -223,20 +229,19 @@ static int se401_set_pict(struct usb_se401 *se401, struct video_picture *p)
223{ 229{
224 if (p->palette != VIDEO_PALETTE_RGB24) 230 if (p->palette != VIDEO_PALETTE_RGB24)
225 return 1; 231 return 1;
226 se401->palette=p->palette; 232 se401->palette = p->palette;
227 if (p->hue!=se401->hue) { 233 if (p->hue != se401->hue) {
228 se401->rgain= p->hue>>10; 234 se401->rgain = p->hue >> 10;
229 se401->bgain= 0x40-(p->hue>>10); 235 se401->bgain = 0x40-(p->hue >> 10);
230 se401->hue=p->hue; 236 se401->hue = p->hue;
231 } 237 }
232 if (p->brightness!=se401->brightness) { 238 if (p->brightness != se401->brightness)
233 se401_set_exposure(se401, p->brightness); 239 se401_set_exposure(se401, p->brightness);
234 } 240
235 if (p->whiteness>=32768) { 241 if (p->whiteness >= 32768)
236 se401->enhance=1; 242 se401->enhance = 1;
237 } else { 243 else
238 se401->enhance=0; 244 se401->enhance = 0;
239 }
240 se401_send_pict(se401); 245 se401_send_pict(se401);
241 se401_send_pict(se401); 246 se401_send_pict(se401);
242 return 0; 247 return 0;
@@ -249,7 +254,7 @@ static int se401_set_pict(struct usb_se401 *se401, struct video_picture *p)
249static void se401_auto_resetlevel(struct usb_se401 *se401) 254static void se401_auto_resetlevel(struct usb_se401 *se401)
250{ 255{
251 unsigned int ahrc, alrc; 256 unsigned int ahrc, alrc;
252 int oldreset=se401->resetlevel; 257 int oldreset = se401->resetlevel;
253 258
254 /* For some reason this normally read-only register doesn't get reset 259 /* For some reason this normally read-only register doesn't get reset
255 to zero after reading them just once... 260 to zero after reading them just once...
@@ -258,24 +263,24 @@ static void se401_auto_resetlevel(struct usb_se401 *se401)
258 se401_get_feature(se401, HV7131_REG_HIREFNOL); 263 se401_get_feature(se401, HV7131_REG_HIREFNOL);
259 se401_get_feature(se401, HV7131_REG_LOREFNOH); 264 se401_get_feature(se401, HV7131_REG_LOREFNOH);
260 se401_get_feature(se401, HV7131_REG_LOREFNOL); 265 se401_get_feature(se401, HV7131_REG_LOREFNOL);
261 ahrc=256*se401_get_feature(se401, HV7131_REG_HIREFNOH) + 266 ahrc = 256*se401_get_feature(se401, HV7131_REG_HIREFNOH) +
262 se401_get_feature(se401, HV7131_REG_HIREFNOL); 267 se401_get_feature(se401, HV7131_REG_HIREFNOL);
263 alrc=256*se401_get_feature(se401, HV7131_REG_LOREFNOH) + 268 alrc = 256*se401_get_feature(se401, HV7131_REG_LOREFNOH) +
264 se401_get_feature(se401, HV7131_REG_LOREFNOL); 269 se401_get_feature(se401, HV7131_REG_LOREFNOL);
265 270
266 /* Not an exact science, but it seems to work pretty well... */ 271 /* Not an exact science, but it seems to work pretty well... */
267 if (alrc > 10) { 272 if (alrc > 10) {
268 while (alrc>=10 && se401->resetlevel < 63) { 273 while (alrc >= 10 && se401->resetlevel < 63) {
269 se401->resetlevel++; 274 se401->resetlevel++;
270 alrc /=2; 275 alrc /= 2;
271 } 276 }
272 } else if (ahrc > 20) { 277 } else if (ahrc > 20) {
273 while (ahrc>=20 && se401->resetlevel > 0) { 278 while (ahrc >= 20 && se401->resetlevel > 0) {
274 se401->resetlevel--; 279 se401->resetlevel--;
275 ahrc /=2; 280 ahrc /= 2;
276 } 281 }
277 } 282 }
278 if (se401->resetlevel!=oldreset) 283 if (se401->resetlevel != oldreset)
279 se401_set_feature(se401, HV7131_REG_ARLV, se401->resetlevel); 284 se401_set_feature(se401, HV7131_REG_ARLV, se401->resetlevel);
280 285
281 return; 286 return;
@@ -300,21 +305,22 @@ static void se401_button_irq(struct urb *urb)
300 case -ENOENT: 305 case -ENOENT:
301 case -ESHUTDOWN: 306 case -ESHUTDOWN:
302 /* this urb is terminated, clean up */ 307 /* this urb is terminated, clean up */
303 dbg("%s - urb shutting down with status: %d", __func__, urb->status); 308 dbg("%s - urb shutting down with status: %d",
309 __func__, urb->status);
304 return; 310 return;
305 default: 311 default:
306 dbg("%s - nonzero urb status received: %d", __func__, urb->status); 312 dbg("%s - nonzero urb status received: %d",
313 __func__, urb->status);
307 goto exit; 314 goto exit;
308 } 315 }
309 316
310 if (urb->actual_length >=2) { 317 if (urb->actual_length >= 2)
311 if (se401->button) 318 if (se401->button)
312 se401->buttonpressed=1; 319 se401->buttonpressed = 1;
313 }
314exit: 320exit:
315 status = usb_submit_urb (urb, GFP_ATOMIC); 321 status = usb_submit_urb(urb, GFP_ATOMIC);
316 if (status) 322 if (status)
317 err ("%s - usb_submit_urb failed with result %d", 323 err("%s - usb_submit_urb failed with result %d",
318 __func__, status); 324 __func__, status);
319} 325}
320 326
@@ -336,55 +342,52 @@ static void se401_video_irq(struct urb *urb)
336 keeps sending them forever... 342 keeps sending them forever...
337 */ 343 */
338 if (length && !urb->status) { 344 if (length && !urb->status) {
339 se401->nullpackets=0; 345 se401->nullpackets = 0;
340 switch(se401->scratch[se401->scratch_next].state) { 346 switch (se401->scratch[se401->scratch_next].state) {
341 case BUFFER_READY: 347 case BUFFER_READY:
342 case BUFFER_BUSY: { 348 case BUFFER_BUSY:
343 se401->dropped++; 349 se401->dropped++;
344 break; 350 break;
345 } 351 case BUFFER_UNUSED:
346 case BUFFER_UNUSED: { 352 memcpy(se401->scratch[se401->scratch_next].data,
347 memcpy(se401->scratch[se401->scratch_next].data, (unsigned char *)urb->transfer_buffer, length); 353 (unsigned char *)urb->transfer_buffer, length);
348 se401->scratch[se401->scratch_next].state=BUFFER_READY; 354 se401->scratch[se401->scratch_next].state
349 se401->scratch[se401->scratch_next].offset=se401->bayeroffset; 355 = BUFFER_READY;
350 se401->scratch[se401->scratch_next].length=length; 356 se401->scratch[se401->scratch_next].offset
351 if (waitqueue_active(&se401->wq)) { 357 = se401->bayeroffset;
352 wake_up_interruptible(&se401->wq); 358 se401->scratch[se401->scratch_next].length = length;
353 } 359 if (waitqueue_active(&se401->wq))
354 se401->scratch_overflow=0; 360 wake_up_interruptible(&se401->wq);
355 se401->scratch_next++; 361 se401->scratch_overflow = 0;
356 if (se401->scratch_next>=SE401_NUMSCRATCH) 362 se401->scratch_next++;
357 se401->scratch_next=0; 363 if (se401->scratch_next >= SE401_NUMSCRATCH)
358 break; 364 se401->scratch_next = 0;
359 } 365 break;
360 }
361 se401->bayeroffset+=length;
362 if (se401->bayeroffset>=se401->cheight*se401->cwidth) {
363 se401->bayeroffset=0;
364 } 366 }
367 se401->bayeroffset += length;
368 if (se401->bayeroffset >= se401->cheight * se401->cwidth)
369 se401->bayeroffset = 0;
365 } else { 370 } else {
366 se401->nullpackets++; 371 se401->nullpackets++;
367 if (se401->nullpackets > SE401_MAX_NULLPACKETS) { 372 if (se401->nullpackets > SE401_MAX_NULLPACKETS)
368 if (waitqueue_active(&se401->wq)) { 373 if (waitqueue_active(&se401->wq))
369 wake_up_interruptible(&se401->wq); 374 wake_up_interruptible(&se401->wq);
370 }
371 }
372 } 375 }
373 376
374 /* Resubmit urb for new data */ 377 /* Resubmit urb for new data */
375 urb->status=0; 378 urb->status = 0;
376 urb->dev=se401->dev; 379 urb->dev = se401->dev;
377 if(usb_submit_urb(urb, GFP_KERNEL)) 380 if (usb_submit_urb(urb, GFP_KERNEL))
378 dev_info(&urb->dev->dev, "urb burned down\n"); 381 dev_info(&urb->dev->dev, "urb burned down\n");
379 return; 382 return;
380} 383}
381 384
382static void se401_send_size(struct usb_se401 *se401, int width, int height) 385static void se401_send_size(struct usb_se401 *se401, int width, int height)
383{ 386{
384 int i=0; 387 int i = 0;
385 int mode=0x03; /* No compression */ 388 int mode = 0x03; /* No compression */
386 int sendheight=height; 389 int sendheight = height;
387 int sendwidth=width; 390 int sendwidth = width;
388 391
389 /* JangGu compression can only be used with the camera supported sizes, 392 /* JangGu compression can only be used with the camera supported sizes,
390 but bayer seems to work with any size that fits on the sensor. 393 but bayer seems to work with any size that fits on the sensor.
@@ -392,18 +395,21 @@ static void se401_send_size(struct usb_se401 *se401, int width, int height)
392 4 or 16 times subcapturing, if not we use uncompressed bayer data 395 4 or 16 times subcapturing, if not we use uncompressed bayer data
393 but this will result in cutouts of the maximum size.... 396 but this will result in cutouts of the maximum size....
394 */ 397 */
395 while (i<se401->sizes && !(se401->width[i]==width && se401->height[i]==height)) 398 while (i < se401->sizes && !(se401->width[i] == width &&
399 se401->height[i] == height))
396 i++; 400 i++;
397 while (i<se401->sizes) { 401 while (i < se401->sizes) {
398 if (se401->width[i]==width*2 && se401->height[i]==height*2) { 402 if (se401->width[i] == width * 2 &&
399 sendheight=se401->height[i]; 403 se401->height[i] == height * 2) {
400 sendwidth=se401->width[i]; 404 sendheight = se401->height[i];
401 mode=0x40; 405 sendwidth = se401->width[i];
406 mode = 0x40;
402 } 407 }
403 if (se401->width[i]==width*4 && se401->height[i]==height*4) { 408 if (se401->width[i] == width * 4 &&
404 sendheight=se401->height[i]; 409 se401->height[i] == height * 4) {
405 sendwidth=se401->width[i]; 410 sendheight = se401->height[i];
406 mode=0x42; 411 sendwidth = se401->width[i];
412 mode = 0x42;
407 } 413 }
408 i++; 414 i++;
409 } 415 }
@@ -412,13 +418,10 @@ static void se401_send_size(struct usb_se401 *se401, int width, int height)
412 se401_sndctrl(1, se401, SE401_REQ_SET_HEIGHT, sendheight, NULL, 0); 418 se401_sndctrl(1, se401, SE401_REQ_SET_HEIGHT, sendheight, NULL, 0);
413 se401_set_feature(se401, SE401_OPERATINGMODE, mode); 419 se401_set_feature(se401, SE401_OPERATINGMODE, mode);
414 420
415 if (mode==0x03) { 421 if (mode == 0x03)
416 se401->format=FMT_BAYER; 422 se401->format = FMT_BAYER;
417 } else { 423 else
418 se401->format=FMT_JANGGU; 424 se401->format = FMT_JANGGU;
419 }
420
421 return;
422} 425}
423 426
424/* 427/*
@@ -429,29 +432,31 @@ static void se401_send_size(struct usb_se401 *se401, int width, int height)
429static int se401_start_stream(struct usb_se401 *se401) 432static int se401_start_stream(struct usb_se401 *se401)
430{ 433{
431 struct urb *urb; 434 struct urb *urb;
432 int err=0, i; 435 int err = 0, i;
433 se401->streaming=1; 436 se401->streaming = 1;
434 437
435 se401_sndctrl(1, se401, SE401_REQ_CAMERA_POWER, 1, NULL, 0); 438 se401_sndctrl(1, se401, SE401_REQ_CAMERA_POWER, 1, NULL, 0);
436 se401_sndctrl(1, se401, SE401_REQ_LED_CONTROL, 1, NULL, 0); 439 se401_sndctrl(1, se401, SE401_REQ_LED_CONTROL, 1, NULL, 0);
437 440
438 /* Set picture settings */ 441 /* Set picture settings */
439 se401_set_feature(se401, HV7131_REG_MODE_B, 0x05);/*windowed + pix intg */ 442 /* windowed + pix intg */
443 se401_set_feature(se401, HV7131_REG_MODE_B, 0x05);
440 se401_send_pict(se401); 444 se401_send_pict(se401);
441 445
442 se401_send_size(se401, se401->cwidth, se401->cheight); 446 se401_send_size(se401, se401->cwidth, se401->cheight);
443 447
444 se401_sndctrl(1, se401, SE401_REQ_START_CONTINUOUS_CAPTURE, 0, NULL, 0); 448 se401_sndctrl(1, se401, SE401_REQ_START_CONTINUOUS_CAPTURE,
449 0, NULL, 0);
445 450
446 /* Do some memory allocation */ 451 /* Do some memory allocation */
447 for (i=0; i<SE401_NUMFRAMES; i++) { 452 for (i = 0; i < SE401_NUMFRAMES; i++) {
448 se401->frame[i].data=se401->fbuf + i * se401->maxframesize; 453 se401->frame[i].data = se401->fbuf + i * se401->maxframesize;
449 se401->frame[i].curpix=0; 454 se401->frame[i].curpix = 0;
450 } 455 }
451 for (i=0; i<SE401_NUMSBUF; i++) { 456 for (i = 0; i < SE401_NUMSBUF; i++) {
452 se401->sbuf[i].data=kmalloc(SE401_PACKETSIZE, GFP_KERNEL); 457 se401->sbuf[i].data = kmalloc(SE401_PACKETSIZE, GFP_KERNEL);
453 if (!se401->sbuf[i].data) { 458 if (!se401->sbuf[i].data) {
454 for(i = i - 1; i >= 0; i--) { 459 for (i = i - 1; i >= 0; i--) {
455 kfree(se401->sbuf[i].data); 460 kfree(se401->sbuf[i].data);
456 se401->sbuf[i].data = NULL; 461 se401->sbuf[i].data = NULL;
457 } 462 }
@@ -459,26 +464,26 @@ static int se401_start_stream(struct usb_se401 *se401)
459 } 464 }
460 } 465 }
461 466
462 se401->bayeroffset=0; 467 se401->bayeroffset = 0;
463 se401->scratch_next=0; 468 se401->scratch_next = 0;
464 se401->scratch_use=0; 469 se401->scratch_use = 0;
465 se401->scratch_overflow=0; 470 se401->scratch_overflow = 0;
466 for (i=0; i<SE401_NUMSCRATCH; i++) { 471 for (i = 0; i < SE401_NUMSCRATCH; i++) {
467 se401->scratch[i].data=kmalloc(SE401_PACKETSIZE, GFP_KERNEL); 472 se401->scratch[i].data = kmalloc(SE401_PACKETSIZE, GFP_KERNEL);
468 if (!se401->scratch[i].data) { 473 if (!se401->scratch[i].data) {
469 for(i = i - 1; i >= 0; i--) { 474 for (i = i - 1; i >= 0; i--) {
470 kfree(se401->scratch[i].data); 475 kfree(se401->scratch[i].data);
471 se401->scratch[i].data = NULL; 476 se401->scratch[i].data = NULL;
472 } 477 }
473 goto nomem_sbuf; 478 goto nomem_sbuf;
474 } 479 }
475 se401->scratch[i].state=BUFFER_UNUSED; 480 se401->scratch[i].state = BUFFER_UNUSED;
476 } 481 }
477 482
478 for (i=0; i<SE401_NUMSBUF; i++) { 483 for (i = 0; i < SE401_NUMSBUF; i++) {
479 urb=usb_alloc_urb(0, GFP_KERNEL); 484 urb = usb_alloc_urb(0, GFP_KERNEL);
480 if(!urb) { 485 if (!urb) {
481 for(i = i - 1; i >= 0; i--) { 486 for (i = i - 1; i >= 0; i--) {
482 usb_kill_urb(se401->urb[i]); 487 usb_kill_urb(se401->urb[i]);
483 usb_free_urb(se401->urb[i]); 488 usb_free_urb(se401->urb[i]);
484 se401->urb[i] = NULL; 489 se401->urb[i] = NULL;
@@ -492,24 +497,24 @@ static int se401_start_stream(struct usb_se401 *se401)
492 se401_video_irq, 497 se401_video_irq,
493 se401); 498 se401);
494 499
495 se401->urb[i]=urb; 500 se401->urb[i] = urb;
496 501
497 err=usb_submit_urb(se401->urb[i], GFP_KERNEL); 502 err = usb_submit_urb(se401->urb[i], GFP_KERNEL);
498 if(err) 503 if (err)
499 err("urb burned down"); 504 err("urb burned down");
500 } 505 }
501 506
502 se401->framecount=0; 507 se401->framecount = 0;
503 508
504 return 0; 509 return 0;
505 510
506 nomem_scratch: 511 nomem_scratch:
507 for (i=0; i<SE401_NUMSCRATCH; i++) { 512 for (i = 0; i < SE401_NUMSCRATCH; i++) {
508 kfree(se401->scratch[i].data); 513 kfree(se401->scratch[i].data);
509 se401->scratch[i].data = NULL; 514 se401->scratch[i].data = NULL;
510 } 515 }
511 nomem_sbuf: 516 nomem_sbuf:
512 for (i=0; i<SE401_NUMSBUF; i++) { 517 for (i = 0; i < SE401_NUMSBUF; i++) {
513 kfree(se401->sbuf[i].data); 518 kfree(se401->sbuf[i].data);
514 se401->sbuf[i].data = NULL; 519 se401->sbuf[i].data = NULL;
515 } 520 }
@@ -523,22 +528,23 @@ static int se401_stop_stream(struct usb_se401 *se401)
523 if (!se401->streaming || !se401->dev) 528 if (!se401->streaming || !se401->dev)
524 return 1; 529 return 1;
525 530
526 se401->streaming=0; 531 se401->streaming = 0;
527 532
528 se401_sndctrl(1, se401, SE401_REQ_STOP_CONTINUOUS_CAPTURE, 0, NULL, 0); 533 se401_sndctrl(1, se401, SE401_REQ_STOP_CONTINUOUS_CAPTURE, 0, NULL, 0);
529 534
530 se401_sndctrl(1, se401, SE401_REQ_LED_CONTROL, 0, NULL, 0); 535 se401_sndctrl(1, se401, SE401_REQ_LED_CONTROL, 0, NULL, 0);
531 se401_sndctrl(1, se401, SE401_REQ_CAMERA_POWER, 0, NULL, 0); 536 se401_sndctrl(1, se401, SE401_REQ_CAMERA_POWER, 0, NULL, 0);
532 537
533 for (i=0; i<SE401_NUMSBUF; i++) if (se401->urb[i]) { 538 for (i = 0; i < SE401_NUMSBUF; i++)
534 usb_kill_urb(se401->urb[i]); 539 if (se401->urb[i]) {
535 usb_free_urb(se401->urb[i]); 540 usb_kill_urb(se401->urb[i]);
536 se401->urb[i]=NULL; 541 usb_free_urb(se401->urb[i]);
537 kfree(se401->sbuf[i].data); 542 se401->urb[i] = NULL;
538 } 543 kfree(se401->sbuf[i].data);
539 for (i=0; i<SE401_NUMSCRATCH; i++) { 544 }
545 for (i = 0; i < SE401_NUMSCRATCH; i++) {
540 kfree(se401->scratch[i].data); 546 kfree(se401->scratch[i].data);
541 se401->scratch[i].data=NULL; 547 se401->scratch[i].data = NULL;
542 } 548 }
543 549
544 return 0; 550 return 0;
@@ -546,9 +552,9 @@ static int se401_stop_stream(struct usb_se401 *se401)
546 552
547static int se401_set_size(struct usb_se401 *se401, int width, int height) 553static int se401_set_size(struct usb_se401 *se401, int width, int height)
548{ 554{
549 int wasstreaming=se401->streaming; 555 int wasstreaming = se401->streaming;
550 /* Check to see if we need to change */ 556 /* Check to see if we need to change */
551 if (se401->cwidth==width && se401->cheight==height) 557 if (se401->cwidth == width && se401->cheight == height)
552 return 0; 558 return 0;
553 559
554 /* Check for a valid mode */ 560 /* Check for a valid mode */
@@ -556,16 +562,16 @@ static int se401_set_size(struct usb_se401 *se401, int width, int height)
556 return 1; 562 return 1;
557 if ((width & 1) || (height & 1)) 563 if ((width & 1) || (height & 1))
558 return 1; 564 return 1;
559 if (width>se401->width[se401->sizes-1]) 565 if (width > se401->width[se401->sizes-1])
560 return 1; 566 return 1;
561 if (height>se401->height[se401->sizes-1]) 567 if (height > se401->height[se401->sizes-1])
562 return 1; 568 return 1;
563 569
564 /* Stop a current stream and start it again at the new size */ 570 /* Stop a current stream and start it again at the new size */
565 if (wasstreaming) 571 if (wasstreaming)
566 se401_stop_stream(se401); 572 se401_stop_stream(se401);
567 se401->cwidth=width; 573 se401->cwidth = width;
568 se401->cheight=height; 574 se401->cheight = height;
569 if (wasstreaming) 575 if (wasstreaming)
570 se401_start_stream(se401); 576 se401_start_stream(se401);
571 return 0; 577 return 0;
@@ -586,68 +592,68 @@ static int se401_set_size(struct usb_se401 *se401, int width, int height)
586static inline void enhance_picture(unsigned char *frame, int len) 592static inline void enhance_picture(unsigned char *frame, int len)
587{ 593{
588 while (len--) { 594 while (len--) {
589 *frame=(((*frame^255)*(*frame^255))/255)^255; 595 *frame = (((*frame^255)*(*frame^255))/255)^255;
590 frame++; 596 frame++;
591 } 597 }
592} 598}
593 599
594static inline void decode_JangGu_integrate(struct usb_se401 *se401, int data) 600static inline void decode_JangGu_integrate(struct usb_se401 *se401, int data)
595{ 601{
596 struct se401_frame *frame=&se401->frame[se401->curframe]; 602 struct se401_frame *frame = &se401->frame[se401->curframe];
597 int linelength=se401->cwidth*3; 603 int linelength = se401->cwidth * 3;
598 604
599 if (frame->curlinepix >= linelength) { 605 if (frame->curlinepix >= linelength) {
600 frame->curlinepix=0; 606 frame->curlinepix = 0;
601 frame->curline+=linelength; 607 frame->curline += linelength;
602 } 608 }
603 609
604 /* First three are absolute, all others relative. 610 /* First three are absolute, all others relative.
605 * Format is rgb from right to left (mirrorred image), 611 * Format is rgb from right to left (mirrorred image),
606 * we flip it to get bgr from left to right. */ 612 * we flip it to get bgr from left to right. */
607 if (frame->curlinepix < 3) { 613 if (frame->curlinepix < 3)
608 *(frame->curline-frame->curlinepix)=1+data*4; 614 *(frame->curline-frame->curlinepix) = 1 + data * 4;
609 } else { 615 else
610 *(frame->curline-frame->curlinepix)= 616 *(frame->curline-frame->curlinepix) =
611 *(frame->curline-frame->curlinepix+3)+data*4; 617 *(frame->curline-frame->curlinepix + 3) + data * 4;
612 }
613 frame->curlinepix++; 618 frame->curlinepix++;
614} 619}
615 620
616static inline void decode_JangGu_vlc (struct usb_se401 *se401, unsigned char *data, int bit_exp, int packetlength) 621static inline void decode_JangGu_vlc(struct usb_se401 *se401,
622 unsigned char *data, int bit_exp, int packetlength)
617{ 623{
618 int pos=0; 624 int pos = 0;
619 int vlc_cod=0; 625 int vlc_cod = 0;
620 int vlc_size=0; 626 int vlc_size = 0;
621 int vlc_data=0; 627 int vlc_data = 0;
622 int bit_cur; 628 int bit_cur;
623 int bit; 629 int bit;
624 data+=4; 630 data += 4;
625 while (pos < packetlength) { 631 while (pos < packetlength) {
626 bit_cur=8; 632 bit_cur = 8;
627 while (bit_cur && bit_exp) { 633 while (bit_cur && bit_exp) {
628 bit=((*data)>>(bit_cur-1))&1; 634 bit = ((*data) >> (bit_cur-1))&1;
629 if (!vlc_cod) { 635 if (!vlc_cod) {
630 if (bit) { 636 if (bit) {
631 vlc_size++; 637 vlc_size++;
632 } else { 638 } else {
633 if (!vlc_size) { 639 if (!vlc_size)
634 decode_JangGu_integrate(se401, 0); 640 decode_JangGu_integrate(se401, 0);
635 } else { 641 else {
636 vlc_cod=2; 642 vlc_cod = 2;
637 vlc_data=0; 643 vlc_data = 0;
638 } 644 }
639 } 645 }
640 } else { 646 } else {
641 if (vlc_cod==2) { 647 if (vlc_cod == 2) {
642 if (!bit) 648 if (!bit)
643 vlc_data = -(1<<vlc_size) + 1; 649 vlc_data = -(1 << vlc_size) + 1;
644 vlc_cod--; 650 vlc_cod--;
645 } 651 }
646 vlc_size--; 652 vlc_size--;
647 vlc_data+=bit<<vlc_size; 653 vlc_data += bit << vlc_size;
648 if (!vlc_size) { 654 if (!vlc_size) {
649 decode_JangGu_integrate(se401, vlc_data); 655 decode_JangGu_integrate(se401, vlc_data);
650 vlc_cod=0; 656 vlc_cod = 0;
651 } 657 }
652 } 658 }
653 bit_cur--; 659 bit_cur--;
@@ -658,186 +664,188 @@ static inline void decode_JangGu_vlc (struct usb_se401 *se401, unsigned char *da
658 } 664 }
659} 665}
660 666
661static inline void decode_JangGu (struct usb_se401 *se401, struct se401_scratch *buffer) 667static inline void decode_JangGu(struct usb_se401 *se401,
668 struct se401_scratch *buffer)
662{ 669{
663 unsigned char *data=buffer->data; 670 unsigned char *data = buffer->data;
664 int len=buffer->length; 671 int len = buffer->length;
665 int bit_exp=0, pix_exp=0, frameinfo=0, packetlength=0, size; 672 int bit_exp = 0, pix_exp = 0, frameinfo = 0, packetlength = 0, size;
666 int datapos=0; 673 int datapos = 0;
667 674
668 /* New image? */ 675 /* New image? */
669 if (!se401->frame[se401->curframe].curpix) { 676 if (!se401->frame[se401->curframe].curpix) {
670 se401->frame[se401->curframe].curlinepix=0; 677 se401->frame[se401->curframe].curlinepix = 0;
671 se401->frame[se401->curframe].curline= 678 se401->frame[se401->curframe].curline =
672 se401->frame[se401->curframe].data+ 679 se401->frame[se401->curframe].data+
673 se401->cwidth*3-1; 680 se401->cwidth * 3 - 1;
674 if (se401->frame[se401->curframe].grabstate==FRAME_READY) 681 if (se401->frame[se401->curframe].grabstate == FRAME_READY)
675 se401->frame[se401->curframe].grabstate=FRAME_GRABBING; 682 se401->frame[se401->curframe].grabstate = FRAME_GRABBING;
676 se401->vlcdatapos=0; 683 se401->vlcdatapos = 0;
677 } 684 }
678 while (datapos < len) { 685 while (datapos < len) {
679 size=1024-se401->vlcdatapos; 686 size = 1024 - se401->vlcdatapos;
680 if (size+datapos > len) 687 if (size+datapos > len)
681 size=len-datapos; 688 size = len-datapos;
682 memcpy(se401->vlcdata+se401->vlcdatapos, data+datapos, size); 689 memcpy(se401->vlcdata+se401->vlcdatapos, data+datapos, size);
683 se401->vlcdatapos+=size; 690 se401->vlcdatapos += size;
684 packetlength=0; 691 packetlength = 0;
685 if (se401->vlcdatapos >= 4) { 692 if (se401->vlcdatapos >= 4) {
686 bit_exp=se401->vlcdata[3]+(se401->vlcdata[2]<<8); 693 bit_exp = se401->vlcdata[3] + (se401->vlcdata[2] << 8);
687 pix_exp=se401->vlcdata[1]+((se401->vlcdata[0]&0x3f)<<8); 694 pix_exp = se401->vlcdata[1] +
688 frameinfo=se401->vlcdata[0]&0xc0; 695 ((se401->vlcdata[0] & 0x3f) << 8);
689 packetlength=((bit_exp+47)>>4)<<1; 696 frameinfo = se401->vlcdata[0] & 0xc0;
697 packetlength = ((bit_exp + 47) >> 4) << 1;
690 if (packetlength > 1024) { 698 if (packetlength > 1024) {
691 se401->vlcdatapos=0; 699 se401->vlcdatapos = 0;
692 datapos=len; 700 datapos = len;
693 packetlength=0; 701 packetlength = 0;
694 se401->error++; 702 se401->error++;
695 se401->frame[se401->curframe].curpix=0; 703 se401->frame[se401->curframe].curpix = 0;
696 } 704 }
697 } 705 }
698 if (packetlength && se401->vlcdatapos >= packetlength) { 706 if (packetlength && se401->vlcdatapos >= packetlength) {
699 decode_JangGu_vlc(se401, se401->vlcdata, bit_exp, packetlength); 707 decode_JangGu_vlc(se401, se401->vlcdata, bit_exp,
700 se401->frame[se401->curframe].curpix+=pix_exp*3; 708 packetlength);
701 datapos+=size-(se401->vlcdatapos-packetlength); 709 se401->frame[se401->curframe].curpix += pix_exp * 3;
702 se401->vlcdatapos=0; 710 datapos += size-(se401->vlcdatapos-packetlength);
703 if (se401->frame[se401->curframe].curpix>=se401->cwidth*se401->cheight*3) { 711 se401->vlcdatapos = 0;
704 if (se401->frame[se401->curframe].curpix==se401->cwidth*se401->cheight*3) { 712 if (se401->frame[se401->curframe].curpix >= se401->cwidth * se401->cheight * 3) {
705 if (se401->frame[se401->curframe].grabstate==FRAME_GRABBING) { 713 if (se401->frame[se401->curframe].curpix == se401->cwidth * se401->cheight * 3) {
706 se401->frame[se401->curframe].grabstate=FRAME_DONE; 714 if (se401->frame[se401->curframe].grabstate == FRAME_GRABBING) {
715 se401->frame[se401->curframe].grabstate = FRAME_DONE;
707 se401->framecount++; 716 se401->framecount++;
708 se401->readcount++; 717 se401->readcount++;
709 } 718 }
710 if (se401->frame[(se401->curframe+1)&(SE401_NUMFRAMES-1)].grabstate==FRAME_READY) { 719 if (se401->frame[(se401->curframe + 1) & (SE401_NUMFRAMES - 1)].grabstate == FRAME_READY)
711 se401->curframe=(se401->curframe+1) & (SE401_NUMFRAMES-1); 720 se401->curframe = (se401->curframe + 1) & (SE401_NUMFRAMES - 1);
712 } 721 } else
713 } else {
714 se401->error++; 722 se401->error++;
715 } 723 se401->frame[se401->curframe].curpix = 0;
716 se401->frame[se401->curframe].curpix=0; 724 datapos = len;
717 datapos=len;
718 } 725 }
719 } else { 726 } else
720 datapos+=size; 727 datapos += size;
721 }
722 } 728 }
723} 729}
724 730
725static inline void decode_bayer (struct usb_se401 *se401, struct se401_scratch *buffer) 731static inline void decode_bayer(struct usb_se401 *se401,
732 struct se401_scratch *buffer)
726{ 733{
727 unsigned char *data=buffer->data; 734 unsigned char *data = buffer->data;
728 int len=buffer->length; 735 int len = buffer->length;
729 int offset=buffer->offset; 736 int offset = buffer->offset;
730 int datasize=se401->cwidth*se401->cheight; 737 int datasize = se401->cwidth * se401->cheight;
731 struct se401_frame *frame=&se401->frame[se401->curframe]; 738 struct se401_frame *frame = &se401->frame[se401->curframe];
739 unsigned char *framedata = frame->data, *curline, *nextline;
740 int width = se401->cwidth;
741 int blineoffset = 0, bline;
742 int linelength = width * 3, i;
732 743
733 unsigned char *framedata=frame->data, *curline, *nextline;
734 int width=se401->cwidth;
735 int blineoffset=0, bline;
736 int linelength=width*3, i;
737 744
745 if (frame->curpix == 0) {
746 if (frame->grabstate == FRAME_READY)
747 frame->grabstate = FRAME_GRABBING;
738 748
739 if (frame->curpix==0) { 749 frame->curline = framedata + linelength;
740 if (frame->grabstate==FRAME_READY) { 750 frame->curlinepix = 0;
741 frame->grabstate=FRAME_GRABBING;
742 }
743 frame->curline=framedata+linelength;
744 frame->curlinepix=0;
745 } 751 }
746 752
747 if (offset!=frame->curpix) { 753 if (offset != frame->curpix) {
748 /* Regard frame as lost :( */ 754 /* Regard frame as lost :( */
749 frame->curpix=0; 755 frame->curpix = 0;
750 se401->error++; 756 se401->error++;
751 return; 757 return;
752 } 758 }
753 759
754 /* Check if we have to much data */ 760 /* Check if we have to much data */
755 if (frame->curpix+len > datasize) { 761 if (frame->curpix + len > datasize)
756 len=datasize-frame->curpix; 762 len = datasize-frame->curpix;
757 } 763
758 if (se401->cheight%4) 764 if (se401->cheight % 4)
759 blineoffset=1; 765 blineoffset = 1;
760 bline=frame->curpix/se401->cwidth+blineoffset; 766 bline = frame->curpix / se401->cwidth+blineoffset;
761 767
762 curline=frame->curline; 768 curline = frame->curline;
763 nextline=curline+linelength; 769 nextline = curline + linelength;
764 if (nextline >= framedata+datasize*3) 770 if (nextline >= framedata+datasize * 3)
765 nextline=curline; 771 nextline = curline;
766 while (len) { 772 while (len) {
767 if (frame->curlinepix>=width) { 773 if (frame->curlinepix >= width) {
768 frame->curlinepix-=width; 774 frame->curlinepix -= width;
769 bline=frame->curpix/width+blineoffset; 775 bline = frame->curpix / width + blineoffset;
770 curline+=linelength*2; 776 curline += linelength*2;
771 nextline+=linelength*2; 777 nextline += linelength*2;
772 if (curline >= framedata+datasize*3) { 778 if (curline >= framedata+datasize * 3) {
773 frame->curlinepix++; 779 frame->curlinepix++;
774 curline-=3; 780 curline -= 3;
775 nextline-=3; 781 nextline -= 3;
776 len--; 782 len--;
777 data++; 783 data++;
778 frame->curpix++; 784 frame->curpix++;
779 } 785 }
780 if (nextline >= framedata+datasize*3) 786 if (nextline >= framedata+datasize*3)
781 nextline=curline; 787 nextline = curline;
782 } 788 }
783 if ((bline&1)) { 789 if (bline & 1) {
784 if ((frame->curlinepix&1)) { 790 if (frame->curlinepix & 1) {
785 *(curline+2)=*data; 791 *(curline + 2) = *data;
786 *(curline-1)=*data; 792 *(curline - 1) = *data;
787 *(nextline+2)=*data; 793 *(nextline + 2) = *data;
788 *(nextline-1)=*data; 794 *(nextline - 1) = *data;
789 } else { 795 } else {
790 *(curline+1)= 796 *(curline + 1) =
791 (*(curline+1)+*data)/2; 797 (*(curline + 1) + *data) / 2;
792 *(curline-2)= 798 *(curline-2) =
793 (*(curline-2)+*data)/2; 799 (*(curline - 2) + *data) / 2;
794 *(nextline+1)=*data; 800 *(nextline + 1) = *data;
795 *(nextline-2)=*data; 801 *(nextline - 2) = *data;
796 } 802 }
797 } else { 803 } else {
798 if ((frame->curlinepix&1)) { 804 if (frame->curlinepix & 1) {
799 *(curline+1)= 805 *(curline + 1) =
800 (*(curline+1)+*data)/2; 806 (*(curline + 1) + *data) / 2;
801 *(curline-2)= 807 *(curline - 2) =
802 (*(curline-2)+*data)/2; 808 (*(curline - 2) + *data) / 2;
803 *(nextline+1)=*data; 809 *(nextline + 1) = *data;
804 *(nextline-2)=*data; 810 *(nextline - 2) = *data;
805 } else { 811 } else {
806 *curline=*data; 812 *curline = *data;
807 *(curline-3)=*data; 813 *(curline - 3) = *data;
808 *nextline=*data; 814 *nextline = *data;
809 *(nextline-3)=*data; 815 *(nextline - 3) = *data;
810 } 816 }
811 } 817 }
812 frame->curlinepix++; 818 frame->curlinepix++;
813 curline-=3; 819 curline -= 3;
814 nextline-=3; 820 nextline -= 3;
815 len--; 821 len--;
816 data++; 822 data++;
817 frame->curpix++; 823 frame->curpix++;
818 } 824 }
819 frame->curline=curline; 825 frame->curline = curline;
820 826
821 if (frame->curpix>=datasize) { 827 if (frame->curpix >= datasize) {
822 /* Fix the top line */ 828 /* Fix the top line */
823 framedata+=linelength; 829 framedata += linelength;
824 for (i=0; i<linelength; i++) { 830 for (i = 0; i < linelength; i++) {
825 framedata--; 831 framedata--;
826 *framedata=*(framedata+linelength); 832 *framedata = *(framedata + linelength);
827 } 833 }
828 /* Fix the left side (green is already present) */ 834 /* Fix the left side (green is already present) */
829 for (i=0; i<se401->cheight; i++) { 835 for (i = 0; i < se401->cheight; i++) {
830 *framedata=*(framedata+3); 836 *framedata = *(framedata + 3);
831 *(framedata+1)=*(framedata+4); 837 *(framedata + 1) = *(framedata + 4);
832 *(framedata+2)=*(framedata+5); 838 *(framedata + 2) = *(framedata + 5);
833 framedata+=linelength; 839 framedata += linelength;
834 } 840 }
835 frame->curpix=0; 841 frame->curpix = 0;
836 frame->grabstate=FRAME_DONE; 842 frame->grabstate = FRAME_DONE;
837 se401->framecount++; 843 se401->framecount++;
838 se401->readcount++; 844 se401->readcount++;
839 if (se401->frame[(se401->curframe+1)&(SE401_NUMFRAMES-1)].grabstate==FRAME_READY) { 845 if (se401->frame[(se401->curframe + 1) &
840 se401->curframe=(se401->curframe+1) & (SE401_NUMFRAMES-1); 846 (SE401_NUMFRAMES - 1)].grabstate == FRAME_READY) {
847 se401->curframe = (se401->curframe+1) &
848 (SE401_NUMFRAMES-1);
841 } 849 }
842 } 850 }
843} 851}
@@ -845,72 +853,76 @@ static inline void decode_bayer (struct usb_se401 *se401, struct se401_scratch *
845static int se401_newframe(struct usb_se401 *se401, int framenr) 853static int se401_newframe(struct usb_se401 *se401, int framenr)
846{ 854{
847 DECLARE_WAITQUEUE(wait, current); 855 DECLARE_WAITQUEUE(wait, current);
848 int errors=0; 856 int errors = 0;
849 857
850 while (se401->streaming && 858 while (se401->streaming &&
851 (se401->frame[framenr].grabstate==FRAME_READY || 859 (se401->frame[framenr].grabstate == FRAME_READY ||
852 se401->frame[framenr].grabstate==FRAME_GRABBING) ) { 860 se401->frame[framenr].grabstate == FRAME_GRABBING)) {
853 if(!se401->frame[framenr].curpix) { 861 if (!se401->frame[framenr].curpix)
854 errors++; 862 errors++;
855 } 863
856 wait_interruptible( 864 wait_interruptible(
857 se401->scratch[se401->scratch_use].state!=BUFFER_READY, 865 se401->scratch[se401->scratch_use].state != BUFFER_READY,
858 &se401->wq, 866 &se401->wq, &wait);
859 &wait
860 );
861 if (se401->nullpackets > SE401_MAX_NULLPACKETS) { 867 if (se401->nullpackets > SE401_MAX_NULLPACKETS) {
862 se401->nullpackets=0; 868 se401->nullpackets = 0;
863 dev_info(&se401->dev->dev, 869 dev_info(&se401->dev->dev,
864 "too many null length packets, restarting capture\n"); 870 "too many null length packets, restarting capture\n");
865 se401_stop_stream(se401); 871 se401_stop_stream(se401);
866 se401_start_stream(se401); 872 se401_start_stream(se401);
867 } else { 873 } else {
868 if (se401->scratch[se401->scratch_use].state!=BUFFER_READY) { 874 if (se401->scratch[se401->scratch_use].state !=
869 se401->frame[framenr].grabstate=FRAME_ERROR; 875 BUFFER_READY) {
876 se401->frame[framenr].grabstate = FRAME_ERROR;
870 return -EIO; 877 return -EIO;
871 } 878 }
872 se401->scratch[se401->scratch_use].state=BUFFER_BUSY; 879 se401->scratch[se401->scratch_use].state = BUFFER_BUSY;
873 if (se401->format==FMT_JANGGU) { 880 if (se401->format == FMT_JANGGU)
874 decode_JangGu(se401, &se401->scratch[se401->scratch_use]); 881 decode_JangGu(se401,
875 } else { 882 &se401->scratch[se401->scratch_use]);
876 decode_bayer(se401, &se401->scratch[se401->scratch_use]); 883 else
877 } 884 decode_bayer(se401,
878 se401->scratch[se401->scratch_use].state=BUFFER_UNUSED; 885 &se401->scratch[se401->scratch_use]);
886
887 se401->scratch[se401->scratch_use].state =
888 BUFFER_UNUSED;
879 se401->scratch_use++; 889 se401->scratch_use++;
880 if (se401->scratch_use>=SE401_NUMSCRATCH) 890 if (se401->scratch_use >= SE401_NUMSCRATCH)
881 se401->scratch_use=0; 891 se401->scratch_use = 0;
882 if (errors > SE401_MAX_ERRORS) { 892 if (errors > SE401_MAX_ERRORS) {
883 errors=0; 893 errors = 0;
884 dev_info(&se401->dev->dev, 894 dev_info(&se401->dev->dev,
885 "too many errors, restarting capture\n"); 895 "too many errors, restarting capture\n");
886 se401_stop_stream(se401); 896 se401_stop_stream(se401);
887 se401_start_stream(se401); 897 se401_start_stream(se401);
888 } 898 }
889 } 899 }
890 } 900 }
891 901
892 if (se401->frame[framenr].grabstate==FRAME_DONE) 902 if (se401->frame[framenr].grabstate == FRAME_DONE)
893 if (se401->enhance) 903 if (se401->enhance)
894 enhance_picture(se401->frame[framenr].data, se401->cheight*se401->cwidth*3); 904 enhance_picture(se401->frame[framenr].data,
905 se401->cheight * se401->cwidth * 3);
895 return 0; 906 return 0;
896} 907}
897 908
898static void usb_se401_remove_disconnected (struct usb_se401 *se401) 909static void usb_se401_remove_disconnected(struct usb_se401 *se401)
899{ 910{
900 int i; 911 int i;
901 912
902 se401->dev = NULL; 913 se401->dev = NULL;
903 914
904 for (i=0; i<SE401_NUMSBUF; i++) 915 for (i = 0; i < SE401_NUMSBUF; i++)
905 if (se401->urb[i]) { 916 if (se401->urb[i]) {
906 usb_kill_urb(se401->urb[i]); 917 usb_kill_urb(se401->urb[i]);
907 usb_free_urb(se401->urb[i]); 918 usb_free_urb(se401->urb[i]);
908 se401->urb[i] = NULL; 919 se401->urb[i] = NULL;
909 kfree(se401->sbuf[i].data); 920 kfree(se401->sbuf[i].data);
910 } 921 }
911 for (i=0; i<SE401_NUMSCRATCH; i++) { 922
923 for (i = 0; i < SE401_NUMSCRATCH; i++)
912 kfree(se401->scratch[i].data); 924 kfree(se401->scratch[i].data);
913 } 925
914 if (se401->inturb) { 926 if (se401->inturb) {
915 usb_kill_urb(se401->inturb); 927 usb_kill_urb(se401->inturb);
916 usb_free_urb(se401->inturb); 928 usb_free_urb(se401->inturb);
@@ -965,11 +977,11 @@ static int se401_close(struct file *file)
965 dev_info(&se401->dev->dev, "device unregistered\n"); 977 dev_info(&se401->dev->dev, "device unregistered\n");
966 usb_se401_remove_disconnected(se401); 978 usb_se401_remove_disconnected(se401);
967 } else { 979 } else {
968 for (i=0; i<SE401_NUMFRAMES; i++) 980 for (i = 0; i < SE401_NUMFRAMES; i++)
969 se401->frame[i].grabstate=FRAME_UNUSED; 981 se401->frame[i].grabstate = FRAME_UNUSED;
970 if (se401->streaming) 982 if (se401->streaming)
971 se401_stop_stream(se401); 983 se401_stop_stream(se401);
972 se401->user=0; 984 se401->user = 0;
973 } 985 }
974 file->private_data = NULL; 986 file->private_data = NULL;
975 return 0; 987 return 0;
@@ -1065,7 +1077,7 @@ static long se401_do_ioctl(struct file *file, unsigned int cmd, void *arg)
1065 memset(vm, 0, sizeof(*vm)); 1077 memset(vm, 0, sizeof(*vm));
1066 vm->size = SE401_NUMFRAMES * se401->maxframesize; 1078 vm->size = SE401_NUMFRAMES * se401->maxframesize;
1067 vm->frames = SE401_NUMFRAMES; 1079 vm->frames = SE401_NUMFRAMES;
1068 for (i=0; i<SE401_NUMFRAMES; i++) 1080 for (i = 0; i < SE401_NUMFRAMES; i++)
1069 vm->offsets[i] = se401->maxframesize * i; 1081 vm->offsets[i] = se401->maxframesize * i;
1070 return 0; 1082 return 0;
1071 } 1083 }
@@ -1083,16 +1095,16 @@ static long se401_do_ioctl(struct file *file, unsigned int cmd, void *arg)
1083 /* Is this according to the v4l spec??? */ 1095 /* Is this according to the v4l spec??? */
1084 if (se401_set_size(se401, vm->width, vm->height)) 1096 if (se401_set_size(se401, vm->width, vm->height))
1085 return -EINVAL; 1097 return -EINVAL;
1086 se401->frame[vm->frame].grabstate=FRAME_READY; 1098 se401->frame[vm->frame].grabstate = FRAME_READY;
1087 1099
1088 if (!se401->streaming) 1100 if (!se401->streaming)
1089 se401_start_stream(se401); 1101 se401_start_stream(se401);
1090 1102
1091 /* Set the picture properties */ 1103 /* Set the picture properties */
1092 if (se401->framecount==0) 1104 if (se401->framecount == 0)
1093 se401_send_pict(se401); 1105 se401_send_pict(se401);
1094 /* Calibrate the reset level after a few frames. */ 1106 /* Calibrate the reset level after a few frames. */
1095 if (se401->framecount%20==1) 1107 if (se401->framecount % 20 == 1)
1096 se401_auto_resetlevel(se401); 1108 se401_auto_resetlevel(se401);
1097 1109
1098 return 0; 1110 return 0;
@@ -1100,13 +1112,13 @@ static long se401_do_ioctl(struct file *file, unsigned int cmd, void *arg)
1100 case VIDIOCSYNC: 1112 case VIDIOCSYNC:
1101 { 1113 {
1102 int *frame = arg; 1114 int *frame = arg;
1103 int ret=0; 1115 int ret = 0;
1104 1116
1105 if(*frame <0 || *frame >= SE401_NUMFRAMES) 1117 if (*frame < 0 || *frame >= SE401_NUMFRAMES)
1106 return -EINVAL; 1118 return -EINVAL;
1107 1119
1108 ret=se401_newframe(se401, *frame); 1120 ret = se401_newframe(se401, *frame);
1109 se401->frame[*frame].grabstate=FRAME_UNUSED; 1121 se401->frame[*frame].grabstate = FRAME_UNUSED;
1110 return ret; 1122 return ret;
1111 } 1123 }
1112 case VIDIOCGFBUF: 1124 case VIDIOCGFBUF:
@@ -1147,36 +1159,36 @@ static long se401_ioctl(struct file *file,
1147static ssize_t se401_read(struct file *file, char __user *buf, 1159static ssize_t se401_read(struct file *file, char __user *buf,
1148 size_t count, loff_t *ppos) 1160 size_t count, loff_t *ppos)
1149{ 1161{
1150 int realcount=count, ret=0; 1162 int realcount = count, ret = 0;
1151 struct video_device *dev = file->private_data; 1163 struct video_device *dev = file->private_data;
1152 struct usb_se401 *se401 = (struct usb_se401 *)dev; 1164 struct usb_se401 *se401 = (struct usb_se401 *)dev;
1153 1165
1154 1166
1155 if (se401->dev == NULL) 1167 if (se401->dev == NULL)
1156 return -EIO; 1168 return -EIO;
1157 if (realcount > se401->cwidth*se401->cheight*3) 1169 if (realcount > se401->cwidth*se401->cheight*3)
1158 realcount=se401->cwidth*se401->cheight*3; 1170 realcount = se401->cwidth*se401->cheight*3;
1159 1171
1160 /* Shouldn't happen: */ 1172 /* Shouldn't happen: */
1161 if (se401->frame[0].grabstate==FRAME_GRABBING) 1173 if (se401->frame[0].grabstate == FRAME_GRABBING)
1162 return -EBUSY; 1174 return -EBUSY;
1163 se401->frame[0].grabstate=FRAME_READY; 1175 se401->frame[0].grabstate = FRAME_READY;
1164 se401->frame[1].grabstate=FRAME_UNUSED; 1176 se401->frame[1].grabstate = FRAME_UNUSED;
1165 se401->curframe=0; 1177 se401->curframe = 0;
1166 1178
1167 if (!se401->streaming) 1179 if (!se401->streaming)
1168 se401_start_stream(se401); 1180 se401_start_stream(se401);
1169 1181
1170 /* Set the picture properties */ 1182 /* Set the picture properties */
1171 if (se401->framecount==0) 1183 if (se401->framecount == 0)
1172 se401_send_pict(se401); 1184 se401_send_pict(se401);
1173 /* Calibrate the reset level after a few frames. */ 1185 /* Calibrate the reset level after a few frames. */
1174 if (se401->framecount%20==1) 1186 if (se401->framecount%20 == 1)
1175 se401_auto_resetlevel(se401); 1187 se401_auto_resetlevel(se401);
1176 1188
1177 ret=se401_newframe(se401, 0); 1189 ret = se401_newframe(se401, 0);
1178 1190
1179 se401->frame[0].grabstate=FRAME_UNUSED; 1191 se401->frame[0].grabstate = FRAME_UNUSED;
1180 if (ret) 1192 if (ret)
1181 return ret; 1193 return ret;
1182 if (copy_to_user(buf, se401->frame[0].data, realcount)) 1194 if (copy_to_user(buf, se401->frame[0].data, realcount))
@@ -1195,11 +1207,12 @@ static int se401_mmap(struct file *file, struct vm_area_struct *vma)
1195 1207
1196 mutex_lock(&se401->lock); 1208 mutex_lock(&se401->lock);
1197 1209
1198 if (se401->dev == NULL) { 1210 if (se401->dev == NULL) {
1199 mutex_unlock(&se401->lock); 1211 mutex_unlock(&se401->lock);
1200 return -EIO; 1212 return -EIO;
1201 } 1213 }
1202 if (size > (((SE401_NUMFRAMES * se401->maxframesize) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))) { 1214 if (size > (((SE401_NUMFRAMES * se401->maxframesize) + PAGE_SIZE - 1)
1215 & ~(PAGE_SIZE - 1))) {
1203 mutex_unlock(&se401->lock); 1216 mutex_unlock(&se401->lock);
1204 return -EINVAL; 1217 return -EINVAL;
1205 } 1218 }
@@ -1210,10 +1223,10 @@ static int se401_mmap(struct file *file, struct vm_area_struct *vma)
1210 mutex_unlock(&se401->lock); 1223 mutex_unlock(&se401->lock);
1211 return -EAGAIN; 1224 return -EAGAIN;
1212 } 1225 }
1213 start += PAGE_SIZE; 1226 start += PAGE_SIZE;
1214 pos += PAGE_SIZE; 1227 pos += PAGE_SIZE;
1215 if (size > PAGE_SIZE) 1228 if (size > PAGE_SIZE)
1216 size -= PAGE_SIZE; 1229 size -= PAGE_SIZE;
1217 else 1230 else
1218 size = 0; 1231 size = 0;
1219 } 1232 }
@@ -1223,7 +1236,7 @@ static int se401_mmap(struct file *file, struct vm_area_struct *vma)
1223} 1236}
1224 1237
1225static const struct v4l2_file_operations se401_fops = { 1238static const struct v4l2_file_operations se401_fops = {
1226 .owner = THIS_MODULE, 1239 .owner = THIS_MODULE,
1227 .open = se401_open, 1240 .open = se401_open,
1228 .release = se401_close, 1241 .release = se401_close,
1229 .read = se401_read, 1242 .read = se401_read,
@@ -1241,71 +1254,76 @@ static struct video_device se401_template = {
1241/***************************/ 1254/***************************/
1242static int se401_init(struct usb_se401 *se401, int button) 1255static int se401_init(struct usb_se401 *se401, int button)
1243{ 1256{
1244 int i=0, rc; 1257 int i = 0, rc;
1245 unsigned char cp[0x40]; 1258 unsigned char cp[0x40];
1246 char temp[200]; 1259 char temp[200];
1260 int slen;
1247 1261
1248 /* led on */ 1262 /* led on */
1249 se401_sndctrl(1, se401, SE401_REQ_LED_CONTROL, 1, NULL, 0); 1263 se401_sndctrl(1, se401, SE401_REQ_LED_CONTROL, 1, NULL, 0);
1250 1264
1251 /* get camera descriptor */ 1265 /* get camera descriptor */
1252 rc=se401_sndctrl(0, se401, SE401_REQ_GET_CAMERA_DESCRIPTOR, 0, cp, sizeof(cp)); 1266 rc = se401_sndctrl(0, se401, SE401_REQ_GET_CAMERA_DESCRIPTOR, 0,
1253 if (cp[1]!=0x41) { 1267 cp, sizeof(cp));
1268 if (cp[1] != 0x41) {
1254 err("Wrong descriptor type"); 1269 err("Wrong descriptor type");
1255 return 1; 1270 return 1;
1256 } 1271 }
1257 sprintf (temp, "ExtraFeatures: %d", cp[3]); 1272 slen = snprintf(temp, 200, "ExtraFeatures: %d", cp[3]);
1258 1273
1259 se401->sizes=cp[4]+cp[5]*256; 1274 se401->sizes = cp[4] + cp[5] * 256;
1260 se401->width=kmalloc(se401->sizes*sizeof(int), GFP_KERNEL); 1275 se401->width = kmalloc(se401->sizes*sizeof(int), GFP_KERNEL);
1261 if (!se401->width) 1276 if (!se401->width)
1262 return 1; 1277 return 1;
1263 se401->height=kmalloc(se401->sizes*sizeof(int), GFP_KERNEL); 1278 se401->height = kmalloc(se401->sizes*sizeof(int), GFP_KERNEL);
1264 if (!se401->height) { 1279 if (!se401->height) {
1265 kfree(se401->width); 1280 kfree(se401->width);
1266 return 1; 1281 return 1;
1267 } 1282 }
1268 for (i=0; i<se401->sizes; i++) { 1283 for (i = 0; i < se401->sizes; i++) {
1269 se401->width[i]=cp[6+i*4+0]+cp[6+i*4+1]*256; 1284 se401->width[i] = cp[6 + i * 4 + 0] + cp[6 + i*4 + 1] * 256;
1270 se401->height[i]=cp[6+i*4+2]+cp[6+i*4+3]*256; 1285 se401->height[i] = cp[6 + i * 4 + 2] + cp[6 + i * 4 + 3] * 256;
1271 } 1286 }
1272 sprintf (temp, "%s Sizes:", temp); 1287 slen += snprintf(temp + slen, 200 - slen, " Sizes:");
1273 for (i=0; i<se401->sizes; i++) { 1288 for (i = 0; i < se401->sizes; i++) {
1274 sprintf(temp, "%s %dx%d", temp, se401->width[i], se401->height[i]); 1289 slen += snprintf(temp + slen, 200 - slen,
1290 " %dx%d", se401->width[i], se401->height[i]);
1275 } 1291 }
1276 dev_info(&se401->dev->dev, "%s\n", temp); 1292 dev_info(&se401->dev->dev, "%s\n", temp);
1277 se401->maxframesize=se401->width[se401->sizes-1]*se401->height[se401->sizes-1]*3; 1293 se401->maxframesize = se401->width[se401->sizes-1] *
1294 se401->height[se401->sizes - 1] * 3;
1278 1295
1279 rc=se401_sndctrl(0, se401, SE401_REQ_GET_WIDTH, 0, cp, sizeof(cp)); 1296 rc = se401_sndctrl(0, se401, SE401_REQ_GET_WIDTH, 0, cp, sizeof(cp));
1280 se401->cwidth=cp[0]+cp[1]*256; 1297 se401->cwidth = cp[0]+cp[1]*256;
1281 rc=se401_sndctrl(0, se401, SE401_REQ_GET_HEIGHT, 0, cp, sizeof(cp)); 1298 rc = se401_sndctrl(0, se401, SE401_REQ_GET_HEIGHT, 0, cp, sizeof(cp));
1282 se401->cheight=cp[0]+cp[1]*256; 1299 se401->cheight = cp[0]+cp[1]*256;
1283 1300
1284 if (!(cp[2] & SE401_FORMAT_BAYER)) { 1301 if (!(cp[2] & SE401_FORMAT_BAYER)) {
1285 err("Bayer format not supported!"); 1302 err("Bayer format not supported!");
1286 return 1; 1303 return 1;
1287 } 1304 }
1288 /* set output mode (BAYER) */ 1305 /* set output mode (BAYER) */
1289 se401_sndctrl(1, se401, SE401_REQ_SET_OUTPUT_MODE, SE401_FORMAT_BAYER, NULL, 0); 1306 se401_sndctrl(1, se401, SE401_REQ_SET_OUTPUT_MODE,
1307 SE401_FORMAT_BAYER, NULL, 0);
1290 1308
1291 rc=se401_sndctrl(0, se401, SE401_REQ_GET_BRT, 0, cp, sizeof(cp)); 1309 rc = se401_sndctrl(0, se401, SE401_REQ_GET_BRT, 0, cp, sizeof(cp));
1292 se401->brightness=cp[0]+cp[1]*256; 1310 se401->brightness = cp[0]+cp[1]*256;
1293 /* some default values */ 1311 /* some default values */
1294 se401->resetlevel=0x2d; 1312 se401->resetlevel = 0x2d;
1295 se401->rgain=0x20; 1313 se401->rgain = 0x20;
1296 se401->ggain=0x20; 1314 se401->ggain = 0x20;
1297 se401->bgain=0x20; 1315 se401->bgain = 0x20;
1298 se401_set_exposure(se401, 20000); 1316 se401_set_exposure(se401, 20000);
1299 se401->palette=VIDEO_PALETTE_RGB24; 1317 se401->palette = VIDEO_PALETTE_RGB24;
1300 se401->enhance=1; 1318 se401->enhance = 1;
1301 se401->dropped=0; 1319 se401->dropped = 0;
1302 se401->error=0; 1320 se401->error = 0;
1303 se401->framecount=0; 1321 se401->framecount = 0;
1304 se401->readcount=0; 1322 se401->readcount = 0;
1305 1323
1306 /* Start interrupt transfers for snapshot button */ 1324 /* Start interrupt transfers for snapshot button */
1307 if (button) { 1325 if (button) {
1308 se401->inturb=usb_alloc_urb(0, GFP_KERNEL); 1326 se401->inturb = usb_alloc_urb(0, GFP_KERNEL);
1309 if (!se401->inturb) { 1327 if (!se401->inturb) {
1310 dev_info(&se401->dev->dev, 1328 dev_info(&se401->dev->dev,
1311 "Allocation of inturb failed\n"); 1329 "Allocation of inturb failed\n");
@@ -1323,7 +1341,7 @@ static int se401_init(struct usb_se401 *se401, int button)
1323 return 1; 1341 return 1;
1324 } 1342 }
1325 } else 1343 } else
1326 se401->inturb=NULL; 1344 se401->inturb = NULL;
1327 1345
1328 /* Flash the led */ 1346 /* Flash the led */
1329 se401_sndctrl(1, se401, SE401_REQ_CAMERA_POWER, 1, NULL, 0); 1347 se401_sndctrl(1, se401, SE401_REQ_CAMERA_POWER, 1, NULL, 0);
@@ -1340,8 +1358,8 @@ static int se401_probe(struct usb_interface *intf,
1340 struct usb_device *dev = interface_to_usbdev(intf); 1358 struct usb_device *dev = interface_to_usbdev(intf);
1341 struct usb_interface_descriptor *interface; 1359 struct usb_interface_descriptor *interface;
1342 struct usb_se401 *se401; 1360 struct usb_se401 *se401;
1343 char *camera_name=NULL; 1361 char *camera_name = NULL;
1344 int button=1; 1362 int button = 1;
1345 1363
1346 /* We don't handle multi-config cameras */ 1364 /* We don't handle multi-config cameras */
1347 if (dev->descriptor.bNumConfigurations != 1) 1365 if (dev->descriptor.bNumConfigurations != 1)
@@ -1350,22 +1368,22 @@ static int se401_probe(struct usb_interface *intf,
1350 interface = &intf->cur_altsetting->desc; 1368 interface = &intf->cur_altsetting->desc;
1351 1369
1352 /* Is it an se401? */ 1370 /* Is it an se401? */
1353 if (le16_to_cpu(dev->descriptor.idVendor) == 0x03e8 && 1371 if (le16_to_cpu(dev->descriptor.idVendor) == 0x03e8 &&
1354 le16_to_cpu(dev->descriptor.idProduct) == 0x0004) { 1372 le16_to_cpu(dev->descriptor.idProduct) == 0x0004) {
1355 camera_name="Endpoints/Aox SE401"; 1373 camera_name = "Endpoints/Aox SE401";
1356 } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x0471 && 1374 } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x0471 &&
1357 le16_to_cpu(dev->descriptor.idProduct) == 0x030b) { 1375 le16_to_cpu(dev->descriptor.idProduct) == 0x030b) {
1358 camera_name="Philips PCVC665K"; 1376 camera_name = "Philips PCVC665K";
1359 } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x047d && 1377 } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x047d &&
1360 le16_to_cpu(dev->descriptor.idProduct) == 0x5001) { 1378 le16_to_cpu(dev->descriptor.idProduct) == 0x5001) {
1361 camera_name="Kensington VideoCAM 67014"; 1379 camera_name = "Kensington VideoCAM 67014";
1362 } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x047d && 1380 } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x047d &&
1363 le16_to_cpu(dev->descriptor.idProduct) == 0x5002) { 1381 le16_to_cpu(dev->descriptor.idProduct) == 0x5002) {
1364 camera_name="Kensington VideoCAM 6701(5/7)"; 1382 camera_name = "Kensington VideoCAM 6701(5/7)";
1365 } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x047d && 1383 } else if (le16_to_cpu(dev->descriptor.idVendor) == 0x047d &&
1366 le16_to_cpu(dev->descriptor.idProduct) == 0x5003) { 1384 le16_to_cpu(dev->descriptor.idProduct) == 0x5003) {
1367 camera_name="Kensington VideoCAM 67016"; 1385 camera_name = "Kensington VideoCAM 67016";
1368 button=0; 1386 button = 0;
1369 } else 1387 } else
1370 return -ENODEV; 1388 return -ENODEV;
1371 1389
@@ -1378,7 +1396,8 @@ static int se401_probe(struct usb_interface *intf,
1378 /* We found one */ 1396 /* We found one */
1379 dev_info(&intf->dev, "SE401 camera found: %s\n", camera_name); 1397 dev_info(&intf->dev, "SE401 camera found: %s\n", camera_name);
1380 1398
1381 if ((se401 = kzalloc(sizeof(*se401), GFP_KERNEL)) == NULL) { 1399 se401 = kzalloc(sizeof(*se401), GFP_KERNEL);
1400 if (se401 == NULL) {
1382 err("couldn't kmalloc se401 struct"); 1401 err("couldn't kmalloc se401 struct");
1383 return -ENOMEM; 1402 return -ENOMEM;
1384 } 1403 }
@@ -1396,12 +1415,14 @@ static int se401_probe(struct usb_interface *intf,
1396 } 1415 }
1397 1416
1398 memcpy(&se401->vdev, &se401_template, sizeof(se401_template)); 1417 memcpy(&se401->vdev, &se401_template, sizeof(se401_template));
1399 memcpy(se401->vdev.name, se401->camera_name, strlen(se401->camera_name)); 1418 memcpy(se401->vdev.name, se401->camera_name,
1419 strlen(se401->camera_name));
1400 init_waitqueue_head(&se401->wq); 1420 init_waitqueue_head(&se401->wq);
1401 mutex_init(&se401->lock); 1421 mutex_init(&se401->lock);
1402 wmb(); 1422 wmb();
1403 1423
1404 if (video_register_device(&se401->vdev, VFL_TYPE_GRABBER, video_nr) < 0) { 1424 if (video_register_device(&se401->vdev,
1425 VFL_TYPE_GRABBER, video_nr) < 0) {
1405 kfree(se401); 1426 kfree(se401);
1406 err("video_register_device failed"); 1427 err("video_register_device failed");
1407 return -EIO; 1428 return -EIO;
@@ -1409,20 +1430,20 @@ static int se401_probe(struct usb_interface *intf,
1409 dev_info(&intf->dev, "registered new video device: video%d\n", 1430 dev_info(&intf->dev, "registered new video device: video%d\n",
1410 se401->vdev.num); 1431 se401->vdev.num);
1411 1432
1412 usb_set_intfdata (intf, se401); 1433 usb_set_intfdata(intf, se401);
1413 return 0; 1434 return 0;
1414} 1435}
1415 1436
1416static void se401_disconnect(struct usb_interface *intf) 1437static void se401_disconnect(struct usb_interface *intf)
1417{ 1438{
1418 struct usb_se401 *se401 = usb_get_intfdata (intf); 1439 struct usb_se401 *se401 = usb_get_intfdata(intf);
1419 1440
1420 usb_set_intfdata (intf, NULL); 1441 usb_set_intfdata(intf, NULL);
1421 if (se401) { 1442 if (se401) {
1422 video_unregister_device(&se401->vdev); 1443 video_unregister_device(&se401->vdev);
1423 if (!se401->user){ 1444 if (!se401->user)
1424 usb_se401_remove_disconnected(se401); 1445 usb_se401_remove_disconnected(se401);
1425 } else { 1446 else {
1426 se401->frame[0].grabstate = FRAME_ERROR; 1447 se401->frame[0].grabstate = FRAME_ERROR;
1427 se401->frame[0].grabstate = FRAME_ERROR; 1448 se401->frame[0].grabstate = FRAME_ERROR;
1428 1449
@@ -1435,10 +1456,10 @@ static void se401_disconnect(struct usb_interface *intf)
1435} 1456}
1436 1457
1437static struct usb_driver se401_driver = { 1458static struct usb_driver se401_driver = {
1438 .name = "se401", 1459 .name = "se401",
1439 .id_table = device_table, 1460 .id_table = device_table,
1440 .probe = se401_probe, 1461 .probe = se401_probe,
1441 .disconnect = se401_disconnect, 1462 .disconnect = se401_disconnect,
1442}; 1463};
1443 1464
1444 1465
@@ -1451,9 +1472,10 @@ static struct usb_driver se401_driver = {
1451 1472
1452static int __init usb_se401_init(void) 1473static int __init usb_se401_init(void)
1453{ 1474{
1454 printk(KERN_INFO "SE401 usb camera driver version %s registering\n", version); 1475 printk(KERN_INFO "SE401 usb camera driver version %s registering\n",
1476 version);
1455 if (flickerless) 1477 if (flickerless)
1456 if (flickerless!=50 && flickerless!=60) { 1478 if (flickerless != 50 && flickerless != 60) {
1457 printk(KERN_ERR "Invallid flickerless value, use 0, 50 or 60.\n"); 1479 printk(KERN_ERR "Invallid flickerless value, use 0, 50 or 60.\n");
1458 return -1; 1480 return -1;
1459 } 1481 }
diff --git a/drivers/media/video/se401.h b/drivers/media/video/se401.h
index 2ce685db5d8b..bf7d2e9765b0 100644
--- a/drivers/media/video/se401.h
+++ b/drivers/media/video/se401.h
@@ -2,7 +2,7 @@
2#ifndef __LINUX_se401_H 2#ifndef __LINUX_se401_H
3#define __LINUX_se401_H 3#define __LINUX_se401_H
4 4
5#include <asm/uaccess.h> 5#include <linux/uaccess.h>
6#include <linux/videodev.h> 6#include <linux/videodev.h>
7#include <media/v4l2-common.h> 7#include <media/v4l2-common.h>
8#include <media/v4l2-ioctl.h> 8#include <media/v4l2-ioctl.h>
@@ -12,9 +12,10 @@
12 12
13#ifdef se401_DEBUG 13#ifdef se401_DEBUG
14# define PDEBUG(level, fmt, args...) \ 14# define PDEBUG(level, fmt, args...) \
15if (debug >= level) info("[" __PRETTY_FUNCTION__ ":%d] " fmt, __LINE__ , ## args) 15if (debug >= level) \
16 info("[" __PRETTY_FUNCTION__ ":%d] " fmt, __LINE__ , ## args)
16#else 17#else
17# define PDEBUG(level, fmt, args...) do {} while(0) 18# define PDEBUG(level, fmt, args...) do {} while (0)
18#endif 19#endif
19 20
20/* An almost drop-in replacement for sleep_on_interruptible */ 21/* An almost drop-in replacement for sleep_on_interruptible */
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index b5e37a530c62..d369e8409ab8 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -81,7 +81,6 @@ struct sh_mobile_ceu_buffer {
81}; 81};
82 82
83struct sh_mobile_ceu_dev { 83struct sh_mobile_ceu_dev {
84 struct device *dev;
85 struct soc_camera_host ici; 84 struct soc_camera_host ici;
86 struct soc_camera_device *icd; 85 struct soc_camera_device *icd;
87 86
@@ -617,7 +616,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, int idx,
617 xlate->cam_fmt = icd->formats + idx; 616 xlate->cam_fmt = icd->formats + idx;
618 xlate->buswidth = icd->formats[idx].depth; 617 xlate->buswidth = icd->formats[idx].depth;
619 xlate++; 618 xlate++;
620 dev_dbg(&ici->dev, "Providing format %s using %s\n", 619 dev_dbg(ici->dev, "Providing format %s using %s\n",
621 sh_mobile_ceu_formats[k].name, 620 sh_mobile_ceu_formats[k].name,
622 icd->formats[idx].name); 621 icd->formats[idx].name);
623 } 622 }
@@ -630,7 +629,7 @@ add_single_format:
630 xlate->cam_fmt = icd->formats + idx; 629 xlate->cam_fmt = icd->formats + idx;
631 xlate->buswidth = icd->formats[idx].depth; 630 xlate->buswidth = icd->formats[idx].depth;
632 xlate++; 631 xlate++;
633 dev_dbg(&ici->dev, 632 dev_dbg(ici->dev,
634 "Providing format %s in pass-through mode\n", 633 "Providing format %s in pass-through mode\n",
635 icd->formats[idx].name); 634 icd->formats[idx].name);
636 } 635 }
@@ -657,7 +656,7 @@ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
657 656
658 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 657 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
659 if (!xlate) { 658 if (!xlate) {
660 dev_warn(&ici->dev, "Format %x not found\n", pixfmt); 659 dev_warn(ici->dev, "Format %x not found\n", pixfmt);
661 return -EINVAL; 660 return -EINVAL;
662 } 661 }
663 662
@@ -684,7 +683,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
684 683
685 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); 684 xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
686 if (!xlate) { 685 if (!xlate) {
687 dev_warn(&ici->dev, "Format %x not found\n", pixfmt); 686 dev_warn(ici->dev, "Format %x not found\n", pixfmt);
688 return -EINVAL; 687 return -EINVAL;
689 } 688 }
690 689
@@ -782,7 +781,7 @@ static void sh_mobile_ceu_init_videobuf(struct videobuf_queue *q,
782 781
783 videobuf_queue_dma_contig_init(q, 782 videobuf_queue_dma_contig_init(q,
784 &sh_mobile_ceu_videobuf_ops, 783 &sh_mobile_ceu_videobuf_ops,
785 &ici->dev, &pcdev->lock, 784 ici->dev, &pcdev->lock,
786 V4L2_BUF_TYPE_VIDEO_CAPTURE, 785 V4L2_BUF_TYPE_VIDEO_CAPTURE,
787 pcdev->is_interlaced ? 786 pcdev->is_interlaced ?
788 V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE, 787 V4L2_FIELD_INTERLACED : V4L2_FIELD_NONE,
@@ -829,7 +828,6 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
829 goto exit; 828 goto exit;
830 } 829 }
831 830
832 platform_set_drvdata(pdev, pcdev);
833 INIT_LIST_HEAD(&pcdev->capture); 831 INIT_LIST_HEAD(&pcdev->capture);
834 spin_lock_init(&pcdev->lock); 832 spin_lock_init(&pcdev->lock);
835 833
@@ -840,7 +838,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
840 goto exit_kfree; 838 goto exit_kfree;
841 } 839 }
842 840
843 base = ioremap_nocache(res->start, res->end - res->start + 1); 841 base = ioremap_nocache(res->start, resource_size(res));
844 if (!base) { 842 if (!base) {
845 err = -ENXIO; 843 err = -ENXIO;
846 dev_err(&pdev->dev, "Unable to ioremap CEU registers.\n"); 844 dev_err(&pdev->dev, "Unable to ioremap CEU registers.\n");
@@ -850,13 +848,12 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
850 pcdev->irq = irq; 848 pcdev->irq = irq;
851 pcdev->base = base; 849 pcdev->base = base;
852 pcdev->video_limit = 0; /* only enabled if second resource exists */ 850 pcdev->video_limit = 0; /* only enabled if second resource exists */
853 pcdev->dev = &pdev->dev;
854 851
855 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 852 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
856 if (res) { 853 if (res) {
857 err = dma_declare_coherent_memory(&pdev->dev, res->start, 854 err = dma_declare_coherent_memory(&pdev->dev, res->start,
858 res->start, 855 res->start,
859 (res->end - res->start) + 1, 856 resource_size(res),
860 DMA_MEMORY_MAP | 857 DMA_MEMORY_MAP |
861 DMA_MEMORY_EXCLUSIVE); 858 DMA_MEMORY_EXCLUSIVE);
862 if (!err) { 859 if (!err) {
@@ -865,7 +862,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
865 goto exit_iounmap; 862 goto exit_iounmap;
866 } 863 }
867 864
868 pcdev->video_limit = (res->end - res->start) + 1; 865 pcdev->video_limit = resource_size(res);
869 } 866 }
870 867
871 /* request irq */ 868 /* request irq */
@@ -885,7 +882,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
885 } 882 }
886 883
887 pcdev->ici.priv = pcdev; 884 pcdev->ici.priv = pcdev;
888 pcdev->ici.dev.parent = &pdev->dev; 885 pcdev->ici.dev = &pdev->dev;
889 pcdev->ici.nr = pdev->id; 886 pcdev->ici.nr = pdev->id;
890 pcdev->ici.drv_name = dev_name(&pdev->dev); 887 pcdev->ici.drv_name = dev_name(&pdev->dev);
891 pcdev->ici.ops = &sh_mobile_ceu_host_ops; 888 pcdev->ici.ops = &sh_mobile_ceu_host_ops;
@@ -913,9 +910,11 @@ exit:
913 910
914static int sh_mobile_ceu_remove(struct platform_device *pdev) 911static int sh_mobile_ceu_remove(struct platform_device *pdev)
915{ 912{
916 struct sh_mobile_ceu_dev *pcdev = platform_get_drvdata(pdev); 913 struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
914 struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
915 struct sh_mobile_ceu_dev, ici);
917 916
918 soc_camera_host_unregister(&pcdev->ici); 917 soc_camera_host_unregister(soc_host);
919 clk_put(pcdev->clk); 918 clk_put(pcdev->clk);
920 free_irq(pcdev->irq, pcdev); 919 free_irq(pcdev->irq, pcdev);
921 if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) 920 if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 0e890cc23377..16f595d4337a 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -16,19 +16,21 @@
16 * published by the Free Software Foundation. 16 * published by the Free Software Foundation.
17 */ 17 */
18 18
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/device.h> 19#include <linux/device.h>
22#include <linux/list.h>
23#include <linux/err.h> 20#include <linux/err.h>
21#include <linux/i2c.h>
22#include <linux/init.h>
23#include <linux/list.h>
24#include <linux/module.h>
24#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/platform_device.h>
25#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
26 28
29#include <media/soc_camera.h>
27#include <media/v4l2-common.h> 30#include <media/v4l2-common.h>
28#include <media/v4l2-ioctl.h>
29#include <media/v4l2-dev.h> 31#include <media/v4l2-dev.h>
32#include <media/v4l2-ioctl.h>
30#include <media/videobuf-core.h> 33#include <media/videobuf-core.h>
31#include <media/soc_camera.h>
32 34
33/* Default to VGA resolution */ 35/* Default to VGA resolution */
34#define DEFAULT_WIDTH 640 36#define DEFAULT_WIDTH 640
@@ -279,7 +281,7 @@ static int soc_camera_set_fmt(struct soc_camera_file *icf,
279 return ret; 281 return ret;
280 } else if (!icd->current_fmt || 282 } else if (!icd->current_fmt ||
281 icd->current_fmt->fourcc != pix->pixelformat) { 283 icd->current_fmt->fourcc != pix->pixelformat) {
282 dev_err(&ici->dev, 284 dev_err(ici->dev,
283 "Host driver hasn't set up current format correctly!\n"); 285 "Host driver hasn't set up current format correctly!\n");
284 return -EINVAL; 286 return -EINVAL;
285 } 287 }
@@ -794,7 +796,7 @@ static void scan_add_host(struct soc_camera_host *ici)
794 796
795 list_for_each_entry(icd, &devices, list) { 797 list_for_each_entry(icd, &devices, list) {
796 if (icd->iface == ici->nr) { 798 if (icd->iface == ici->nr) {
797 icd->dev.parent = &ici->dev; 799 icd->dev.parent = ici->dev;
798 device_register_link(icd); 800 device_register_link(icd);
799 } 801 }
800 } 802 }
@@ -818,7 +820,7 @@ static int scan_add_device(struct soc_camera_device *icd)
818 list_for_each_entry(ici, &hosts, list) { 820 list_for_each_entry(ici, &hosts, list) {
819 if (icd->iface == ici->nr) { 821 if (icd->iface == ici->nr) {
820 ret = 1; 822 ret = 1;
821 icd->dev.parent = &ici->dev; 823 icd->dev.parent = ici->dev;
822 break; 824 break;
823 } 825 }
824 } 826 }
@@ -952,7 +954,6 @@ static void dummy_release(struct device *dev)
952 954
953int soc_camera_host_register(struct soc_camera_host *ici) 955int soc_camera_host_register(struct soc_camera_host *ici)
954{ 956{
955 int ret;
956 struct soc_camera_host *ix; 957 struct soc_camera_host *ix;
957 958
958 if (!ici || !ici->ops || 959 if (!ici || !ici->ops ||
@@ -965,12 +966,10 @@ int soc_camera_host_register(struct soc_camera_host *ici)
965 !ici->ops->reqbufs || 966 !ici->ops->reqbufs ||
966 !ici->ops->add || 967 !ici->ops->add ||
967 !ici->ops->remove || 968 !ici->ops->remove ||
968 !ici->ops->poll) 969 !ici->ops->poll ||
970 !ici->dev)
969 return -EINVAL; 971 return -EINVAL;
970 972
971 /* Number might be equal to the platform device ID */
972 dev_set_name(&ici->dev, "camera_host%d", ici->nr);
973
974 mutex_lock(&list_lock); 973 mutex_lock(&list_lock);
975 list_for_each_entry(ix, &hosts, list) { 974 list_for_each_entry(ix, &hosts, list) {
976 if (ix->nr == ici->nr) { 975 if (ix->nr == ici->nr) {
@@ -979,26 +978,14 @@ int soc_camera_host_register(struct soc_camera_host *ici)
979 } 978 }
980 } 979 }
981 980
981 dev_set_drvdata(ici->dev, ici);
982
982 list_add_tail(&ici->list, &hosts); 983 list_add_tail(&ici->list, &hosts);
983 mutex_unlock(&list_lock); 984 mutex_unlock(&list_lock);
984 985
985 ici->dev.release = dummy_release;
986
987 ret = device_register(&ici->dev);
988
989 if (ret)
990 goto edevr;
991
992 scan_add_host(ici); 986 scan_add_host(ici);
993 987
994 return 0; 988 return 0;
995
996edevr:
997 mutex_lock(&list_lock);
998 list_del(&ici->list);
999 mutex_unlock(&list_lock);
1000
1001 return ret;
1002} 989}
1003EXPORT_SYMBOL(soc_camera_host_register); 990EXPORT_SYMBOL(soc_camera_host_register);
1004 991
@@ -1012,7 +999,7 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
1012 list_del(&ici->list); 999 list_del(&ici->list);
1013 1000
1014 list_for_each_entry(icd, &devices, list) { 1001 list_for_each_entry(icd, &devices, list) {
1015 if (icd->dev.parent == &ici->dev) { 1002 if (icd->dev.parent == ici->dev) {
1016 device_unregister(&icd->dev); 1003 device_unregister(&icd->dev);
1017 /* Not before device_unregister(), .remove 1004 /* Not before device_unregister(), .remove
1018 * needs parent to call ici->ops->remove() */ 1005 * needs parent to call ici->ops->remove() */
@@ -1023,7 +1010,7 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
1023 1010
1024 mutex_unlock(&list_lock); 1011 mutex_unlock(&list_lock);
1025 1012
1026 device_unregister(&ici->dev); 1013 dev_set_drvdata(ici->dev, NULL);
1027} 1014}
1028EXPORT_SYMBOL(soc_camera_host_unregister); 1015EXPORT_SYMBOL(soc_camera_host_unregister);
1029 1016
@@ -1130,7 +1117,7 @@ int soc_camera_video_start(struct soc_camera_device *icd)
1130 vdev = video_device_alloc(); 1117 vdev = video_device_alloc();
1131 if (!vdev) 1118 if (!vdev)
1132 goto evidallocd; 1119 goto evidallocd;
1133 dev_dbg(&ici->dev, "Allocated video_device %p\n", vdev); 1120 dev_dbg(ici->dev, "Allocated video_device %p\n", vdev);
1134 1121
1135 strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name)); 1122 strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name));
1136 1123
@@ -1174,6 +1161,57 @@ void soc_camera_video_stop(struct soc_camera_device *icd)
1174} 1161}
1175EXPORT_SYMBOL(soc_camera_video_stop); 1162EXPORT_SYMBOL(soc_camera_video_stop);
1176 1163
1164static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
1165{
1166 struct soc_camera_link *icl = pdev->dev.platform_data;
1167 struct i2c_adapter *adap;
1168 struct i2c_client *client;
1169
1170 if (!icl)
1171 return -EINVAL;
1172
1173 adap = i2c_get_adapter(icl->i2c_adapter_id);
1174 if (!adap) {
1175 dev_warn(&pdev->dev, "Cannot get adapter #%d. No driver?\n",
1176 icl->i2c_adapter_id);
1177 /* -ENODEV and -ENXIO do not produce an error on probe()... */
1178 return -ENOENT;
1179 }
1180
1181 icl->board_info->platform_data = icl;
1182 client = i2c_new_device(adap, icl->board_info);
1183 if (!client) {
1184 i2c_put_adapter(adap);
1185 return -ENOMEM;
1186 }
1187
1188 platform_set_drvdata(pdev, client);
1189
1190 return 0;
1191}
1192
1193static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
1194{
1195 struct i2c_client *client = platform_get_drvdata(pdev);
1196
1197 if (!client)
1198 return -ENODEV;
1199
1200 i2c_unregister_device(client);
1201 i2c_put_adapter(client->adapter);
1202
1203 return 0;
1204}
1205
1206static struct platform_driver __refdata soc_camera_pdrv = {
1207 .probe = soc_camera_pdrv_probe,
1208 .remove = __devexit_p(soc_camera_pdrv_remove),
1209 .driver = {
1210 .name = "soc-camera-pdrv",
1211 .owner = THIS_MODULE,
1212 },
1213};
1214
1177static int __init soc_camera_init(void) 1215static int __init soc_camera_init(void)
1178{ 1216{
1179 int ret = bus_register(&soc_camera_bus_type); 1217 int ret = bus_register(&soc_camera_bus_type);
@@ -1183,8 +1221,14 @@ static int __init soc_camera_init(void)
1183 if (ret) 1221 if (ret)
1184 goto edrvr; 1222 goto edrvr;
1185 1223
1224 ret = platform_driver_register(&soc_camera_pdrv);
1225 if (ret)
1226 goto epdr;
1227
1186 return 0; 1228 return 0;
1187 1229
1230epdr:
1231 driver_unregister(&ic_drv);
1188edrvr: 1232edrvr:
1189 bus_unregister(&soc_camera_bus_type); 1233 bus_unregister(&soc_camera_bus_type);
1190 return ret; 1234 return ret;
@@ -1192,6 +1236,7 @@ edrvr:
1192 1236
1193static void __exit soc_camera_exit(void) 1237static void __exit soc_camera_exit(void)
1194{ 1238{
1239 platform_driver_unregister(&soc_camera_pdrv);
1195 driver_unregister(&ic_drv); 1240 driver_unregister(&ic_drv);
1196 bus_unregister(&soc_camera_bus_type); 1241 bus_unregister(&soc_camera_bus_type);
1197} 1242}
@@ -1202,3 +1247,4 @@ module_exit(soc_camera_exit);
1202MODULE_DESCRIPTION("Image capture bus driver"); 1247MODULE_DESCRIPTION("Image capture bus driver");
1203MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>"); 1248MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
1204MODULE_LICENSE("GPL"); 1249MODULE_LICENSE("GPL");
1250MODULE_ALIAS("platform:soc-camera-pdrv");
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index 1a6d39cbd6f3..2e5937047278 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -1137,7 +1137,7 @@ static int stk_vidioc_querybuf(struct file *filp,
1137 struct stk_camera *dev = priv; 1137 struct stk_camera *dev = priv;
1138 struct stk_sio_buffer *sbuf; 1138 struct stk_sio_buffer *sbuf;
1139 1139
1140 if (buf->index < 0 || buf->index >= dev->n_sbufs) 1140 if (buf->index >= dev->n_sbufs)
1141 return -EINVAL; 1141 return -EINVAL;
1142 sbuf = dev->sio_bufs + buf->index; 1142 sbuf = dev->sio_bufs + buf->index;
1143 *buf = sbuf->v4lbuf; 1143 *buf = sbuf->v4lbuf;
@@ -1154,7 +1154,7 @@ static int stk_vidioc_qbuf(struct file *filp,
1154 if (buf->memory != V4L2_MEMORY_MMAP) 1154 if (buf->memory != V4L2_MEMORY_MMAP)
1155 return -EINVAL; 1155 return -EINVAL;
1156 1156
1157 if (buf->index < 0 || buf->index >= dev->n_sbufs) 1157 if (buf->index >= dev->n_sbufs)
1158 return -EINVAL; 1158 return -EINVAL;
1159 sbuf = dev->sio_bufs + buf->index; 1159 sbuf = dev->sio_bufs + buf->index;
1160 if (sbuf->v4lbuf.flags & V4L2_BUF_FLAG_QUEUED) 1160 if (sbuf->v4lbuf.flags & V4L2_BUF_FLAG_QUEUED)
diff --git a/drivers/media/video/tda7432.c b/drivers/media/video/tda7432.c
index 005f8a468031..80f1cee23fa5 100644
--- a/drivers/media/video/tda7432.c
+++ b/drivers/media/video/tda7432.c
@@ -20,20 +20,6 @@
20 * loudness - set between 0 and 15 for varying degrees of loudness effect 20 * loudness - set between 0 and 15 for varying degrees of loudness effect
21 * 21 *
22 * maxvol - set maximium volume to +20db (1), default is 0db(0) 22 * maxvol - set maximium volume to +20db (1), default is 0db(0)
23 *
24 *
25 * Revision: 0.7 - maxvol module parm to set maximium volume 0db or +20db
26 * store if muted so we can return it
27 * change balance only if flaged to
28 * Revision: 0.6 - added tone controls
29 * Revision: 0.5 - Fixed odd balance problem
30 * Revision: 0.4 - added muting
31 * Revision: 0.3 - Fixed silly reversed volume controls. :)
32 * Revision: 0.2 - Cleaned up #defines
33 * fixed volume control
34 * Added I2C_DRIVERID_TDA7432
35 * added loudness insmod control
36 * Revision: 0.1 - initial version
37 */ 23 */
38 24
39#include <linux/module.h> 25#include <linux/module.h>
diff --git a/drivers/media/video/tea6415c.c b/drivers/media/video/tea6415c.c
index d4a9ed45764b..1585839bd0bd 100644
--- a/drivers/media/video/tea6415c.c
+++ b/drivers/media/video/tea6415c.c
@@ -141,7 +141,6 @@ static const struct v4l2_subdev_ops tea6415c_ops = {
141 .video = &tea6415c_video_ops, 141 .video = &tea6415c_video_ops,
142}; 142};
143 143
144/* this function is called by i2c_probe */
145static int tea6415c_probe(struct i2c_client *client, 144static int tea6415c_probe(struct i2c_client *client,
146 const struct i2c_device_id *id) 145 const struct i2c_device_id *id)
147{ 146{
diff --git a/drivers/media/video/tea6420.c b/drivers/media/video/tea6420.c
index ced6eadf347a..0446524d3543 100644
--- a/drivers/media/video/tea6420.c
+++ b/drivers/media/video/tea6420.c
@@ -112,7 +112,6 @@ static const struct v4l2_subdev_ops tea6420_ops = {
112 .audio = &tea6420_audio_ops, 112 .audio = &tea6420_audio_ops,
113}; 113};
114 114
115/* this function is called by i2c_probe */
116static int tea6420_probe(struct i2c_client *client, 115static int tea6420_probe(struct i2c_client *client,
117 const struct i2c_device_id *id) 116 const struct i2c_device_id *id)
118{ 117{
diff --git a/drivers/media/video/ths7303.c b/drivers/media/video/ths7303.c
new file mode 100644
index 000000000000..21781f8a0e8e
--- /dev/null
+++ b/drivers/media/video/ths7303.c
@@ -0,0 +1,151 @@
1/*
2 * ths7303- THS7303 Video Amplifier driver
3 *
4 * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed .as is. WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/ctype.h>
19#include <linux/i2c.h>
20#include <linux/device.h>
21#include <linux/delay.h>
22#include <linux/module.h>
23#include <linux/uaccess.h>
24#include <linux/videodev2.h>
25
26#include <media/v4l2-device.h>
27#include <media/v4l2-subdev.h>
28#include <media/v4l2-chip-ident.h>
29
30MODULE_DESCRIPTION("TI THS7303 video amplifier driver");
31MODULE_AUTHOR("Chaithrika U S");
32MODULE_LICENSE("GPL");
33
34static int debug;
35module_param(debug, int, 0644);
36MODULE_PARM_DESC(debug, "Debug level 0-1");
37
38/* following function is used to set ths7303 */
39static int ths7303_setvalue(struct v4l2_subdev *sd, v4l2_std_id std)
40{
41 int err = 0;
42 u8 val;
43 struct i2c_client *client;
44
45 client = v4l2_get_subdevdata(sd);
46
47 if (std & (V4L2_STD_ALL & ~V4L2_STD_SECAM)) {
48 val = 0x02;
49 v4l2_dbg(1, debug, sd, "setting value for SDTV format\n");
50 } else {
51 val = 0x00;
52 v4l2_dbg(1, debug, sd, "disabling all channels\n");
53 }
54
55 err |= i2c_smbus_write_byte_data(client, 0x01, val);
56 err |= i2c_smbus_write_byte_data(client, 0x02, val);
57 err |= i2c_smbus_write_byte_data(client, 0x03, val);
58
59 if (err)
60 v4l2_err(sd, "write failed\n");
61
62 return err;
63}
64
65static int ths7303_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
66{
67 return ths7303_setvalue(sd, norm);
68}
69
70static int ths7303_g_chip_ident(struct v4l2_subdev *sd,
71 struct v4l2_dbg_chip_ident *chip)
72{
73 struct i2c_client *client = v4l2_get_subdevdata(sd);
74
75 return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_THS7303, 0);
76}
77
78static const struct v4l2_subdev_video_ops ths7303_video_ops = {
79 .s_std_output = ths7303_s_std_output,
80};
81
82static const struct v4l2_subdev_core_ops ths7303_core_ops = {
83 .g_chip_ident = ths7303_g_chip_ident,
84};
85
86static const struct v4l2_subdev_ops ths7303_ops = {
87 .core = &ths7303_core_ops,
88 .video = &ths7303_video_ops,
89};
90
91static int ths7303_probe(struct i2c_client *client,
92 const struct i2c_device_id *id)
93{
94 struct v4l2_subdev *sd;
95 v4l2_std_id std_id = V4L2_STD_NTSC;
96
97 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
98 return -ENODEV;
99
100 v4l_info(client, "chip found @ 0x%x (%s)\n",
101 client->addr << 1, client->adapter->name);
102
103 sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
104 if (sd == NULL)
105 return -ENOMEM;
106
107 v4l2_i2c_subdev_init(sd, client, &ths7303_ops);
108
109 return ths7303_setvalue(sd, std_id);
110}
111
112static int ths7303_remove(struct i2c_client *client)
113{
114 struct v4l2_subdev *sd = i2c_get_clientdata(client);
115
116 v4l2_device_unregister_subdev(sd);
117 kfree(sd);
118
119 return 0;
120}
121
122static const struct i2c_device_id ths7303_id[] = {
123 {"ths7303", 0},
124 {},
125};
126
127MODULE_DEVICE_TABLE(i2c, ths7303_id);
128
129static struct i2c_driver ths7303_driver = {
130 .driver = {
131 .owner = THIS_MODULE,
132 .name = "ths7303",
133 },
134 .probe = ths7303_probe,
135 .remove = ths7303_remove,
136 .id_table = ths7303_id,
137};
138
139static int __init ths7303_init(void)
140{
141 return i2c_add_driver(&ths7303_driver);
142}
143
144static void __exit ths7303_exit(void)
145{
146 i2c_del_driver(&ths7303_driver);
147}
148
149module_init(ths7303_init);
150module_exit(ths7303_exit);
151
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 78c377a399cb..537594211a90 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -309,32 +309,6 @@ static void set_freq(struct i2c_client *c, unsigned long freq)
309 } 309 }
310} 310}
311 311
312static void tuner_i2c_address_check(struct tuner *t)
313{
314 if ((t->type == UNSET || t->type == TUNER_ABSENT) ||
315 ((t->i2c->addr < 0x64) || (t->i2c->addr > 0x6f)))
316 return;
317
318 /* We already know that the XC5000 can only be located at
319 * i2c address 0x61, 0x62, 0x63 or 0x64 */
320 if ((t->type == TUNER_XC5000) &&
321 ((t->i2c->addr <= 0x64)) && (t->i2c->addr >= 0x61))
322 return;
323
324 tuner_warn("====================== WARNING! ======================\n");
325 tuner_warn("Support for tuners in i2c address range 0x64 thru 0x6f\n");
326 tuner_warn("will soon be dropped. This message indicates that your\n");
327 tuner_warn("hardware has a %s tuner at i2c address 0x%02x.\n",
328 t->name, t->i2c->addr);
329 tuner_warn("To ensure continued support for your device, please\n");
330 tuner_warn("send a copy of this message, along with full dmesg\n");
331 tuner_warn("output to v4l-dvb-maintainer@linuxtv.org\n");
332 tuner_warn("Please use subject line: \"obsolete tuner i2c address.\"\n");
333 tuner_warn("driver: %s, addr: 0x%02x, type: %d (%s)\n",
334 t->i2c->adapter->name, t->i2c->addr, t->type, t->name);
335 tuner_warn("====================== WARNING! ======================\n");
336}
337
338static struct xc5000_config xc5000_cfg; 312static struct xc5000_config xc5000_cfg;
339 313
340static void set_type(struct i2c_client *c, unsigned int type, 314static void set_type(struct i2c_client *c, unsigned int type,
@@ -438,18 +412,12 @@ static void set_type(struct i2c_client *c, unsigned int type,
438 break; 412 break;
439 case TUNER_XC5000: 413 case TUNER_XC5000:
440 { 414 {
441 struct dvb_tuner_ops *xc_tuner_ops;
442
443 xc5000_cfg.i2c_address = t->i2c->addr; 415 xc5000_cfg.i2c_address = t->i2c->addr;
444 /* if_khz will be set when the digital dvb_attach() occurs */ 416 /* if_khz will be set when the digital dvb_attach() occurs */
445 xc5000_cfg.if_khz = 0; 417 xc5000_cfg.if_khz = 0;
446 if (!dvb_attach(xc5000_attach, 418 if (!dvb_attach(xc5000_attach,
447 &t->fe, t->i2c->adapter, &xc5000_cfg)) 419 &t->fe, t->i2c->adapter, &xc5000_cfg))
448 goto attach_failed; 420 goto attach_failed;
449
450 xc_tuner_ops = &t->fe.ops.tuner_ops;
451 if (xc_tuner_ops->init)
452 xc_tuner_ops->init(&t->fe);
453 break; 421 break;
454 } 422 }
455 default: 423 default:
@@ -490,7 +458,6 @@ static void set_type(struct i2c_client *c, unsigned int type,
490 tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n", 458 tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
491 c->adapter->name, c->driver->driver.name, c->addr << 1, type, 459 c->adapter->name, c->driver->driver.name, c->addr << 1, type,
492 t->mode_mask); 460 t->mode_mask);
493 tuner_i2c_address_check(t);
494 return; 461 return;
495 462
496attach_failed: 463attach_failed:
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index e24a38c7fa46..ac02808106c1 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -184,7 +184,7 @@ hauppauge_tuner[] =
184 { TUNER_ABSENT, "Silicon TDA8275C1 8290 FM"}, 184 { TUNER_ABSENT, "Silicon TDA8275C1 8290 FM"},
185 { TUNER_ABSENT, "Thompson DTT757"}, 185 { TUNER_ABSENT, "Thompson DTT757"},
186 /* 80-89 */ 186 /* 80-89 */
187 { TUNER_PHILIPS_FM1216ME_MK3, "Philips FQ1216LME MK3"}, 187 { TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK3"},
188 { TUNER_LG_PAL_NEW_TAPC, "LG TAPC G701D"}, 188 { TUNER_LG_PAL_NEW_TAPC, "LG TAPC G701D"},
189 { TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"}, 189 { TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"},
190 { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"}, 190 { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"},
@@ -210,7 +210,7 @@ hauppauge_tuner[] =
210 { TUNER_TEA5767, "Philips TEA5768HL FM Radio"}, 210 { TUNER_TEA5767, "Philips TEA5768HL FM Radio"},
211 { TUNER_ABSENT, "Panasonic ENV57H12D5"}, 211 { TUNER_ABSENT, "Panasonic ENV57H12D5"},
212 { TUNER_PHILIPS_FM1236_MK3, "TCL MFNM05-4"}, 212 { TUNER_PHILIPS_FM1236_MK3, "TCL MFNM05-4"},
213 { TUNER_ABSENT, "TCL MNM05-4"}, 213 { TUNER_PHILIPS_FM1236_MK3, "TCL MNM05-4"},
214 { TUNER_PHILIPS_FM1216ME_MK3, "TCL MPE05-2"}, 214 { TUNER_PHILIPS_FM1216ME_MK3, "TCL MPE05-2"},
215 { TUNER_ABSENT, "TCL MQNM05-4"}, 215 { TUNER_ABSENT, "TCL MQNM05-4"},
216 { TUNER_ABSENT, "LG TAPC-W701D"}, 216 { TUNER_ABSENT, "LG TAPC-W701D"},
@@ -229,7 +229,7 @@ hauppauge_tuner[] =
229 { TUNER_ABSENT, "Samsung THPD5222FG30A"}, 229 { TUNER_ABSENT, "Samsung THPD5222FG30A"},
230 /* 120-129 */ 230 /* 120-129 */
231 { TUNER_XC2028, "Xceive XC3028"}, 231 { TUNER_XC2028, "Xceive XC3028"},
232 { TUNER_ABSENT, "Philips FQ1216LME MK5"}, 232 { TUNER_PHILIPS_FQ1216LME_MK3, "Philips FQ1216LME MK5"},
233 { TUNER_ABSENT, "Philips FQD1216LME"}, 233 { TUNER_ABSENT, "Philips FQD1216LME"},
234 { TUNER_ABSENT, "Conexant CX24118A"}, 234 { TUNER_ABSENT, "Conexant CX24118A"},
235 { TUNER_ABSENT, "TCL DMF11WIP"}, 235 { TUNER_ABSENT, "TCL DMF11WIP"},
diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c
index 4262e60b8116..3750f7fadb12 100644
--- a/drivers/media/video/tvp514x.c
+++ b/drivers/media/video/tvp514x.c
@@ -692,7 +692,7 @@ static int ioctl_s_routing(struct v4l2_int_device *s,
692 break; /* Input detected */ 692 break; /* Input detected */
693 } 693 }
694 694
695 if ((current_std == STD_INVALID) || (try_count <= 0)) 695 if ((current_std == STD_INVALID) || (try_count < 0))
696 return -EINVAL; 696 return -EINVAL;
697 697
698 decoder->current_std = current_std; 698 decoder->current_std = current_std;
diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
index 900ec2129ca1..31d57f2d09e1 100644
--- a/drivers/media/video/usbvideo/konicawc.c
+++ b/drivers/media/video/usbvideo/konicawc.c
@@ -240,7 +240,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
240 input_dev->dev.parent = &dev->dev; 240 input_dev->dev.parent = &dev->dev;
241 241
242 input_dev->evbit[0] = BIT_MASK(EV_KEY); 242 input_dev->evbit[0] = BIT_MASK(EV_KEY);
243 input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0); 243 input_dev->keybit[BIT_WORD(KEY_CAMERA)] = BIT_MASK(KEY_CAMERA);
244 244
245 error = input_register_device(cam->input); 245 error = input_register_device(cam->input);
246 if (error) { 246 if (error) {
@@ -263,7 +263,7 @@ static void konicawc_unregister_input(struct konicawc *cam)
263static void konicawc_report_buttonstat(struct konicawc *cam) 263static void konicawc_report_buttonstat(struct konicawc *cam)
264{ 264{
265 if (cam->input) { 265 if (cam->input) {
266 input_report_key(cam->input, BTN_0, cam->buttonsts); 266 input_report_key(cam->input, KEY_CAMERA, cam->buttonsts);
267 input_sync(cam->input); 267 input_sync(cam->input);
268 } 268 }
269} 269}
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index fd112f0b9d35..803d3e4e29a2 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -103,7 +103,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
103 input_dev->dev.parent = &dev->dev; 103 input_dev->dev.parent = &dev->dev;
104 104
105 input_dev->evbit[0] = BIT_MASK(EV_KEY); 105 input_dev->evbit[0] = BIT_MASK(EV_KEY);
106 input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0); 106 input_dev->keybit[BIT_WORD(KEY_CAMERA)] = BIT_MASK(KEY_CAMERA);
107 107
108 error = input_register_device(cam->input); 108 error = input_register_device(cam->input);
109 if (error) { 109 if (error) {
@@ -126,7 +126,7 @@ static void qcm_unregister_input(struct qcm *cam)
126static void qcm_report_buttonstat(struct qcm *cam) 126static void qcm_report_buttonstat(struct qcm *cam)
127{ 127{
128 if (cam->input) { 128 if (cam->input) {
129 input_report_key(cam->input, BTN_0, cam->button_sts); 129 input_report_key(cam->input, KEY_CAMERA, cam->button_sts);
130 input_sync(cam->input); 130 input_sync(cam->input);
131 } 131 }
132} 132}
diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
index 8bc03b9e1315..6ba16abeebdd 100644
--- a/drivers/media/video/usbvision/usbvision-core.c
+++ b/drivers/media/video/usbvision/usbvision-core.c
@@ -390,10 +390,9 @@ int usbvision_scratch_alloc(struct usb_usbvision *usbvision)
390 390
391void usbvision_scratch_free(struct usb_usbvision *usbvision) 391void usbvision_scratch_free(struct usb_usbvision *usbvision)
392{ 392{
393 if (usbvision->scratch != NULL) { 393 vfree(usbvision->scratch);
394 vfree(usbvision->scratch); 394 usbvision->scratch = NULL;
395 usbvision->scratch = NULL; 395
396 }
397} 396}
398 397
399/* 398/*
@@ -506,10 +505,9 @@ int usbvision_decompress_alloc(struct usb_usbvision *usbvision)
506 */ 505 */
507void usbvision_decompress_free(struct usb_usbvision *usbvision) 506void usbvision_decompress_free(struct usb_usbvision *usbvision)
508{ 507{
509 if (usbvision->IntraFrameBuffer != NULL) { 508 vfree(usbvision->IntraFrameBuffer);
510 vfree(usbvision->IntraFrameBuffer); 509 usbvision->IntraFrameBuffer = NULL;
511 usbvision->IntraFrameBuffer = NULL; 510
512 }
513} 511}
514 512
515/************************************************************ 513/************************************************************
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index d7056a5b7f9b..90b58914f984 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -541,7 +541,7 @@ static int vidioc_enum_input (struct file *file, void *priv,
541 struct usb_usbvision *usbvision = video_drvdata(file); 541 struct usb_usbvision *usbvision = video_drvdata(file);
542 int chan; 542 int chan;
543 543
544 if ((vi->index >= usbvision->video_inputs) || (vi->index < 0) ) 544 if (vi->index >= usbvision->video_inputs)
545 return -EINVAL; 545 return -EINVAL;
546 if (usbvision->have_tuner) { 546 if (usbvision->have_tuner) {
547 chan = vi->index; 547 chan = vi->index;
@@ -1794,7 +1794,7 @@ static struct usb_driver usbvision_driver = {
1794 .name = "usbvision", 1794 .name = "usbvision",
1795 .id_table = usbvision_table, 1795 .id_table = usbvision_table,
1796 .probe = usbvision_probe, 1796 .probe = usbvision_probe,
1797 .disconnect = usbvision_disconnect 1797 .disconnect = __devexit_p(usbvision_disconnect),
1798}; 1798};
1799 1799
1800/* 1800/*
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 0d7e38d6ff6a..36a6ba92df27 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -1372,21 +1372,19 @@ end:
1372} 1372}
1373 1373
1374/* 1374/*
1375 * Prune an entity of its bogus controls. This currently includes processing 1375 * Prune an entity of its bogus controls using a blacklist. Bogus controls
1376 * unit auto controls for which no corresponding manual control is available. 1376 * are currently the ones that crash the camera or unconditionally return an
1377 * Such auto controls make little sense if any, and are known to crash at 1377 * error when queried.
1378 * least the SiGma Micro webcam.
1379 */ 1378 */
1380static void 1379static void
1381uvc_ctrl_prune_entity(struct uvc_entity *entity) 1380uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity)
1382{ 1381{
1383 static const struct { 1382 static const struct {
1384 u8 idx_manual; 1383 struct usb_device_id id;
1385 u8 idx_auto; 1384 u8 index;
1386 } blacklist[] = { 1385 } blacklist[] = {
1387 { 2, 11 }, /* Hue */ 1386 { { USB_DEVICE(0x1c4f, 0x3000) }, 6 }, /* WB Temperature */
1388 { 6, 12 }, /* White Balance Temperature */ 1387 { { USB_DEVICE(0x5986, 0x0241) }, 2 }, /* Hue */
1389 { 7, 13 }, /* White Balance Component */
1390 }; 1388 };
1391 1389
1392 u8 *controls; 1390 u8 *controls;
@@ -1400,19 +1398,17 @@ uvc_ctrl_prune_entity(struct uvc_entity *entity)
1400 size = entity->processing.bControlSize; 1398 size = entity->processing.bControlSize;
1401 1399
1402 for (i = 0; i < ARRAY_SIZE(blacklist); ++i) { 1400 for (i = 0; i < ARRAY_SIZE(blacklist); ++i) {
1403 if (blacklist[i].idx_auto >= 8 * size || 1401 if (!usb_match_id(dev->intf, &blacklist[i].id))
1404 blacklist[i].idx_manual >= 8 * size)
1405 continue; 1402 continue;
1406 1403
1407 if (!uvc_test_bit(controls, blacklist[i].idx_auto) || 1404 if (blacklist[i].index >= 8 * size ||
1408 uvc_test_bit(controls, blacklist[i].idx_manual)) 1405 !uvc_test_bit(controls, blacklist[i].index))
1409 continue; 1406 continue;
1410 1407
1411 uvc_trace(UVC_TRACE_CONTROL, "Auto control %u/%u has no " 1408 uvc_trace(UVC_TRACE_CONTROL, "%u/%u control is black listed, "
1412 "matching manual control, removing it.\n", entity->id, 1409 "removing it.\n", entity->id, blacklist[i].index);
1413 blacklist[i].idx_auto);
1414 1410
1415 uvc_clear_bit(controls, blacklist[i].idx_auto); 1411 uvc_clear_bit(controls, blacklist[i].index);
1416 } 1412 }
1417} 1413}
1418 1414
@@ -1442,8 +1438,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
1442 bControlSize = entity->camera.bControlSize; 1438 bControlSize = entity->camera.bControlSize;
1443 } 1439 }
1444 1440
1445 if (dev->quirks & UVC_QUIRK_PRUNE_CONTROLS) 1441 uvc_ctrl_prune_entity(dev, entity);
1446 uvc_ctrl_prune_entity(entity);
1447 1442
1448 for (i = 0; i < bControlSize; ++i) 1443 for (i = 0; i < bControlSize; ++i)
1449 ncontrols += hweight8(bmControls[i]); 1444 ncontrols += hweight8(bmControls[i]);
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 507dc85646b2..89927b7aec28 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -289,10 +289,8 @@ static int uvc_parse_format(struct uvc_device *dev,
289 struct uvc_format_desc *fmtdesc; 289 struct uvc_format_desc *fmtdesc;
290 struct uvc_frame *frame; 290 struct uvc_frame *frame;
291 const unsigned char *start = buffer; 291 const unsigned char *start = buffer;
292 unsigned char *_buffer;
293 unsigned int interval; 292 unsigned int interval;
294 unsigned int i, n; 293 unsigned int i, n;
295 int _buflen;
296 __u8 ftype; 294 __u8 ftype;
297 295
298 format->type = buffer[2]; 296 format->type = buffer[2];
@@ -303,7 +301,7 @@ static int uvc_parse_format(struct uvc_device *dev,
303 case VS_FORMAT_FRAME_BASED: 301 case VS_FORMAT_FRAME_BASED:
304 n = buffer[2] == VS_FORMAT_UNCOMPRESSED ? 27 : 28; 302 n = buffer[2] == VS_FORMAT_UNCOMPRESSED ? 27 : 28;
305 if (buflen < n) { 303 if (buflen < n) {
306 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" 304 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
307 "interface %d FORMAT error\n", 305 "interface %d FORMAT error\n",
308 dev->udev->devnum, 306 dev->udev->devnum,
309 alts->desc.bInterfaceNumber); 307 alts->desc.bInterfaceNumber);
@@ -338,7 +336,7 @@ static int uvc_parse_format(struct uvc_device *dev,
338 336
339 case VS_FORMAT_MJPEG: 337 case VS_FORMAT_MJPEG:
340 if (buflen < 11) { 338 if (buflen < 11) {
341 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" 339 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
342 "interface %d FORMAT error\n", 340 "interface %d FORMAT error\n",
343 dev->udev->devnum, 341 dev->udev->devnum,
344 alts->desc.bInterfaceNumber); 342 alts->desc.bInterfaceNumber);
@@ -354,7 +352,7 @@ static int uvc_parse_format(struct uvc_device *dev,
354 352
355 case VS_FORMAT_DV: 353 case VS_FORMAT_DV:
356 if (buflen < 9) { 354 if (buflen < 9) {
357 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" 355 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
358 "interface %d FORMAT error\n", 356 "interface %d FORMAT error\n",
359 dev->udev->devnum, 357 dev->udev->devnum,
360 alts->desc.bInterfaceNumber); 358 alts->desc.bInterfaceNumber);
@@ -372,7 +370,7 @@ static int uvc_parse_format(struct uvc_device *dev,
372 strlcpy(format->name, "HD-DV", sizeof format->name); 370 strlcpy(format->name, "HD-DV", sizeof format->name);
373 break; 371 break;
374 default: 372 default:
375 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" 373 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
376 "interface %d: unknown DV format %u\n", 374 "interface %d: unknown DV format %u\n",
377 dev->udev->devnum, 375 dev->udev->devnum,
378 alts->desc.bInterfaceNumber, buffer[8]); 376 alts->desc.bInterfaceNumber, buffer[8]);
@@ -401,7 +399,7 @@ static int uvc_parse_format(struct uvc_device *dev,
401 case VS_FORMAT_STREAM_BASED: 399 case VS_FORMAT_STREAM_BASED:
402 /* Not supported yet. */ 400 /* Not supported yet. */
403 default: 401 default:
404 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" 402 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
405 "interface %d unsupported format %u\n", 403 "interface %d unsupported format %u\n",
406 dev->udev->devnum, alts->desc.bInterfaceNumber, 404 dev->udev->devnum, alts->desc.bInterfaceNumber,
407 buffer[2]); 405 buffer[2]);
@@ -413,20 +411,11 @@ static int uvc_parse_format(struct uvc_device *dev,
413 buflen -= buffer[0]; 411 buflen -= buffer[0];
414 buffer += buffer[0]; 412 buffer += buffer[0];
415 413
416 /* Count the number of frame descriptors to test the bFrameIndex
417 * field when parsing the descriptors. We can't rely on the
418 * bNumFrameDescriptors field as some cameras don't initialize it
419 * properly.
420 */
421 for (_buflen = buflen, _buffer = buffer;
422 _buflen > 2 && _buffer[2] == ftype;
423 _buflen -= _buffer[0], _buffer += _buffer[0])
424 format->nframes++;
425
426 /* Parse the frame descriptors. Only uncompressed, MJPEG and frame 414 /* Parse the frame descriptors. Only uncompressed, MJPEG and frame
427 * based formats have frame descriptors. 415 * based formats have frame descriptors.
428 */ 416 */
429 while (buflen > 2 && buffer[2] == ftype) { 417 while (buflen > 2 && buffer[2] == ftype) {
418 frame = &format->frame[format->nframes];
430 if (ftype != VS_FRAME_FRAME_BASED) 419 if (ftype != VS_FRAME_FRAME_BASED)
431 n = buflen > 25 ? buffer[25] : 0; 420 n = buflen > 25 ? buffer[25] : 0;
432 else 421 else
@@ -435,22 +424,12 @@ static int uvc_parse_format(struct uvc_device *dev,
435 n = n ? n : 3; 424 n = n ? n : 3;
436 425
437 if (buflen < 26 + 4*n) { 426 if (buflen < 26 + 4*n) {
438 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" 427 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
439 "interface %d FRAME error\n", dev->udev->devnum, 428 "interface %d FRAME error\n", dev->udev->devnum,
440 alts->desc.bInterfaceNumber); 429 alts->desc.bInterfaceNumber);
441 return -EINVAL; 430 return -EINVAL;
442 } 431 }
443 432
444 if (buffer[3] - 1 >= format->nframes) {
445 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming"
446 "interface %d frame index %u out of range\n",
447 dev->udev->devnum, alts->desc.bInterfaceNumber,
448 buffer[3]);
449 return -EINVAL;
450 }
451
452 frame = &format->frame[buffer[3] - 1];
453
454 frame->bFrameIndex = buffer[3]; 433 frame->bFrameIndex = buffer[3];
455 frame->bmCapabilities = buffer[4]; 434 frame->bmCapabilities = buffer[4];
456 frame->wWidth = get_unaligned_le16(&buffer[5]); 435 frame->wWidth = get_unaligned_le16(&buffer[5]);
@@ -507,6 +486,7 @@ static int uvc_parse_format(struct uvc_device *dev,
507 10000000/frame->dwDefaultFrameInterval, 486 10000000/frame->dwDefaultFrameInterval,
508 (100000000/frame->dwDefaultFrameInterval)%10); 487 (100000000/frame->dwDefaultFrameInterval)%10);
509 488
489 format->nframes++;
510 buflen -= buffer[0]; 490 buflen -= buffer[0];
511 buffer += buffer[0]; 491 buffer += buffer[0];
512 } 492 }
@@ -518,7 +498,7 @@ static int uvc_parse_format(struct uvc_device *dev,
518 498
519 if (buflen > 2 && buffer[2] == VS_COLORFORMAT) { 499 if (buflen > 2 && buffer[2] == VS_COLORFORMAT) {
520 if (buflen < 6) { 500 if (buflen < 6) {
521 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" 501 uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming "
522 "interface %d COLORFORMAT error\n", 502 "interface %d COLORFORMAT error\n",
523 dev->udev->devnum, 503 dev->udev->devnum,
524 alts->desc.bInterfaceNumber); 504 alts->desc.bInterfaceNumber);
@@ -664,7 +644,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
664 _buflen = buflen; 644 _buflen = buflen;
665 645
666 /* Count the format and frame descriptors. */ 646 /* Count the format and frame descriptors. */
667 while (_buflen > 2) { 647 while (_buflen > 2 && _buffer[1] == CS_INTERFACE) {
668 switch (_buffer[2]) { 648 switch (_buffer[2]) {
669 case VS_FORMAT_UNCOMPRESSED: 649 case VS_FORMAT_UNCOMPRESSED:
670 case VS_FORMAT_MJPEG: 650 case VS_FORMAT_MJPEG:
@@ -729,7 +709,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
729 streaming->nformats = nformats; 709 streaming->nformats = nformats;
730 710
731 /* Parse the format descriptors. */ 711 /* Parse the format descriptors. */
732 while (buflen > 2) { 712 while (buflen > 2 && buffer[1] == CS_INTERFACE) {
733 switch (buffer[2]) { 713 switch (buffer[2]) {
734 case VS_FORMAT_UNCOMPRESSED: 714 case VS_FORMAT_UNCOMPRESSED:
735 case VS_FORMAT_MJPEG: 715 case VS_FORMAT_MJPEG:
@@ -1316,7 +1296,7 @@ static int uvc_scan_chain_forward(struct uvc_video_device *video,
1316 continue; 1296 continue;
1317 1297
1318 if (forward->extension.bNrInPins != 1) { 1298 if (forward->extension.bNrInPins != 1) {
1319 uvc_trace(UVC_TRACE_DESCR, "Extension unit %d has" 1299 uvc_trace(UVC_TRACE_DESCR, "Extension unit %d has "
1320 "more than 1 input pin.\n", entity->id); 1300 "more than 1 input pin.\n", entity->id);
1321 return -1; 1301 return -1;
1322 } 1302 }
@@ -1614,6 +1594,7 @@ static int uvc_probe(struct usb_interface *intf,
1614 INIT_LIST_HEAD(&dev->entities); 1594 INIT_LIST_HEAD(&dev->entities);
1615 INIT_LIST_HEAD(&dev->streaming); 1595 INIT_LIST_HEAD(&dev->streaming);
1616 kref_init(&dev->kref); 1596 kref_init(&dev->kref);
1597 atomic_set(&dev->users, 0);
1617 1598
1618 dev->udev = usb_get_dev(udev); 1599 dev->udev = usb_get_dev(udev);
1619 dev->intf = usb_get_intf(intf); 1600 dev->intf = usb_get_intf(intf);
@@ -1927,7 +1908,7 @@ static struct usb_device_id uvc_ids[] = {
1927 .bInterfaceSubClass = 1, 1908 .bInterfaceSubClass = 1,
1928 .bInterfaceProtocol = 0, 1909 .bInterfaceProtocol = 0,
1929 .driver_info = UVC_QUIRK_STREAM_NO_FID }, 1910 .driver_info = UVC_QUIRK_STREAM_NO_FID },
1930 /* Lenovo Thinkpad SL500 */ 1911 /* Lenovo Thinkpad SL400/SL500 */
1931 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 1912 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1932 | USB_DEVICE_ID_MATCH_INT_INFO, 1913 | USB_DEVICE_ID_MATCH_INT_INFO,
1933 .idVendor = 0x17ef, 1914 .idVendor = 0x17ef,
@@ -1936,6 +1917,15 @@ static struct usb_device_id uvc_ids[] = {
1936 .bInterfaceSubClass = 1, 1917 .bInterfaceSubClass = 1,
1937 .bInterfaceProtocol = 0, 1918 .bInterfaceProtocol = 0,
1938 .driver_info = UVC_QUIRK_STREAM_NO_FID }, 1919 .driver_info = UVC_QUIRK_STREAM_NO_FID },
1920 /* Aveo Technology USB 2.0 Camera */
1921 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1922 | USB_DEVICE_ID_MATCH_INT_INFO,
1923 .idVendor = 0x1871,
1924 .idProduct = 0x0306,
1925 .bInterfaceClass = USB_CLASS_VIDEO,
1926 .bInterfaceSubClass = 1,
1927 .bInterfaceProtocol = 0,
1928 .driver_info = UVC_QUIRK_PROBE_EXTRAFIELDS },
1939 /* Ecamm Pico iMage */ 1929 /* Ecamm Pico iMage */
1940 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 1930 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1941 | USB_DEVICE_ID_MATCH_INT_INFO, 1931 | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -1945,6 +1935,15 @@ static struct usb_device_id uvc_ids[] = {
1945 .bInterfaceSubClass = 1, 1935 .bInterfaceSubClass = 1,
1946 .bInterfaceProtocol = 0, 1936 .bInterfaceProtocol = 0,
1947 .driver_info = UVC_QUIRK_PROBE_EXTRAFIELDS }, 1937 .driver_info = UVC_QUIRK_PROBE_EXTRAFIELDS },
1938 /* FSC WebCam V30S */
1939 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1940 | USB_DEVICE_ID_MATCH_INT_INFO,
1941 .idVendor = 0x18ec,
1942 .idProduct = 0x3288,
1943 .bInterfaceClass = USB_CLASS_VIDEO,
1944 .bInterfaceSubClass = 1,
1945 .bInterfaceProtocol = 0,
1946 .driver_info = UVC_QUIRK_PROBE_MINMAX },
1948 /* Bodelin ProScopeHR */ 1947 /* Bodelin ProScopeHR */
1949 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE 1948 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
1950 | USB_DEVICE_ID_MATCH_DEV_HI 1949 | USB_DEVICE_ID_MATCH_DEV_HI
@@ -1965,8 +1964,7 @@ static struct usb_device_id uvc_ids[] = {
1965 .bInterfaceSubClass = 1, 1964 .bInterfaceSubClass = 1,
1966 .bInterfaceProtocol = 0, 1965 .bInterfaceProtocol = 0,
1967 .driver_info = UVC_QUIRK_PROBE_MINMAX 1966 .driver_info = UVC_QUIRK_PROBE_MINMAX
1968 | UVC_QUIRK_IGNORE_SELECTOR_UNIT 1967 | UVC_QUIRK_IGNORE_SELECTOR_UNIT },
1969 | UVC_QUIRK_PRUNE_CONTROLS },
1970 /* Generic USB Video Class */ 1968 /* Generic USB Video Class */
1971 { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) }, 1969 { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) },
1972 {} 1970 {}
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 0155752e4a5a..f854698c4061 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -172,6 +172,20 @@ int uvc_free_buffers(struct uvc_video_queue *queue)
172 return 0; 172 return 0;
173} 173}
174 174
175/*
176 * Check if buffers have been allocated.
177 */
178int uvc_queue_allocated(struct uvc_video_queue *queue)
179{
180 int allocated;
181
182 mutex_lock(&queue->mutex);
183 allocated = queue->count != 0;
184 mutex_unlock(&queue->mutex);
185
186 return allocated;
187}
188
175static void __uvc_query_buffer(struct uvc_buffer *buf, 189static void __uvc_query_buffer(struct uvc_buffer *buf,
176 struct v4l2_buffer *v4l2_buf) 190 struct v4l2_buffer *v4l2_buf)
177{ 191{
diff --git a/drivers/media/video/uvc/uvc_status.c b/drivers/media/video/uvc/uvc_status.c
index 21d87124986b..f152a9903862 100644
--- a/drivers/media/video/uvc/uvc_status.c
+++ b/drivers/media/video/uvc/uvc_status.c
@@ -194,7 +194,7 @@ int uvc_status_init(struct uvc_device *dev)
194 dev->status, UVC_MAX_STATUS_SIZE, uvc_status_complete, 194 dev->status, UVC_MAX_STATUS_SIZE, uvc_status_complete,
195 dev, interval); 195 dev, interval);
196 196
197 return usb_submit_urb(dev->int_urb, GFP_KERNEL); 197 return 0;
198} 198}
199 199
200void uvc_status_cleanup(struct uvc_device *dev) 200void uvc_status_cleanup(struct uvc_device *dev)
@@ -205,15 +205,30 @@ void uvc_status_cleanup(struct uvc_device *dev)
205 uvc_input_cleanup(dev); 205 uvc_input_cleanup(dev);
206} 206}
207 207
208int uvc_status_suspend(struct uvc_device *dev) 208int uvc_status_start(struct uvc_device *dev)
209{
210 if (dev->int_urb == NULL)
211 return 0;
212
213 return usb_submit_urb(dev->int_urb, GFP_KERNEL);
214}
215
216void uvc_status_stop(struct uvc_device *dev)
209{ 217{
210 usb_kill_urb(dev->int_urb); 218 usb_kill_urb(dev->int_urb);
219}
220
221int uvc_status_suspend(struct uvc_device *dev)
222{
223 if (atomic_read(&dev->users))
224 usb_kill_urb(dev->int_urb);
225
211 return 0; 226 return 0;
212} 227}
213 228
214int uvc_status_resume(struct uvc_device *dev) 229int uvc_status_resume(struct uvc_device *dev)
215{ 230{
216 if (dev->int_urb == NULL) 231 if (dev->int_urb == NULL || atomic_read(&dev->users) == 0)
217 return 0; 232 return 0;
218 233
219 return usb_submit_urb(dev->int_urb, GFP_NOIO); 234 return usb_submit_urb(dev->int_urb, GFP_NOIO);
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 2a80caa54fb4..5e77cad29690 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -46,6 +46,8 @@ static int uvc_v4l2_query_menu(struct uvc_video_device *video,
46 struct uvc_menu_info *menu_info; 46 struct uvc_menu_info *menu_info;
47 struct uvc_control_mapping *mapping; 47 struct uvc_control_mapping *mapping;
48 struct uvc_control *ctrl; 48 struct uvc_control *ctrl;
49 u32 index = query_menu->index;
50 u32 id = query_menu->id;
49 51
50 ctrl = uvc_find_control(video, query_menu->id, &mapping); 52 ctrl = uvc_find_control(video, query_menu->id, &mapping);
51 if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) 53 if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU)
@@ -54,6 +56,10 @@ static int uvc_v4l2_query_menu(struct uvc_video_device *video,
54 if (query_menu->index >= mapping->menu_count) 56 if (query_menu->index >= mapping->menu_count)
55 return -EINVAL; 57 return -EINVAL;
56 58
59 memset(query_menu, 0, sizeof(*query_menu));
60 query_menu->id = id;
61 query_menu->index = index;
62
57 menu_info = &mapping->menu_info[query_menu->index]; 63 menu_info = &mapping->menu_info[query_menu->index];
58 strlcpy(query_menu->name, menu_info->name, sizeof query_menu->name); 64 strlcpy(query_menu->name, menu_info->name, sizeof query_menu->name);
59 return 0; 65 return 0;
@@ -245,7 +251,7 @@ static int uvc_v4l2_set_format(struct uvc_video_device *video,
245 if (fmt->type != video->streaming->type) 251 if (fmt->type != video->streaming->type)
246 return -EINVAL; 252 return -EINVAL;
247 253
248 if (uvc_queue_streaming(&video->queue)) 254 if (uvc_queue_allocated(&video->queue))
249 return -EBUSY; 255 return -EBUSY;
250 256
251 ret = uvc_v4l2_try_format(video, fmt, &probe, &format, &frame); 257 ret = uvc_v4l2_try_format(video, fmt, &probe, &format, &frame);
@@ -433,6 +439,15 @@ static int uvc_v4l2_open(struct file *file)
433 goto done; 439 goto done;
434 } 440 }
435 441
442 if (atomic_inc_return(&video->dev->users) == 1) {
443 if ((ret = uvc_status_start(video->dev)) < 0) {
444 usb_autopm_put_interface(video->dev->intf);
445 atomic_dec(&video->dev->users);
446 kfree(handle);
447 goto done;
448 }
449 }
450
436 handle->device = video; 451 handle->device = video;
437 handle->state = UVC_HANDLE_PASSIVE; 452 handle->state = UVC_HANDLE_PASSIVE;
438 file->private_data = handle; 453 file->private_data = handle;
@@ -467,6 +482,9 @@ static int uvc_v4l2_release(struct file *file)
467 kfree(handle); 482 kfree(handle);
468 file->private_data = NULL; 483 file->private_data = NULL;
469 484
485 if (atomic_dec_return(&video->dev->users) == 0)
486 uvc_status_stop(video->dev);
487
470 usb_autopm_put_interface(video->dev->intf); 488 usb_autopm_put_interface(video->dev->intf);
471 kref_put(&video->dev->kref, uvc_delete); 489 kref_put(&video->dev->kref, uvc_delete);
472 return 0; 490 return 0;
@@ -512,7 +530,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
512 memset(&xctrl, 0, sizeof xctrl); 530 memset(&xctrl, 0, sizeof xctrl);
513 xctrl.id = ctrl->id; 531 xctrl.id = ctrl->id;
514 532
515 uvc_ctrl_begin(video); 533 ret = uvc_ctrl_begin(video);
534 if (ret < 0)
535 return ret;
536
516 ret = uvc_ctrl_get(video, &xctrl); 537 ret = uvc_ctrl_get(video, &xctrl);
517 uvc_ctrl_rollback(video); 538 uvc_ctrl_rollback(video);
518 if (ret >= 0) 539 if (ret >= 0)
@@ -529,7 +550,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
529 xctrl.id = ctrl->id; 550 xctrl.id = ctrl->id;
530 xctrl.value = ctrl->value; 551 xctrl.value = ctrl->value;
531 552
532 uvc_ctrl_begin(video); 553 ret = uvc_ctrl_begin(video);
554 if (ret < 0)
555 return ret;
556
533 ret = uvc_ctrl_set(video, &xctrl); 557 ret = uvc_ctrl_set(video, &xctrl);
534 if (ret < 0) { 558 if (ret < 0) {
535 uvc_ctrl_rollback(video); 559 uvc_ctrl_rollback(video);
@@ -548,7 +572,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
548 struct v4l2_ext_control *ctrl = ctrls->controls; 572 struct v4l2_ext_control *ctrl = ctrls->controls;
549 unsigned int i; 573 unsigned int i;
550 574
551 uvc_ctrl_begin(video); 575 ret = uvc_ctrl_begin(video);
576 if (ret < 0)
577 return ret;
578
552 for (i = 0; i < ctrls->count; ++ctrl, ++i) { 579 for (i = 0; i < ctrls->count; ++ctrl, ++i) {
553 ret = uvc_ctrl_get(video, ctrl); 580 ret = uvc_ctrl_get(video, ctrl);
554 if (ret < 0) { 581 if (ret < 0) {
@@ -648,7 +675,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
648 675
649 case VIDIOC_S_INPUT: 676 case VIDIOC_S_INPUT:
650 { 677 {
651 u8 input = *(u32 *)arg + 1; 678 u32 input = *(u32 *)arg + 1;
652 679
653 if ((ret = uvc_acquire_privileges(handle)) < 0) 680 if ((ret = uvc_acquire_privileges(handle)) < 0)
654 return ret; 681 return ret;
@@ -660,7 +687,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
660 break; 687 break;
661 } 688 }
662 689
663 if (input > video->selector->selector.bNrInPins) 690 if (input == 0 || input > video->selector->selector.bNrInPins)
664 return -EINVAL; 691 return -EINVAL;
665 692
666 return uvc_query_ctrl(video->dev, SET_CUR, video->selector->id, 693 return uvc_query_ctrl(video->dev, SET_CUR, video->selector->id,
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 6ce974d7362f..01b633c73480 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -65,7 +65,8 @@ static void uvc_fixup_video_ctrl(struct uvc_video_device *video,
65 struct uvc_streaming_control *ctrl) 65 struct uvc_streaming_control *ctrl)
66{ 66{
67 struct uvc_format *format; 67 struct uvc_format *format;
68 struct uvc_frame *frame; 68 struct uvc_frame *frame = NULL;
69 unsigned int i;
69 70
70 if (ctrl->bFormatIndex <= 0 || 71 if (ctrl->bFormatIndex <= 0 ||
71 ctrl->bFormatIndex > video->streaming->nformats) 72 ctrl->bFormatIndex > video->streaming->nformats)
@@ -73,11 +74,15 @@ static void uvc_fixup_video_ctrl(struct uvc_video_device *video,
73 74
74 format = &video->streaming->format[ctrl->bFormatIndex - 1]; 75 format = &video->streaming->format[ctrl->bFormatIndex - 1];
75 76
76 if (ctrl->bFrameIndex <= 0 || 77 for (i = 0; i < format->nframes; ++i) {
77 ctrl->bFrameIndex > format->nframes) 78 if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) {
78 return; 79 frame = &format->frame[i];
80 break;
81 }
82 }
79 83
80 frame = &format->frame[ctrl->bFrameIndex - 1]; 84 if (frame == NULL)
85 return;
81 86
82 if (!(format->flags & UVC_FMT_FLAG_COMPRESSED) || 87 if (!(format->flags & UVC_FMT_FLAG_COMPRESSED) ||
83 (ctrl->dwMaxVideoFrameSize == 0 && 88 (ctrl->dwMaxVideoFrameSize == 0 &&
@@ -1089,7 +1094,7 @@ int uvc_video_init(struct uvc_video_device *video)
1089 /* Zero bFrameIndex might be correct. Stream-based formats (including 1094 /* Zero bFrameIndex might be correct. Stream-based formats (including
1090 * MPEG-2 TS and DV) do not support frames but have a dummy frame 1095 * MPEG-2 TS and DV) do not support frames but have a dummy frame
1091 * descriptor with bFrameIndex set to zero. If the default frame 1096 * descriptor with bFrameIndex set to zero. If the default frame
1092 * descriptor is not found, use the first avalable frame. 1097 * descriptor is not found, use the first available frame.
1093 */ 1098 */
1094 for (i = format->nframes; i > 0; --i) { 1099 for (i = format->nframes; i > 0; --i) {
1095 frame = &format->frame[i-1]; 1100 frame = &format->frame[i-1];
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index e5014e668f99..3c78d3c1e4c0 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -313,7 +313,6 @@ struct uvc_xu_control {
313#define UVC_QUIRK_BUILTIN_ISIGHT 0x00000008 313#define UVC_QUIRK_BUILTIN_ISIGHT 0x00000008
314#define UVC_QUIRK_STREAM_NO_FID 0x00000010 314#define UVC_QUIRK_STREAM_NO_FID 0x00000010
315#define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020 315#define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020
316#define UVC_QUIRK_PRUNE_CONTROLS 0x00000040
317#define UVC_QUIRK_FIX_BANDWIDTH 0x00000080 316#define UVC_QUIRK_FIX_BANDWIDTH 0x00000080
318 317
319/* Format flags */ 318/* Format flags */
@@ -634,6 +633,7 @@ struct uvc_device {
634 enum uvc_device_state state; 633 enum uvc_device_state state;
635 struct kref kref; 634 struct kref kref;
636 struct list_head list; 635 struct list_head list;
636 atomic_t users;
637 637
638 /* Video control interface */ 638 /* Video control interface */
639 __u16 uvc_version; 639 __u16 uvc_version;
@@ -747,6 +747,7 @@ extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
747 struct uvc_buffer *buf); 747 struct uvc_buffer *buf);
748extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue, 748extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue,
749 struct file *file, poll_table *wait); 749 struct file *file, poll_table *wait);
750extern int uvc_queue_allocated(struct uvc_video_queue *queue);
750static inline int uvc_queue_streaming(struct uvc_video_queue *queue) 751static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
751{ 752{
752 return queue->flags & UVC_QUEUE_STREAMING; 753 return queue->flags & UVC_QUEUE_STREAMING;
@@ -770,6 +771,8 @@ extern int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit,
770/* Status */ 771/* Status */
771extern int uvc_status_init(struct uvc_device *dev); 772extern int uvc_status_init(struct uvc_device *dev);
772extern void uvc_status_cleanup(struct uvc_device *dev); 773extern void uvc_status_cleanup(struct uvc_device *dev);
774extern int uvc_status_start(struct uvc_device *dev);
775extern void uvc_status_stop(struct uvc_device *dev);
773extern int uvc_status_suspend(struct uvc_device *dev); 776extern int uvc_status_suspend(struct uvc_device *dev);
774extern int uvc_status_resume(struct uvc_device *dev); 777extern int uvc_status_resume(struct uvc_device *dev);
775 778
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index f576ef66b807..f96475626da7 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -746,6 +746,7 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
746 const struct v4l2_subdev_ops *ops) 746 const struct v4l2_subdev_ops *ops)
747{ 747{
748 v4l2_subdev_init(sd, ops); 748 v4l2_subdev_init(sd, ops);
749 sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
749 /* the owner is the same as the i2c_client's driver owner */ 750 /* the owner is the same as the i2c_client's driver owner */
750 sd->owner = client->driver->driver.owner; 751 sd->owner = client->driver->driver.owner;
751 /* i2c_client and v4l2_subdev point to one another */ 752 /* i2c_client and v4l2_subdev point to one another */
@@ -897,8 +898,7 @@ const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
897 }; 898 };
898 static const unsigned short tv_addrs[] = { 899 static const unsigned short tv_addrs[] = {
899 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */ 900 0x42, 0x43, 0x4a, 0x4b, /* tda8290 */
900 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 901 0x60, 0x61, 0x62, 0x63, 0x64,
901 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
902 I2C_CLIENT_END 902 I2C_CLIENT_END
903 }; 903 };
904 904
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 94aa485ade52..0d06e7cbd5b3 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -49,6 +49,22 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
49} 49}
50EXPORT_SYMBOL_GPL(v4l2_device_register); 50EXPORT_SYMBOL_GPL(v4l2_device_register);
51 51
52int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
53 atomic_t *instance)
54{
55 int num = atomic_inc_return(instance) - 1;
56 int len = strlen(basename);
57
58 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
59 snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
60 "%s-%d", basename, num);
61 else
62 snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
63 "%s%d", basename, num);
64 return num;
65}
66EXPORT_SYMBOL_GPL(v4l2_device_set_name);
67
52void v4l2_device_disconnect(struct v4l2_device *v4l2_dev) 68void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
53{ 69{
54 if (v4l2_dev->dev) { 70 if (v4l2_dev->dev) {
@@ -67,8 +83,21 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
67 v4l2_device_disconnect(v4l2_dev); 83 v4l2_device_disconnect(v4l2_dev);
68 84
69 /* Unregister subdevs */ 85 /* Unregister subdevs */
70 list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) 86 list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) {
71 v4l2_device_unregister_subdev(sd); 87 v4l2_device_unregister_subdev(sd);
88#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
89 if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) {
90 struct i2c_client *client = v4l2_get_subdevdata(sd);
91
92 /* We need to unregister the i2c client explicitly.
93 We cannot rely on i2c_del_adapter to always
94 unregister clients for us, since if the i2c bus
95 is a platform bus, then it is never deleted. */
96 if (client)
97 i2c_unregister_device(client);
98 }
99#endif
100 }
72} 101}
73EXPORT_SYMBOL_GPL(v4l2_device_unregister); 102EXPORT_SYMBOL_GPL(v4l2_device_unregister);
74 103
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index b7b05842cf28..f1ccf98c0a6f 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -118,6 +118,7 @@ void videobuf_queue_core_init(struct videobuf_queue *q,
118 void *priv, 118 void *priv,
119 struct videobuf_qtype_ops *int_ops) 119 struct videobuf_qtype_ops *int_ops)
120{ 120{
121 BUG_ON(!q);
121 memset(q, 0, sizeof(*q)); 122 memset(q, 0, sizeof(*q));
122 q->irqlock = irqlock; 123 q->irqlock = irqlock;
123 q->dev = dev; 124 q->dev = dev;
@@ -439,6 +440,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
439 } 440 }
440 441
441 req->count = retval; 442 req->count = retval;
443 retval = 0;
442 444
443 done: 445 done:
444 mutex_unlock(&q->vb_lock); 446 mutex_unlock(&q->vb_lock);
@@ -454,7 +456,7 @@ int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
454 dprintk(1, "querybuf: Wrong type.\n"); 456 dprintk(1, "querybuf: Wrong type.\n");
455 goto done; 457 goto done;
456 } 458 }
457 if (unlikely(b->index < 0 || b->index >= VIDEO_MAX_FRAME)) { 459 if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
458 dprintk(1, "querybuf: index out of range.\n"); 460 dprintk(1, "querybuf: index out of range.\n");
459 goto done; 461 goto done;
460 } 462 }
@@ -495,7 +497,7 @@ int videobuf_qbuf(struct videobuf_queue *q,
495 dprintk(1, "qbuf: Wrong type.\n"); 497 dprintk(1, "qbuf: Wrong type.\n");
496 goto done; 498 goto done;
497 } 499 }
498 if (b->index < 0 || b->index >= VIDEO_MAX_FRAME) { 500 if (b->index >= VIDEO_MAX_FRAME) {
499 dprintk(1, "qbuf: index out of range.\n"); 501 dprintk(1, "qbuf: index out of range.\n");
500 goto done; 502 goto done;
501 } 503 }
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index 6109fb5f34e2..d09ce83a9429 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/pagemap.h>
20#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
21#include <media/videobuf-dma-contig.h> 22#include <media/videobuf-dma-contig.h>
22 23
@@ -25,6 +26,7 @@ struct videobuf_dma_contig_memory {
25 void *vaddr; 26 void *vaddr;
26 dma_addr_t dma_handle; 27 dma_addr_t dma_handle;
27 unsigned long size; 28 unsigned long size;
29 int is_userptr;
28}; 30};
29 31
30#define MAGIC_DC_MEM 0x0733ac61 32#define MAGIC_DC_MEM 0x0733ac61
@@ -108,6 +110,82 @@ static struct vm_operations_struct videobuf_vm_ops = {
108 .close = videobuf_vm_close, 110 .close = videobuf_vm_close,
109}; 111};
110 112
113/**
114 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
115 * @mem: per-buffer private videobuf-dma-contig data
116 *
117 * This function resets the user space pointer
118 */
119static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
120{
121 mem->is_userptr = 0;
122 mem->dma_handle = 0;
123 mem->size = 0;
124}
125
126/**
127 * videobuf_dma_contig_user_get() - setup user space memory pointer
128 * @mem: per-buffer private videobuf-dma-contig data
129 * @vb: video buffer to map
130 *
131 * This function validates and sets up a pointer to user space memory.
132 * Only physically contiguous pfn-mapped memory is accepted.
133 *
134 * Returns 0 if successful.
135 */
136static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
137 struct videobuf_buffer *vb)
138{
139 struct mm_struct *mm = current->mm;
140 struct vm_area_struct *vma;
141 unsigned long prev_pfn, this_pfn;
142 unsigned long pages_done, user_address;
143 int ret;
144
145 mem->size = PAGE_ALIGN(vb->size);
146 mem->is_userptr = 0;
147 ret = -EINVAL;
148
149 down_read(&mm->mmap_sem);
150
151 vma = find_vma(mm, vb->baddr);
152 if (!vma)
153 goto out_up;
154
155 if ((vb->baddr + mem->size) > vma->vm_end)
156 goto out_up;
157
158 pages_done = 0;
159 prev_pfn = 0; /* kill warning */
160 user_address = vb->baddr;
161
162 while (pages_done < (mem->size >> PAGE_SHIFT)) {
163 ret = follow_pfn(vma, user_address, &this_pfn);
164 if (ret)
165 break;
166
167 if (pages_done == 0)
168 mem->dma_handle = this_pfn << PAGE_SHIFT;
169 else if (this_pfn != (prev_pfn + 1))
170 ret = -EFAULT;
171
172 if (ret)
173 break;
174
175 prev_pfn = this_pfn;
176 user_address += PAGE_SIZE;
177 pages_done++;
178 }
179
180 if (!ret)
181 mem->is_userptr = 1;
182
183 out_up:
184 up_read(&current->mm->mmap_sem);
185
186 return ret;
187}
188
111static void *__videobuf_alloc(size_t size) 189static void *__videobuf_alloc(size_t size)
112{ 190{
113 struct videobuf_dma_contig_memory *mem; 191 struct videobuf_dma_contig_memory *mem;
@@ -154,12 +232,11 @@ static int __videobuf_iolock(struct videobuf_queue *q,
154 case V4L2_MEMORY_USERPTR: 232 case V4L2_MEMORY_USERPTR:
155 dev_dbg(q->dev, "%s memory method USERPTR\n", __func__); 233 dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
156 234
157 /* The only USERPTR currently supported is the one needed for 235 /* handle pointer from user space */
158 read() method.
159 */
160 if (vb->baddr) 236 if (vb->baddr)
161 return -EINVAL; 237 return videobuf_dma_contig_user_get(mem, vb);
162 238
239 /* allocate memory for the read() method */
163 mem->size = PAGE_ALIGN(vb->size); 240 mem->size = PAGE_ALIGN(vb->size);
164 mem->vaddr = dma_alloc_coherent(q->dev, mem->size, 241 mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
165 &mem->dma_handle, GFP_KERNEL); 242 &mem->dma_handle, GFP_KERNEL);
@@ -182,19 +259,6 @@ static int __videobuf_iolock(struct videobuf_queue *q,
182 return 0; 259 return 0;
183} 260}
184 261
185static int __videobuf_sync(struct videobuf_queue *q,
186 struct videobuf_buffer *buf)
187{
188 struct videobuf_dma_contig_memory *mem = buf->priv;
189
190 BUG_ON(!mem);
191 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
192
193 dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
194 DMA_FROM_DEVICE);
195 return 0;
196}
197
198static int __videobuf_mmap_free(struct videobuf_queue *q) 262static int __videobuf_mmap_free(struct videobuf_queue *q)
199{ 263{
200 unsigned int i; 264 unsigned int i;
@@ -356,7 +420,6 @@ static struct videobuf_qtype_ops qops = {
356 420
357 .alloc = __videobuf_alloc, 421 .alloc = __videobuf_alloc,
358 .iolock = __videobuf_iolock, 422 .iolock = __videobuf_iolock,
359 .sync = __videobuf_sync,
360 .mmap_free = __videobuf_mmap_free, 423 .mmap_free = __videobuf_mmap_free,
361 .mmap_mapper = __videobuf_mmap_mapper, 424 .mmap_mapper = __videobuf_mmap_mapper,
362 .video_copy_to_user = __videobuf_copy_to_user, 425 .video_copy_to_user = __videobuf_copy_to_user,
@@ -400,7 +463,7 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
400 So, it should free memory only if the memory were allocated for 463 So, it should free memory only if the memory were allocated for
401 read() operation. 464 read() operation.
402 */ 465 */
403 if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr) 466 if (buf->memory != V4L2_MEMORY_USERPTR)
404 return; 467 return;
405 468
406 if (!mem) 469 if (!mem)
@@ -408,6 +471,13 @@ void videobuf_dma_contig_free(struct videobuf_queue *q,
408 471
409 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM); 472 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
410 473
474 /* handle user space pointer case */
475 if (buf->baddr) {
476 videobuf_dma_contig_user_put(mem);
477 return;
478 }
479
480 /* read() method */
411 dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle); 481 dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
412 mem->vaddr = NULL; 482 mem->vaddr = NULL;
413} 483}
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index da1790e57a86..a8dd22ace3fb 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -58,9 +58,10 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
58 struct page *pg; 58 struct page *pg;
59 int i; 59 int i;
60 60
61 sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL); 61 sglist = vmalloc(nr_pages * sizeof(*sglist));
62 if (NULL == sglist) 62 if (NULL == sglist)
63 return NULL; 63 return NULL;
64 memset(sglist, 0, nr_pages * sizeof(*sglist));
64 sg_init_table(sglist, nr_pages); 65 sg_init_table(sglist, nr_pages);
65 for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) { 66 for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
66 pg = vmalloc_to_page(virt); 67 pg = vmalloc_to_page(virt);
@@ -72,7 +73,7 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
72 return sglist; 73 return sglist;
73 74
74 err: 75 err:
75 kfree(sglist); 76 vfree(sglist);
76 return NULL; 77 return NULL;
77} 78}
78 79
@@ -84,7 +85,7 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
84 85
85 if (NULL == pages[0]) 86 if (NULL == pages[0])
86 return NULL; 87 return NULL;
87 sglist = kmalloc(nr_pages * sizeof(*sglist), GFP_KERNEL); 88 sglist = vmalloc(nr_pages * sizeof(*sglist));
88 if (NULL == sglist) 89 if (NULL == sglist)
89 return NULL; 90 return NULL;
90 sg_init_table(sglist, nr_pages); 91 sg_init_table(sglist, nr_pages);
@@ -104,12 +105,12 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
104 105
105 nopage: 106 nopage:
106 dprintk(2,"sgl: oops - no page\n"); 107 dprintk(2,"sgl: oops - no page\n");
107 kfree(sglist); 108 vfree(sglist);
108 return NULL; 109 return NULL;
109 110
110 highmem: 111 highmem:
111 dprintk(2,"sgl: oops - highmem page\n"); 112 dprintk(2,"sgl: oops - highmem page\n");
112 kfree(sglist); 113 vfree(sglist);
113 return NULL; 114 return NULL;
114} 115}
115 116
@@ -230,7 +231,7 @@ int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
230 (dma->vmalloc,dma->nr_pages); 231 (dma->vmalloc,dma->nr_pages);
231 } 232 }
232 if (dma->bus_addr) { 233 if (dma->bus_addr) {
233 dma->sglist = kmalloc(sizeof(struct scatterlist), GFP_KERNEL); 234 dma->sglist = vmalloc(sizeof(*dma->sglist));
234 if (NULL != dma->sglist) { 235 if (NULL != dma->sglist) {
235 dma->sglen = 1; 236 dma->sglen = 1;
236 sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK; 237 sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK;
@@ -248,10 +249,10 @@ int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
248 if (0 == dma->sglen) { 249 if (0 == dma->sglen) {
249 printk(KERN_WARNING 250 printk(KERN_WARNING
250 "%s: videobuf_map_sg failed\n",__func__); 251 "%s: videobuf_map_sg failed\n",__func__);
251 kfree(dma->sglist); 252 vfree(dma->sglist);
252 dma->sglist = NULL; 253 dma->sglist = NULL;
253 dma->sglen = 0; 254 dma->sglen = 0;
254 return -EIO; 255 return -ENOMEM;
255 } 256 }
256 } 257 }
257 return 0; 258 return 0;
@@ -274,7 +275,7 @@ int videobuf_dma_unmap(struct videobuf_queue* q,struct videobuf_dmabuf *dma)
274 275
275 dma_unmap_sg(q->dev, dma->sglist, dma->nr_pages, dma->direction); 276 dma_unmap_sg(q->dev, dma->sglist, dma->nr_pages, dma->direction);
276 277
277 kfree(dma->sglist); 278 vfree(dma->sglist);
278 dma->sglist = NULL; 279 dma->sglist = NULL;
279 dma->sglen = 0; 280 dma->sglen = 0;
280 return 0; 281 return 0;
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 43e0998adb53..97b082fe4473 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -868,9 +868,9 @@ static void vino_sync_buffer(struct vino_framebuffer *fb)
868 dprintk("vino_sync_buffer():\n"); 868 dprintk("vino_sync_buffer():\n");
869 869
870 for (i = 0; i < fb->desc_table.page_count; i++) 870 for (i = 0; i < fb->desc_table.page_count; i++)
871 dma_sync_single(NULL, 871 dma_sync_single_for_cpu(NULL,
872 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i], 872 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
873 PAGE_SIZE, DMA_FROM_DEVICE); 873 PAGE_SIZE, DMA_FROM_DEVICE);
874} 874}
875 875
876/* Framebuffer fifo functions (need to be locked externally) */ 876/* Framebuffer fifo functions (need to be locked externally) */
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index ea6c577b0eb3..03dc2f3cf84a 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -1022,7 +1022,7 @@ zr36057_init (struct zoran *zr)
1022 zr->vbuf_bytesperline = 0; 1022 zr->vbuf_bytesperline = 0;
1023 1023
1024 /* Avoid nonsense settings from user for default input/norm */ 1024 /* Avoid nonsense settings from user for default input/norm */
1025 if (default_norm < 0 && default_norm > 2) 1025 if (default_norm < 0 || default_norm > 2)
1026 default_norm = 0; 1026 default_norm = 0;
1027 if (default_norm == 0) { 1027 if (default_norm == 0) {
1028 zr->norm = V4L2_STD_PAL; 1028 zr->norm = V4L2_STD_PAL;
@@ -1477,7 +1477,7 @@ static struct pci_driver zoran_driver = {
1477 .name = "zr36067", 1477 .name = "zr36067",
1478 .id_table = zr36067_pci_tbl, 1478 .id_table = zr36067_pci_tbl,
1479 .probe = zoran_probe, 1479 .probe = zoran_probe,
1480 .remove = zoran_remove, 1480 .remove = __devexit_p(zoran_remove),
1481}; 1481};
1482 1482
1483static int __init zoran_init(void) 1483static int __init zoran_init(void)
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index ac169c9eb18d..fc976f42f432 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -882,9 +882,11 @@ static void zr364xx_disconnect(struct usb_interface *intf)
882 video_unregister_device(cam->vdev); 882 video_unregister_device(cam->vdev);
883 cam->vdev = NULL; 883 cam->vdev = NULL;
884 kfree(cam->buffer); 884 kfree(cam->buffer);
885 if (cam->framebuf) 885 cam->buffer = NULL;
886 vfree(cam->framebuf); 886 vfree(cam->framebuf);
887 cam->framebuf = NULL;
887 kfree(cam); 888 kfree(cam);
889 cam = NULL;
888} 890}
889 891
890 892
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 386da1566fcc..cb73051e43db 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -35,7 +35,7 @@ struct pasic3_data {
35 */ 35 */
36void pasic3_write_register(struct device *dev, u32 reg, u8 val) 36void pasic3_write_register(struct device *dev, u32 reg, u8 val)
37{ 37{
38 struct pasic3_data *asic = dev->driver_data; 38 struct pasic3_data *asic = dev_get_drvdata(dev);
39 int bus_shift = asic->bus_shift; 39 int bus_shift = asic->bus_shift;
40 void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift); 40 void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);
41 void __iomem *data = asic->mapping + (REG_DATA << bus_shift); 41 void __iomem *data = asic->mapping + (REG_DATA << bus_shift);
@@ -50,7 +50,7 @@ EXPORT_SYMBOL(pasic3_write_register); /* for leds-pasic3 */
50 */ 50 */
51u8 pasic3_read_register(struct device *dev, u32 reg) 51u8 pasic3_read_register(struct device *dev, u32 reg)
52{ 52{
53 struct pasic3_data *asic = dev->driver_data; 53 struct pasic3_data *asic = dev_get_drvdata(dev);
54 int bus_shift = asic->bus_shift; 54 int bus_shift = asic->bus_shift;
55 void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift); 55 void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);
56 void __iomem *data = asic->mapping + (REG_DATA << bus_shift); 56 void __iomem *data = asic->mapping + (REG_DATA << bus_shift);
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 11a6248cc1c1..082c197ab9b8 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -618,7 +618,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
618 618
619 pdev->dev.parent = pcf->dev; 619 pdev->dev.parent = pcf->dev;
620 pdev->dev.platform_data = &pdata->reg_init_data[i]; 620 pdev->dev.platform_data = &pdata->reg_init_data[i];
621 pdev->dev.driver_data = pcf; 621 dev_set_drvdata(&pdev->dev, pcf);
622 pcf->regulator_pdev[i] = pdev; 622 pcf->regulator_pdev[i] = pdev;
623 623
624 platform_device_add(pdev); 624 platform_device_add(pdev);
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index cf30d06a0104..7c21bf791569 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -265,7 +265,7 @@ static int wm8400_init(struct wm8400 *wm8400,
265 265
266 mutex_init(&wm8400->io_lock); 266 mutex_init(&wm8400->io_lock);
267 267
268 wm8400->dev->driver_data = wm8400; 268 dev_set_drvdata(wm8400->dev, wm8400);
269 269
270 /* Check that this is actually a WM8400 */ 270 /* Check that this is actually a WM8400 */
271 ret = wm8400->read_dev(wm8400->io_data, WM8400_RESET_ID, 1, &reg); 271 ret = wm8400->read_dev(wm8400->io_data, WM8400_RESET_ID, 1, &reg);
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 0207dd59090d..b5346b4db91a 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -15,6 +15,7 @@
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/kmemcheck.h>
18#include <linux/ctype.h> 19#include <linux/ctype.h>
19#include <linux/delay.h> 20#include <linux/delay.h>
20#include <linux/idr.h> 21#include <linux/idr.h>
@@ -891,6 +892,7 @@ struct c2port_device *c2port_device_register(char *name,
891 return ERR_PTR(-EINVAL); 892 return ERR_PTR(-EINVAL);
892 893
893 c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL); 894 c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
895 kmemcheck_annotate_bitfield(c2dev, flags);
894 if (unlikely(!c2dev)) 896 if (unlikely(!c2dev))
895 return ERR_PTR(-ENOMEM); 897 return ERR_PTR(-ENOMEM);
896 898
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index bbefe77c67a9..3ce2920e2bf3 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -302,7 +302,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
302 pnode = uv_node_to_pnode(nid); 302 pnode = uv_node_to_pnode(nid);
303 if (bid < 0 || gru_base[bid]) 303 if (bid < 0 || gru_base[bid])
304 continue; 304 continue;
305 page = alloc_pages_node(nid, GFP_KERNEL, order); 305 page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
306 if (!page) 306 if (!page)
307 goto fail; 307 goto fail;
308 gru_base[bid] = page_address(page); 308 gru_base[bid] = page_address(page);
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 9172fcdee4e2..c76677afda1b 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -232,7 +232,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
232 mq->mmr_blade = uv_cpu_to_blade_id(cpu); 232 mq->mmr_blade = uv_cpu_to_blade_id(cpu);
233 233
234 nid = cpu_to_node(cpu); 234 nid = cpu_to_node(cpu);
235 page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, 235 page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
236 pg_order); 236 pg_order);
237 if (page == NULL) { 237 if (page == NULL) {
238 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " 238 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 3f063108e95f..b1cd7a1a2191 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -49,15 +49,16 @@ config MTD_UBI_BEB_RESERVE
49 reserved. Leave the default value if unsure. 49 reserved. Leave the default value if unsure.
50 50
51config MTD_UBI_GLUEBI 51config MTD_UBI_GLUEBI
52 bool "Emulate MTD devices" 52 tristate "MTD devices emulation driver (gluebi)"
53 default n 53 default n
54 depends on MTD_UBI 54 depends on MTD_UBI
55 help 55 help
56 This option enables MTD devices emulation on top of UBI volumes: for 56 This option enables gluebi - an additional driver which emulates MTD
57 each UBI volumes an MTD device is created, and all I/O to this MTD 57 devices on top of UBI volumes: for each UBI volumes an MTD device is
58 device is redirected to the UBI volume. This is handy to make 58 created, and all I/O to this MTD device is redirected to the UBI
59 MTD-oriented software (like JFFS2) work on top of UBI. Do not enable 59 volume. This is handy to make MTD-oriented software (like JFFS2)
60 this if no legacy software will be used. 60 work on top of UBI. Do not enable this unless you use legacy
61 software.
61 62
62source "drivers/mtd/ubi/Kconfig.debug" 63source "drivers/mtd/ubi/Kconfig.debug"
63endmenu 64endmenu
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index dd834e04151b..c9302a5452b0 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -4,4 +4,4 @@ ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o
4ubi-y += misc.o 4ubi-y += misc.o
5 5
6ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o 6ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
7ubi-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o 7obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 4048db83aef6..286ed594e5a0 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -41,6 +41,7 @@
41#include <linux/miscdevice.h> 41#include <linux/miscdevice.h>
42#include <linux/log2.h> 42#include <linux/log2.h>
43#include <linux/kthread.h> 43#include <linux/kthread.h>
44#include <linux/reboot.h>
44#include "ubi.h" 45#include "ubi.h"
45 46
46/* Maximum length of the 'mtd=' parameter */ 47/* Maximum length of the 'mtd=' parameter */
@@ -122,6 +123,94 @@ static struct device_attribute dev_mtd_num =
122 __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); 123 __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
123 124
124/** 125/**
126 * ubi_volume_notify - send a volume change notification.
127 * @ubi: UBI device description object
128 * @vol: volume description object of the changed volume
129 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
130 *
131 * This is a helper function which notifies all subscribers about a volume
132 * change event (creation, removal, re-sizing, re-naming, updating). Returns
133 * zero in case of success and a negative error code in case of failure.
134 */
135int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
136{
137 struct ubi_notification nt;
138
139 ubi_do_get_device_info(ubi, &nt.di);
140 ubi_do_get_volume_info(ubi, vol, &nt.vi);
141 return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
142}
143
144/**
145 * ubi_notify_all - send a notification to all volumes.
146 * @ubi: UBI device description object
147 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
148 * @nb: the notifier to call
149 *
150 * This function walks all volumes of UBI device @ubi and sends the @ntype
151 * notification for each volume. If @nb is %NULL, then all registered notifiers
152 * are called, otherwise only the @nb notifier is called. Returns the number of
153 * sent notifications.
154 */
155int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
156{
157 struct ubi_notification nt;
158 int i, count = 0;
159
160 ubi_do_get_device_info(ubi, &nt.di);
161
162 mutex_lock(&ubi->device_mutex);
163 for (i = 0; i < ubi->vtbl_slots; i++) {
164 /*
165 * Since the @ubi->device is locked, and we are not going to
166 * change @ubi->volumes, we do not have to lock
167 * @ubi->volumes_lock.
168 */
169 if (!ubi->volumes[i])
170 continue;
171
172 ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
173 if (nb)
174 nb->notifier_call(nb, ntype, &nt);
175 else
176 blocking_notifier_call_chain(&ubi_notifiers, ntype,
177 &nt);
178 count += 1;
179 }
180 mutex_unlock(&ubi->device_mutex);
181
182 return count;
183}
184
185/**
186 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
187 * @nb: the notifier to call
188 *
189 * This function walks all UBI devices and volumes and sends the
190 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
191 * registered notifiers are called, otherwise only the @nb notifier is called.
192 * Returns the number of sent notifications.
193 */
194int ubi_enumerate_volumes(struct notifier_block *nb)
195{
196 int i, count = 0;
197
198 /*
199 * Since the @ubi_devices_mutex is locked, and we are not going to
200 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
201 */
202 for (i = 0; i < UBI_MAX_DEVICES; i++) {
203 struct ubi_device *ubi = ubi_devices[i];
204
205 if (!ubi)
206 continue;
207 count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
208 }
209
210 return count;
211}
212
213/**
125 * ubi_get_device - get UBI device. 214 * ubi_get_device - get UBI device.
126 * @ubi_num: UBI device number 215 * @ubi_num: UBI device number
127 * 216 *
@@ -380,7 +469,7 @@ static void free_user_volumes(struct ubi_device *ubi)
380 * @ubi: UBI device description object 469 * @ubi: UBI device description object
381 * 470 *
382 * This function returns zero in case of success and a negative error code in 471 * This function returns zero in case of success and a negative error code in
383 * case of failure. Note, this function destroys all volumes if it failes. 472 * case of failure. Note, this function destroys all volumes if it fails.
384 */ 473 */
385static int uif_init(struct ubi_device *ubi) 474static int uif_init(struct ubi_device *ubi)
386{ 475{
@@ -633,6 +722,15 @@ static int io_init(struct ubi_device *ubi)
633 } 722 }
634 723
635 /* 724 /*
725 * Set maximum amount of physical erroneous eraseblocks to be 10%.
726 * Erroneous PEB are those which have read errors.
727 */
728 ubi->max_erroneous = ubi->peb_count / 10;
729 if (ubi->max_erroneous < 16)
730 ubi->max_erroneous = 16;
731 dbg_msg("max_erroneous %d", ubi->max_erroneous);
732
733 /*
636 * It may happen that EC and VID headers are situated in one minimal 734 * It may happen that EC and VID headers are situated in one minimal
637 * I/O unit. In this case we can only accept this UBI image in 735 * I/O unit. In this case we can only accept this UBI image in
638 * read-only mode. 736 * read-only mode.
@@ -726,6 +824,34 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
726} 824}
727 825
728/** 826/**
827 * ubi_reboot_notifier - halt UBI transactions immediately prior to a reboot.
828 * @n: reboot notifier object
829 * @state: SYS_RESTART, SYS_HALT, or SYS_POWER_OFF
830 * @cmd: pointer to command string for RESTART2
831 *
832 * This function stops the UBI background thread so that the flash device
833 * remains quiescent when Linux restarts the system. Any queued work will be
834 * discarded, but this function will block until do_work() finishes if an
835 * operation is already in progress.
836 *
837 * This function solves a real-life problem observed on NOR flashes when an
838 * PEB erase operation starts, then the system is rebooted before the erase is
839 * finishes, and the boot loader gets confused and dies. So we prefer to finish
840 * the ongoing operation before rebooting.
841 */
842static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state,
843 void *cmd)
844{
845 struct ubi_device *ubi;
846
847 ubi = container_of(n, struct ubi_device, reboot_notifier);
848 if (ubi->bgt_thread)
849 kthread_stop(ubi->bgt_thread);
850 ubi_sync(ubi->ubi_num);
851 return NOTIFY_DONE;
852}
853
854/**
729 * ubi_attach_mtd_dev - attach an MTD device. 855 * ubi_attach_mtd_dev - attach an MTD device.
730 * @mtd: MTD device description object 856 * @mtd: MTD device description object
731 * @ubi_num: number to assign to the new UBI device 857 * @ubi_num: number to assign to the new UBI device
@@ -806,8 +932,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
806 932
807 mutex_init(&ubi->buf_mutex); 933 mutex_init(&ubi->buf_mutex);
808 mutex_init(&ubi->ckvol_mutex); 934 mutex_init(&ubi->ckvol_mutex);
809 mutex_init(&ubi->mult_mutex); 935 mutex_init(&ubi->device_mutex);
810 mutex_init(&ubi->volumes_mutex);
811 spin_lock_init(&ubi->volumes_lock); 936 spin_lock_init(&ubi->volumes_lock);
812 937
813 ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); 938 ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
@@ -825,7 +950,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
825 if (!ubi->peb_buf2) 950 if (!ubi->peb_buf2)
826 goto out_free; 951 goto out_free;
827 952
828#ifdef CONFIG_MTD_UBI_DEBUG 953#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
829 mutex_init(&ubi->dbg_buf_mutex); 954 mutex_init(&ubi->dbg_buf_mutex);
830 ubi->dbg_peb_buf = vmalloc(ubi->peb_size); 955 ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
831 if (!ubi->dbg_peb_buf) 956 if (!ubi->dbg_peb_buf)
@@ -872,11 +997,23 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
872 ubi->beb_rsvd_pebs); 997 ubi->beb_rsvd_pebs);
873 ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); 998 ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
874 999
1000 /*
1001 * The below lock makes sure we do not race with 'ubi_thread()' which
1002 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
1003 */
1004 spin_lock(&ubi->wl_lock);
875 if (!DBG_DISABLE_BGT) 1005 if (!DBG_DISABLE_BGT)
876 ubi->thread_enabled = 1; 1006 ubi->thread_enabled = 1;
877 wake_up_process(ubi->bgt_thread); 1007 wake_up_process(ubi->bgt_thread);
1008 spin_unlock(&ubi->wl_lock);
1009
1010 /* Flash device priority is 0 - UBI needs to shut down first */
1011 ubi->reboot_notifier.priority = 1;
1012 ubi->reboot_notifier.notifier_call = ubi_reboot_notifier;
1013 register_reboot_notifier(&ubi->reboot_notifier);
878 1014
879 ubi_devices[ubi_num] = ubi; 1015 ubi_devices[ubi_num] = ubi;
1016 ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
880 return ubi_num; 1017 return ubi_num;
881 1018
882out_uif: 1019out_uif:
@@ -892,7 +1029,7 @@ out_detach:
892out_free: 1029out_free:
893 vfree(ubi->peb_buf1); 1030 vfree(ubi->peb_buf1);
894 vfree(ubi->peb_buf2); 1031 vfree(ubi->peb_buf2);
895#ifdef CONFIG_MTD_UBI_DEBUG 1032#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
896 vfree(ubi->dbg_peb_buf); 1033 vfree(ubi->dbg_peb_buf);
897#endif 1034#endif
898 kfree(ubi); 1035 kfree(ubi);
@@ -919,13 +1056,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
919 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 1056 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
920 return -EINVAL; 1057 return -EINVAL;
921 1058
922 spin_lock(&ubi_devices_lock); 1059 ubi = ubi_get_device(ubi_num);
923 ubi = ubi_devices[ubi_num]; 1060 if (!ubi)
924 if (!ubi) {
925 spin_unlock(&ubi_devices_lock);
926 return -EINVAL; 1061 return -EINVAL;
927 }
928 1062
1063 spin_lock(&ubi_devices_lock);
1064 put_device(&ubi->dev);
1065 ubi->ref_count -= 1;
929 if (ubi->ref_count) { 1066 if (ubi->ref_count) {
930 if (!anyway) { 1067 if (!anyway) {
931 spin_unlock(&ubi_devices_lock); 1068 spin_unlock(&ubi_devices_lock);
@@ -939,12 +1076,14 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
939 spin_unlock(&ubi_devices_lock); 1076 spin_unlock(&ubi_devices_lock);
940 1077
941 ubi_assert(ubi_num == ubi->ubi_num); 1078 ubi_assert(ubi_num == ubi->ubi_num);
1079 ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
942 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); 1080 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
943 1081
944 /* 1082 /*
945 * Before freeing anything, we have to stop the background thread to 1083 * Before freeing anything, we have to stop the background thread to
946 * prevent it from doing anything on this device while we are freeing. 1084 * prevent it from doing anything on this device while we are freeing.
947 */ 1085 */
1086 unregister_reboot_notifier(&ubi->reboot_notifier);
948 if (ubi->bgt_thread) 1087 if (ubi->bgt_thread)
949 kthread_stop(ubi->bgt_thread); 1088 kthread_stop(ubi->bgt_thread);
950 1089
@@ -961,7 +1100,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
961 put_mtd_device(ubi->mtd); 1100 put_mtd_device(ubi->mtd);
962 vfree(ubi->peb_buf1); 1101 vfree(ubi->peb_buf1);
963 vfree(ubi->peb_buf2); 1102 vfree(ubi->peb_buf2);
964#ifdef CONFIG_MTD_UBI_DEBUG 1103#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
965 vfree(ubi->dbg_peb_buf); 1104 vfree(ubi->dbg_peb_buf);
966#endif 1105#endif
967 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); 1106 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index f8e0f68f2186..f237ddbb2713 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -113,7 +113,8 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
113 else 113 else
114 mode = UBI_READONLY; 114 mode = UBI_READONLY;
115 115
116 dbg_gen("open volume %d, mode %d", vol_id, mode); 116 dbg_gen("open device %d, volume %d, mode %d",
117 ubi_num, vol_id, mode);
117 118
118 desc = ubi_open_volume(ubi_num, vol_id, mode); 119 desc = ubi_open_volume(ubi_num, vol_id, mode);
119 if (IS_ERR(desc)) 120 if (IS_ERR(desc))
@@ -128,7 +129,8 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
128 struct ubi_volume_desc *desc = file->private_data; 129 struct ubi_volume_desc *desc = file->private_data;
129 struct ubi_volume *vol = desc->vol; 130 struct ubi_volume *vol = desc->vol;
130 131
131 dbg_gen("release volume %d, mode %d", vol->vol_id, desc->mode); 132 dbg_gen("release device %d, volume %d, mode %d",
133 vol->ubi->ubi_num, vol->vol_id, desc->mode);
132 134
133 if (vol->updating) { 135 if (vol->updating) {
134 ubi_warn("update of volume %d not finished, volume is damaged", 136 ubi_warn("update of volume %d not finished, volume is damaged",
@@ -393,7 +395,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
393 vol->corrupted = 1; 395 vol->corrupted = 1;
394 } 396 }
395 vol->checked = 1; 397 vol->checked = 1;
396 ubi_gluebi_updated(vol); 398 ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
397 revoke_exclusive(desc, UBI_READWRITE); 399 revoke_exclusive(desc, UBI_READWRITE);
398 } 400 }
399 401
@@ -558,7 +560,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
558 break; 560 break;
559 } 561 }
560 562
561 /* Set volume property command*/ 563 /* Set volume property command */
562 case UBI_IOCSETPROP: 564 case UBI_IOCSETPROP:
563 { 565 {
564 struct ubi_set_prop_req req; 566 struct ubi_set_prop_req req;
@@ -571,9 +573,9 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
571 } 573 }
572 switch (req.property) { 574 switch (req.property) {
573 case UBI_PROP_DIRECT_WRITE: 575 case UBI_PROP_DIRECT_WRITE:
574 mutex_lock(&ubi->volumes_mutex); 576 mutex_lock(&ubi->device_mutex);
575 desc->vol->direct_writes = !!req.value; 577 desc->vol->direct_writes = !!req.value;
576 mutex_unlock(&ubi->volumes_mutex); 578 mutex_unlock(&ubi->device_mutex);
577 break; 579 break;
578 default: 580 default:
579 err = -EINVAL; 581 err = -EINVAL;
@@ -810,9 +812,9 @@ static int rename_volumes(struct ubi_device *ubi,
810 re->desc->vol->vol_id, re->desc->vol->name); 812 re->desc->vol->vol_id, re->desc->vol->name);
811 } 813 }
812 814
813 mutex_lock(&ubi->volumes_mutex); 815 mutex_lock(&ubi->device_mutex);
814 err = ubi_rename_volumes(ubi, &rename_list); 816 err = ubi_rename_volumes(ubi, &rename_list);
815 mutex_unlock(&ubi->volumes_mutex); 817 mutex_unlock(&ubi->device_mutex);
816 818
817out_free: 819out_free:
818 list_for_each_entry_safe(re, re1, &rename_list, list) { 820 list_for_each_entry_safe(re, re1, &rename_list, list) {
@@ -856,9 +858,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
856 if (err) 858 if (err)
857 break; 859 break;
858 860
859 mutex_lock(&ubi->volumes_mutex); 861 mutex_lock(&ubi->device_mutex);
860 err = ubi_create_volume(ubi, &req); 862 err = ubi_create_volume(ubi, &req);
861 mutex_unlock(&ubi->volumes_mutex); 863 mutex_unlock(&ubi->device_mutex);
862 if (err) 864 if (err)
863 break; 865 break;
864 866
@@ -887,9 +889,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
887 break; 889 break;
888 } 890 }
889 891
890 mutex_lock(&ubi->volumes_mutex); 892 mutex_lock(&ubi->device_mutex);
891 err = ubi_remove_volume(desc, 0); 893 err = ubi_remove_volume(desc, 0);
892 mutex_unlock(&ubi->volumes_mutex); 894 mutex_unlock(&ubi->device_mutex);
893 895
894 /* 896 /*
895 * The volume is deleted (unless an error occurred), and the 897 * The volume is deleted (unless an error occurred), and the
@@ -926,9 +928,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
926 pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1, 928 pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
927 desc->vol->usable_leb_size); 929 desc->vol->usable_leb_size);
928 930
929 mutex_lock(&ubi->volumes_mutex); 931 mutex_lock(&ubi->device_mutex);
930 err = ubi_resize_volume(desc, pebs); 932 err = ubi_resize_volume(desc, pebs);
931 mutex_unlock(&ubi->volumes_mutex); 933 mutex_unlock(&ubi->device_mutex);
932 ubi_close_volume(desc); 934 ubi_close_volume(desc);
933 break; 935 break;
934 } 936 }
@@ -952,9 +954,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
952 break; 954 break;
953 } 955 }
954 956
955 mutex_lock(&ubi->mult_mutex);
956 err = rename_volumes(ubi, req); 957 err = rename_volumes(ubi, req);
957 mutex_unlock(&ubi->mult_mutex);
958 kfree(req); 958 kfree(req);
959 break; 959 break;
960 } 960 }
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 25def348e5ba..0f2034c3ed2f 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -419,8 +419,9 @@ retry:
419 * not implemented. 419 * not implemented.
420 */ 420 */
421 if (err == UBI_IO_BAD_VID_HDR) { 421 if (err == UBI_IO_BAD_VID_HDR) {
422 ubi_warn("bad VID header at PEB %d, LEB" 422 ubi_warn("corrupted VID header at PEB "
423 "%d:%d", pnum, vol_id, lnum); 423 "%d, LEB %d:%d", pnum, vol_id,
424 lnum);
424 err = -EBADMSG; 425 err = -EBADMSG;
425 } else 426 } else
426 ubi_ro_mode(ubi); 427 ubi_ro_mode(ubi);
@@ -940,6 +941,33 @@ write_error:
940} 941}
941 942
942/** 943/**
944 * is_error_sane - check whether a read error is sane.
945 * @err: code of the error happened during reading
946 *
947 * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
948 * cannot read data from the target PEB (an error @err happened). If the error
949 * code is sane, then we treat this error as non-fatal. Otherwise the error is
950 * fatal and UBI will be switched to R/O mode later.
951 *
952 * The idea is that we try not to switch to R/O mode if the read error is
953 * something which suggests there was a real read problem. E.g., %-EIO. Or a
954 * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O
955 * mode, simply because we do not know what happened at the MTD level, and we
956 * cannot handle this. E.g., the underlying driver may have become crazy, and
957 * it is safer to switch to R/O mode to preserve the data.
958 *
959 * And bear in mind, this is about reading from the target PEB, i.e. the PEB
960 * which we have just written.
961 */
962static int is_error_sane(int err)
963{
964 if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_VID_HDR ||
965 err == -ETIMEDOUT)
966 return 0;
967 return 1;
968}
969
970/**
943 * ubi_eba_copy_leb - copy logical eraseblock. 971 * ubi_eba_copy_leb - copy logical eraseblock.
944 * @ubi: UBI device description object 972 * @ubi: UBI device description object
945 * @from: physical eraseblock number from where to copy 973 * @from: physical eraseblock number from where to copy
@@ -950,12 +978,7 @@ write_error:
950 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 978 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
951 * function. Returns: 979 * function. Returns:
952 * o %0 in case of success; 980 * o %0 in case of success;
953 * o %1 if the operation was canceled because the volume is being deleted 981 * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_CANCEL_BITFLIPS, etc;
954 * or because the PEB was put meanwhile;
955 * o %2 if the operation was canceled because there was a write error to the
956 * target PEB;
957 * o %-EAGAIN if the operation was canceled because a bit-flip was detected
958 * in the target PEB;
959 * o a negative error code in case of failure. 982 * o a negative error code in case of failure.
960 */ 983 */
961int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 984int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
@@ -968,7 +991,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
968 vol_id = be32_to_cpu(vid_hdr->vol_id); 991 vol_id = be32_to_cpu(vid_hdr->vol_id);
969 lnum = be32_to_cpu(vid_hdr->lnum); 992 lnum = be32_to_cpu(vid_hdr->lnum);
970 993
971 dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); 994 dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
972 995
973 if (vid_hdr->vol_type == UBI_VID_STATIC) { 996 if (vid_hdr->vol_type == UBI_VID_STATIC) {
974 data_size = be32_to_cpu(vid_hdr->data_size); 997 data_size = be32_to_cpu(vid_hdr->data_size);
@@ -986,13 +1009,12 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
986 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. 1009 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
987 */ 1010 */
988 vol = ubi->volumes[idx]; 1011 vol = ubi->volumes[idx];
1012 spin_unlock(&ubi->volumes_lock);
989 if (!vol) { 1013 if (!vol) {
990 /* No need to do further work, cancel */ 1014 /* No need to do further work, cancel */
991 dbg_eba("volume %d is being removed, cancel", vol_id); 1015 dbg_wl("volume %d is being removed, cancel", vol_id);
992 spin_unlock(&ubi->volumes_lock); 1016 return MOVE_CANCEL_RACE;
993 return 1;
994 } 1017 }
995 spin_unlock(&ubi->volumes_lock);
996 1018
997 /* 1019 /*
998 * We do not want anybody to write to this logical eraseblock while we 1020 * We do not want anybody to write to this logical eraseblock while we
@@ -1004,12 +1026,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1004 * (@from). This task locks the LEB and goes sleep in the 1026 * (@from). This task locks the LEB and goes sleep in the
1005 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are 1027 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1006 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the 1028 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
1007 * LEB is already locked, we just do not move it and return %1. 1029 * LEB is already locked, we just do not move it and return
1030 * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
1008 */ 1031 */
1009 err = leb_write_trylock(ubi, vol_id, lnum); 1032 err = leb_write_trylock(ubi, vol_id, lnum);
1010 if (err) { 1033 if (err) {
1011 dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum); 1034 dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
1012 return err; 1035 return MOVE_CANCEL_RACE;
1013 } 1036 }
1014 1037
1015 /* 1038 /*
@@ -1018,25 +1041,26 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1018 * cancel it. 1041 * cancel it.
1019 */ 1042 */
1020 if (vol->eba_tbl[lnum] != from) { 1043 if (vol->eba_tbl[lnum] != from) {
1021 dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " 1044 dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to "
1022 "PEB %d, cancel", vol_id, lnum, from, 1045 "PEB %d, cancel", vol_id, lnum, from,
1023 vol->eba_tbl[lnum]); 1046 vol->eba_tbl[lnum]);
1024 err = 1; 1047 err = MOVE_CANCEL_RACE;
1025 goto out_unlock_leb; 1048 goto out_unlock_leb;
1026 } 1049 }
1027 1050
1028 /* 1051 /*
1029 * OK, now the LEB is locked and we can safely start moving it. Since 1052 * OK, now the LEB is locked and we can safely start moving it. Since
1030 * this function utilizes the @ubi->peb1_buf buffer which is shared 1053 * this function utilizes the @ubi->peb_buf1 buffer which is shared
1031 * with some other functions, so lock the buffer by taking the 1054 * with some other functions - we lock the buffer by taking the
1032 * @ubi->buf_mutex. 1055 * @ubi->buf_mutex.
1033 */ 1056 */
1034 mutex_lock(&ubi->buf_mutex); 1057 mutex_lock(&ubi->buf_mutex);
1035 dbg_eba("read %d bytes of data", aldata_size); 1058 dbg_wl("read %d bytes of data", aldata_size);
1036 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); 1059 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
1037 if (err && err != UBI_IO_BITFLIPS) { 1060 if (err && err != UBI_IO_BITFLIPS) {
1038 ubi_warn("error %d while reading data from PEB %d", 1061 ubi_warn("error %d while reading data from PEB %d",
1039 err, from); 1062 err, from);
1063 err = MOVE_SOURCE_RD_ERR;
1040 goto out_unlock_buf; 1064 goto out_unlock_buf;
1041 } 1065 }
1042 1066
@@ -1059,7 +1083,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1059 cond_resched(); 1083 cond_resched();
1060 1084
1061 /* 1085 /*
1062 * It may turn out to me that the whole @from physical eraseblock 1086 * It may turn out to be that the whole @from physical eraseblock
1063 * contains only 0xFF bytes. Then we have to only write the VID header 1087 * contains only 0xFF bytes. Then we have to only write the VID header
1064 * and do not write any data. This also means we should not set 1088 * and do not write any data. This also means we should not set
1065 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc. 1089 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
@@ -1074,7 +1098,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1074 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1098 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1075 if (err) { 1099 if (err) {
1076 if (err == -EIO) 1100 if (err == -EIO)
1077 err = 2; 1101 err = MOVE_TARGET_WR_ERR;
1078 goto out_unlock_buf; 1102 goto out_unlock_buf;
1079 } 1103 }
1080 1104
@@ -1083,10 +1107,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1083 /* Read the VID header back and check if it was written correctly */ 1107 /* Read the VID header back and check if it was written correctly */
1084 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); 1108 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
1085 if (err) { 1109 if (err) {
1086 if (err != UBI_IO_BITFLIPS) 1110 if (err != UBI_IO_BITFLIPS) {
1087 ubi_warn("cannot read VID header back from PEB %d", to); 1111 ubi_warn("error %d while reading VID header back from "
1088 else 1112 "PEB %d", err, to);
1089 err = -EAGAIN; 1113 if (is_error_sane(err))
1114 err = MOVE_TARGET_RD_ERR;
1115 } else
1116 err = MOVE_CANCEL_BITFLIPS;
1090 goto out_unlock_buf; 1117 goto out_unlock_buf;
1091 } 1118 }
1092 1119
@@ -1094,7 +1121,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1094 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); 1121 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
1095 if (err) { 1122 if (err) {
1096 if (err == -EIO) 1123 if (err == -EIO)
1097 err = 2; 1124 err = MOVE_TARGET_WR_ERR;
1098 goto out_unlock_buf; 1125 goto out_unlock_buf;
1099 } 1126 }
1100 1127
@@ -1107,11 +1134,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1107 1134
1108 err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size); 1135 err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
1109 if (err) { 1136 if (err) {
1110 if (err != UBI_IO_BITFLIPS) 1137 if (err != UBI_IO_BITFLIPS) {
1111 ubi_warn("cannot read data back from PEB %d", 1138 ubi_warn("error %d while reading data back "
1112 to); 1139 "from PEB %d", err, to);
1113 else 1140 if (is_error_sane(err))
1114 err = -EAGAIN; 1141 err = MOVE_TARGET_RD_ERR;
1142 } else
1143 err = MOVE_CANCEL_BITFLIPS;
1115 goto out_unlock_buf; 1144 goto out_unlock_buf;
1116 } 1145 }
1117 1146
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 49cd55ade9c8..95aaac03f938 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -19,17 +19,71 @@
19 */ 19 */
20 20
21/* 21/*
22 * This file includes implementation of fake MTD devices for each UBI volume. 22 * This is a small driver which implements fake MTD devices on top of UBI
23 * This sounds strange, but it is in fact quite useful to make MTD-oriented 23 * volumes. This sounds strange, but it is in fact quite useful to make
24 * software (including all the legacy software) to work on top of UBI. 24 * MTD-oriented software (including all the legacy software) work on top of
25 * UBI.
25 * 26 *
26 * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit 27 * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit
27 * size (mtd->writesize) is equivalent to the UBI minimal I/O unit. The 28 * size (@mtd->writesize) is equivalent to the UBI minimal I/O unit. The
28 * eraseblock size is equivalent to the logical eraseblock size of the volume. 29 * eraseblock size is equivalent to the logical eraseblock size of the volume.
29 */ 30 */
30 31
32#include <linux/err.h>
33#include <linux/list.h>
34#include <linux/sched.h>
31#include <linux/math64.h> 35#include <linux/math64.h>
32#include "ubi.h" 36#include <linux/module.h>
37#include <linux/mutex.h>
38#include <linux/mtd/ubi.h>
39#include <linux/mtd/mtd.h>
40#include "ubi-media.h"
41
42#define err_msg(fmt, ...) \
43 printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \
44 current->pid, __func__, ##__VA_ARGS__)
45
46/**
47 * struct gluebi_device - a gluebi device description data structure.
48 * @mtd: emulated MTD device description object
49 * @refcnt: gluebi device reference count
50 * @desc: UBI volume descriptor
51 * @ubi_num: UBI device number this gluebi device works on
52 * @vol_id: ID of UBI volume this gluebi device works on
53 * @list: link in a list of gluebi devices
54 */
55struct gluebi_device {
56 struct mtd_info mtd;
57 int refcnt;
58 struct ubi_volume_desc *desc;
59 int ubi_num;
60 int vol_id;
61 struct list_head list;
62};
63
64/* List of all gluebi devices */
65static LIST_HEAD(gluebi_devices);
66static DEFINE_MUTEX(devices_mutex);
67
68/**
69 * find_gluebi_nolock - find a gluebi device.
70 * @ubi_num: UBI device number
71 * @vol_id: volume ID
72 *
 73 * This function searches for gluebi device corresponding to UBI device
74 * @ubi_num and UBI volume @vol_id. Returns the gluebi device description
75 * object in case of success and %NULL in case of failure. The caller has to
76 * have the &devices_mutex locked.
77 */
78static struct gluebi_device *find_gluebi_nolock(int ubi_num, int vol_id)
79{
80 struct gluebi_device *gluebi;
81
82 list_for_each_entry(gluebi, &gluebi_devices, list)
83 if (gluebi->ubi_num == ubi_num && gluebi->vol_id == vol_id)
84 return gluebi;
85 return NULL;
86}
33 87
34/** 88/**
35 * gluebi_get_device - get MTD device reference. 89 * gluebi_get_device - get MTD device reference.
@@ -41,15 +95,18 @@
41 */ 95 */
42static int gluebi_get_device(struct mtd_info *mtd) 96static int gluebi_get_device(struct mtd_info *mtd)
43{ 97{
44 struct ubi_volume *vol; 98 struct gluebi_device *gluebi;
99 int ubi_mode = UBI_READONLY;
45 100
46 vol = container_of(mtd, struct ubi_volume, gluebi_mtd); 101 if (!try_module_get(THIS_MODULE))
102 return -ENODEV;
47 103
48 /* 104 if (mtd->flags & MTD_WRITEABLE)
49 * We do not introduce locks for gluebi reference count because the 105 ubi_mode = UBI_READWRITE;
50 * get_device()/put_device() calls are already serialized at MTD. 106
51 */ 107 gluebi = container_of(mtd, struct gluebi_device, mtd);
52 if (vol->gluebi_refcount > 0) { 108 mutex_lock(&devices_mutex);
109 if (gluebi->refcnt > 0) {
53 /* 110 /*
54 * The MTD device is already referenced and this is just one 111 * The MTD device is already referenced and this is just one
55 * more reference. MTD allows many users to open the same 112 * more reference. MTD allows many users to open the same
@@ -58,7 +115,8 @@ static int gluebi_get_device(struct mtd_info *mtd)
58 * open the UBI volume again - just increase the reference 115 * open the UBI volume again - just increase the reference
59 * counter and return. 116 * counter and return.
60 */ 117 */
61 vol->gluebi_refcount += 1; 118 gluebi->refcnt += 1;
119 mutex_unlock(&devices_mutex);
62 return 0; 120 return 0;
63 } 121 }
64 122
@@ -66,11 +124,15 @@ static int gluebi_get_device(struct mtd_info *mtd)
66 * This is the first reference to this UBI volume via the MTD device 124 * This is the first reference to this UBI volume via the MTD device
67 * interface. Open the corresponding volume in read-write mode. 125 * interface. Open the corresponding volume in read-write mode.
68 */ 126 */
69 vol->gluebi_desc = ubi_open_volume(vol->ubi->ubi_num, vol->vol_id, 127 gluebi->desc = ubi_open_volume(gluebi->ubi_num, gluebi->vol_id,
70 UBI_READWRITE); 128 ubi_mode);
71 if (IS_ERR(vol->gluebi_desc)) 129 if (IS_ERR(gluebi->desc)) {
72 return PTR_ERR(vol->gluebi_desc); 130 mutex_unlock(&devices_mutex);
73 vol->gluebi_refcount += 1; 131 module_put(THIS_MODULE);
132 return PTR_ERR(gluebi->desc);
133 }
134 gluebi->refcnt += 1;
135 mutex_unlock(&devices_mutex);
74 return 0; 136 return 0;
75} 137}
76 138
@@ -83,13 +145,15 @@ static int gluebi_get_device(struct mtd_info *mtd)
83 */ 145 */
84static void gluebi_put_device(struct mtd_info *mtd) 146static void gluebi_put_device(struct mtd_info *mtd)
85{ 147{
86 struct ubi_volume *vol; 148 struct gluebi_device *gluebi;
87 149
88 vol = container_of(mtd, struct ubi_volume, gluebi_mtd); 150 gluebi = container_of(mtd, struct gluebi_device, mtd);
89 vol->gluebi_refcount -= 1; 151 mutex_lock(&devices_mutex);
90 ubi_assert(vol->gluebi_refcount >= 0); 152 gluebi->refcnt -= 1;
91 if (vol->gluebi_refcount == 0) 153 if (gluebi->refcnt == 0)
92 ubi_close_volume(vol->gluebi_desc); 154 ubi_close_volume(gluebi->desc);
155 module_put(THIS_MODULE);
156 mutex_unlock(&devices_mutex);
93} 157}
94 158
95/** 159/**
@@ -107,16 +171,12 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
107 size_t *retlen, unsigned char *buf) 171 size_t *retlen, unsigned char *buf)
108{ 172{
109 int err = 0, lnum, offs, total_read; 173 int err = 0, lnum, offs, total_read;
110 struct ubi_volume *vol; 174 struct gluebi_device *gluebi;
111 struct ubi_device *ubi;
112
113 dbg_gen("read %zd bytes from offset %lld", len, from);
114 175
115 if (len < 0 || from < 0 || from + len > mtd->size) 176 if (len < 0 || from < 0 || from + len > mtd->size)
116 return -EINVAL; 177 return -EINVAL;
117 178
118 vol = container_of(mtd, struct ubi_volume, gluebi_mtd); 179 gluebi = container_of(mtd, struct gluebi_device, mtd);
119 ubi = vol->ubi;
120 180
121 lnum = div_u64_rem(from, mtd->erasesize, &offs); 181 lnum = div_u64_rem(from, mtd->erasesize, &offs);
122 total_read = len; 182 total_read = len;
@@ -126,7 +186,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
126 if (to_read > total_read) 186 if (to_read > total_read)
127 to_read = total_read; 187 to_read = total_read;
128 188
129 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0); 189 err = ubi_read(gluebi->desc, lnum, buf, offs, to_read);
130 if (err) 190 if (err)
131 break; 191 break;
132 192
@@ -152,21 +212,17 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
152 * case of failure. 212 * case of failure.
153 */ 213 */
154static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, 214static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
155 size_t *retlen, const u_char *buf) 215 size_t *retlen, const u_char *buf)
156{ 216{
157 int err = 0, lnum, offs, total_written; 217 int err = 0, lnum, offs, total_written;
158 struct ubi_volume *vol; 218 struct gluebi_device *gluebi;
159 struct ubi_device *ubi;
160
161 dbg_gen("write %zd bytes to offset %lld", len, to);
162 219
163 if (len < 0 || to < 0 || len + to > mtd->size) 220 if (len < 0 || to < 0 || len + to > mtd->size)
164 return -EINVAL; 221 return -EINVAL;
165 222
166 vol = container_of(mtd, struct ubi_volume, gluebi_mtd); 223 gluebi = container_of(mtd, struct gluebi_device, mtd);
167 ubi = vol->ubi;
168 224
169 if (ubi->ro_mode) 225 if (!(mtd->flags & MTD_WRITEABLE))
170 return -EROFS; 226 return -EROFS;
171 227
172 lnum = div_u64_rem(to, mtd->erasesize, &offs); 228 lnum = div_u64_rem(to, mtd->erasesize, &offs);
@@ -181,8 +237,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
181 if (to_write > total_written) 237 if (to_write > total_written)
182 to_write = total_written; 238 to_write = total_written;
183 239
184 err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write, 240 err = ubi_write(gluebi->desc, lnum, buf, offs, to_write);
185 UBI_UNKNOWN);
186 if (err) 241 if (err)
187 break; 242 break;
188 243
@@ -207,41 +262,36 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
207static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) 262static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
208{ 263{
209 int err, i, lnum, count; 264 int err, i, lnum, count;
210 struct ubi_volume *vol; 265 struct gluebi_device *gluebi;
211 struct ubi_device *ubi;
212
213 dbg_gen("erase %llu bytes at offset %llu", (unsigned long long)instr->len,
214 (unsigned long long)instr->addr);
215 266
216 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) 267 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
217 return -EINVAL; 268 return -EINVAL;
218
219 if (instr->len < 0 || instr->addr + instr->len > mtd->size) 269 if (instr->len < 0 || instr->addr + instr->len > mtd->size)
220 return -EINVAL; 270 return -EINVAL;
221
222 if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) 271 if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
223 return -EINVAL; 272 return -EINVAL;
224 273
225 lnum = mtd_div_by_eb(instr->addr, mtd); 274 lnum = mtd_div_by_eb(instr->addr, mtd);
226 count = mtd_div_by_eb(instr->len, mtd); 275 count = mtd_div_by_eb(instr->len, mtd);
227 276
228 vol = container_of(mtd, struct ubi_volume, gluebi_mtd); 277 gluebi = container_of(mtd, struct gluebi_device, mtd);
229 ubi = vol->ubi;
230 278
231 if (ubi->ro_mode) 279 if (!(mtd->flags & MTD_WRITEABLE))
232 return -EROFS; 280 return -EROFS;
233 281
234 for (i = 0; i < count; i++) { 282 for (i = 0; i < count - 1; i++) {
235 err = ubi_eba_unmap_leb(ubi, vol, lnum + i); 283 err = ubi_leb_unmap(gluebi->desc, lnum + i);
236 if (err) 284 if (err)
237 goto out_err; 285 goto out_err;
238 } 286 }
239
240 /* 287 /*
241 * MTD erase operations are synchronous, so we have to make sure the 288 * MTD erase operations are synchronous, so we have to make sure the
242 * physical eraseblock is wiped out. 289 * physical eraseblock is wiped out.
290 *
291 * Thus, perform leb_erase instead of leb_unmap operation - leb_erase
292 * will wait for the end of operations
243 */ 293 */
244 err = ubi_wl_flush(ubi); 294 err = ubi_leb_erase(gluebi->desc, lnum + i);
245 if (err) 295 if (err)
246 goto out_err; 296 goto out_err;
247 297
@@ -256,28 +306,38 @@ out_err:
256} 306}
257 307
258/** 308/**
259 * ubi_create_gluebi - initialize gluebi for an UBI volume. 309 * gluebi_create - create a gluebi device for an UBI volume.
260 * @ubi: UBI device description object 310 * @di: UBI device description object
261 * @vol: volume description object 311 * @vi: UBI volume description object
262 * 312 *
263 * This function is called when an UBI volume is created in order to create 313 * This function is called when a new UBI volume is created in order to create
264 * corresponding fake MTD device. Returns zero in case of success and a 314 * corresponding fake MTD device. Returns zero in case of success and a
265 * negative error code in case of failure. 315 * negative error code in case of failure.
266 */ 316 */
267int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol) 317static int gluebi_create(struct ubi_device_info *di,
318 struct ubi_volume_info *vi)
268{ 319{
269 struct mtd_info *mtd = &vol->gluebi_mtd; 320 struct gluebi_device *gluebi, *g;
321 struct mtd_info *mtd;
270 322
271 mtd->name = kmemdup(vol->name, vol->name_len + 1, GFP_KERNEL); 323 gluebi = kzalloc(sizeof(struct gluebi_device), GFP_KERNEL);
272 if (!mtd->name) 324 if (!gluebi)
273 return -ENOMEM; 325 return -ENOMEM;
274 326
327 mtd = &gluebi->mtd;
328 mtd->name = kmemdup(vi->name, vi->name_len + 1, GFP_KERNEL);
329 if (!mtd->name) {
330 kfree(gluebi);
331 return -ENOMEM;
332 }
333
334 gluebi->vol_id = vi->vol_id;
275 mtd->type = MTD_UBIVOLUME; 335 mtd->type = MTD_UBIVOLUME;
276 if (!ubi->ro_mode) 336 if (!di->ro_mode)
277 mtd->flags = MTD_WRITEABLE; 337 mtd->flags = MTD_WRITEABLE;
278 mtd->writesize = ubi->min_io_size;
279 mtd->owner = THIS_MODULE; 338 mtd->owner = THIS_MODULE;
280 mtd->erasesize = vol->usable_leb_size; 339 mtd->writesize = di->min_io_size;
340 mtd->erasesize = vi->usable_leb_size;
281 mtd->read = gluebi_read; 341 mtd->read = gluebi_read;
282 mtd->write = gluebi_write; 342 mtd->write = gluebi_write;
283 mtd->erase = gluebi_erase; 343 mtd->erase = gluebi_erase;
@@ -285,60 +345,196 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol)
285 mtd->put_device = gluebi_put_device; 345 mtd->put_device = gluebi_put_device;
286 346
287 /* 347 /*
288 * In case of dynamic volume, MTD device size is just volume size. In 348 * In case of dynamic a volume, MTD device size is just volume size. In
289 * case of a static volume the size is equivalent to the amount of data 349 * case of a static volume the size is equivalent to the amount of data
290 * bytes. 350 * bytes.
291 */ 351 */
292 if (vol->vol_type == UBI_DYNAMIC_VOLUME) 352 if (vi->vol_type == UBI_DYNAMIC_VOLUME)
293 mtd->size = (long long)vol->usable_leb_size * vol->reserved_pebs; 353 mtd->size = (unsigned long long)vi->usable_leb_size * vi->size;
294 else 354 else
295 mtd->size = vol->used_bytes; 355 mtd->size = vi->used_bytes;
356
357 /* Just a sanity check - make sure this gluebi device does not exist */
358 mutex_lock(&devices_mutex);
359 g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
360 if (g)
361 err_msg("gluebi MTD device %d form UBI device %d volume %d "
362 "already exists", g->mtd.index, vi->ubi_num,
363 vi->vol_id);
364 mutex_unlock(&devices_mutex);
296 365
297 if (add_mtd_device(mtd)) { 366 if (add_mtd_device(mtd)) {
298 ubi_err("cannot not add MTD device"); 367 err_msg("cannot add MTD device");
299 kfree(mtd->name); 368 kfree(mtd->name);
369 kfree(gluebi);
300 return -ENFILE; 370 return -ENFILE;
301 } 371 }
302 372
303 dbg_gen("added mtd%d (\"%s\"), size %llu, EB size %u", 373 mutex_lock(&devices_mutex);
304 mtd->index, mtd->name, (unsigned long long)mtd->size, mtd->erasesize); 374 list_add_tail(&gluebi->list, &gluebi_devices);
375 mutex_unlock(&devices_mutex);
305 return 0; 376 return 0;
306} 377}
307 378
308/** 379/**
309 * ubi_destroy_gluebi - close gluebi for an UBI volume. 380 * gluebi_remove - remove a gluebi device.
310 * @vol: volume description object 381 * @vi: UBI volume description object
311 * 382 *
312 * This function is called when an UBI volume is removed in order to remove 383 * This function is called when an UBI volume is removed and it removes
313 * corresponding fake MTD device. Returns zero in case of success and a 384 * corresponding fake MTD device. Returns zero in case of success and a
314 * negative error code in case of failure. 385 * negative error code in case of failure.
315 */ 386 */
316int ubi_destroy_gluebi(struct ubi_volume *vol) 387static int gluebi_remove(struct ubi_volume_info *vi)
317{ 388{
318 int err; 389 int err = 0;
319 struct mtd_info *mtd = &vol->gluebi_mtd; 390 struct mtd_info *mtd;
391 struct gluebi_device *gluebi;
392
393 mutex_lock(&devices_mutex);
394 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
395 if (!gluebi) {
396 err_msg("got remove notification for unknown UBI device %d "
397 "volume %d", vi->ubi_num, vi->vol_id);
398 err = -ENOENT;
399 } else if (gluebi->refcnt)
400 err = -EBUSY;
401 else
402 list_del(&gluebi->list);
403 mutex_unlock(&devices_mutex);
404 if (err)
405 return err;
320 406
321 dbg_gen("remove mtd%d", mtd->index); 407 mtd = &gluebi->mtd;
322 err = del_mtd_device(mtd); 408 err = del_mtd_device(mtd);
323 if (err) 409 if (err) {
410 err_msg("cannot remove fake MTD device %d, UBI device %d, "
411 "volume %d, error %d", mtd->index, gluebi->ubi_num,
412 gluebi->vol_id, err);
413 mutex_lock(&devices_mutex);
414 list_add_tail(&gluebi->list, &gluebi_devices);
415 mutex_unlock(&devices_mutex);
324 return err; 416 return err;
417 }
418
325 kfree(mtd->name); 419 kfree(mtd->name);
420 kfree(gluebi);
326 return 0; 421 return 0;
327} 422}
328 423
329/** 424/**
330 * ubi_gluebi_updated - UBI volume was updated notifier. 425 * gluebi_updated - UBI volume was updated notifier.
331 * @vol: volume description object 426 * @vi: volume info structure
332 * 427 *
333 * This function is called every time an UBI volume is updated. This function 428 * This function is called every time an UBI volume is updated. It does nothing
334 * does nothing if volume @vol is dynamic, and changes MTD device size if the 429 * if the volume @vol is dynamic, and changes MTD device size if the
335 * volume is static. This is needed because static volumes cannot be read past 430 * volume is static. This is needed because static volumes cannot be read past
336 * data they contain. 431 * data they contain. This function returns zero in case of success and a
432 * negative error code in case of error.
433 */
434static int gluebi_updated(struct ubi_volume_info *vi)
435{
436 struct gluebi_device *gluebi;
437
438 mutex_lock(&devices_mutex);
439 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
440 if (!gluebi) {
441 mutex_unlock(&devices_mutex);
442 err_msg("got update notification for unknown UBI device %d "
443 "volume %d", vi->ubi_num, vi->vol_id);
444 return -ENOENT;
445 }
446
447 if (vi->vol_type == UBI_STATIC_VOLUME)
448 gluebi->mtd.size = vi->used_bytes;
449 mutex_unlock(&devices_mutex);
450 return 0;
451}
452
453/**
454 * gluebi_resized - UBI volume was re-sized notifier.
455 * @vi: volume info structure
456 *
457 * This function is called every time an UBI volume is re-sized. It changes the
458 * corresponding fake MTD device size. This function returns zero in case of
459 * success and a negative error code in case of error.
460 */
461static int gluebi_resized(struct ubi_volume_info *vi)
462{
463 struct gluebi_device *gluebi;
464
465 mutex_lock(&devices_mutex);
466 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
467 if (!gluebi) {
468 mutex_unlock(&devices_mutex);
469 err_msg("got update notification for unknown UBI device %d "
470 "volume %d", vi->ubi_num, vi->vol_id);
471 return -ENOENT;
472 }
473 gluebi->mtd.size = vi->used_bytes;
474 mutex_unlock(&devices_mutex);
475 return 0;
476}
477
478/**
479 * gluebi_notify - UBI notification handler.
480 * @nb: registered notifier block
481 * @l: notification type
482 * @ptr: pointer to the &struct ubi_notification object
337 */ 483 */
338void ubi_gluebi_updated(struct ubi_volume *vol) 484static int gluebi_notify(struct notifier_block *nb, unsigned long l,
485 void *ns_ptr)
339{ 486{
340 struct mtd_info *mtd = &vol->gluebi_mtd; 487 struct ubi_notification *nt = ns_ptr;
488
489 switch (l) {
490 case UBI_VOLUME_ADDED:
491 gluebi_create(&nt->di, &nt->vi);
492 break;
493 case UBI_VOLUME_REMOVED:
494 gluebi_remove(&nt->vi);
495 break;
496 case UBI_VOLUME_RESIZED:
497 gluebi_resized(&nt->vi);
498 break;
499 case UBI_VOLUME_UPDATED:
500 gluebi_updated(&nt->vi);
501 break;
502 default:
503 break;
504 }
505 return NOTIFY_OK;
506}
341 507
342 if (vol->vol_type == UBI_STATIC_VOLUME) 508static struct notifier_block gluebi_notifier = {
343 mtd->size = vol->used_bytes; 509 .notifier_call = gluebi_notify,
510};
511
512static int __init ubi_gluebi_init(void)
513{
514 return ubi_register_volume_notifier(&gluebi_notifier, 0);
344} 515}
516
517static void __exit ubi_gluebi_exit(void)
518{
519 struct gluebi_device *gluebi, *g;
520
521 list_for_each_entry_safe(gluebi, g, &gluebi_devices, list) {
522 int err;
523 struct mtd_info *mtd = &gluebi->mtd;
524
525 err = del_mtd_device(mtd);
526 if (err)
527 err_msg("error %d while removing gluebi MTD device %d, "
528 "UBI device %d, volume %d - ignoring", err,
529 mtd->index, gluebi->ubi_num, gluebi->vol_id);
530 kfree(mtd->name);
531 kfree(gluebi);
532 }
533 ubi_unregister_volume_notifier(&gluebi_notifier);
534}
535
536module_init(ubi_gluebi_init);
537module_exit(ubi_gluebi_exit);
538MODULE_DESCRIPTION("MTD emulation layer over UBI volumes");
539MODULE_AUTHOR("Artem Bityutskiy, Joern Engel");
540MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index fe81039f2a7c..effaff28bab1 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -100,6 +100,7 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
100 const struct ubi_vid_hdr *vid_hdr); 100 const struct ubi_vid_hdr *vid_hdr);
101static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, 101static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
102 int len); 102 int len);
103static int paranoid_check_empty(struct ubi_device *ubi, int pnum);
103#else 104#else
104#define paranoid_check_not_bad(ubi, pnum) 0 105#define paranoid_check_not_bad(ubi, pnum) 0
105#define paranoid_check_peb_ec_hdr(ubi, pnum) 0 106#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
@@ -107,6 +108,7 @@ static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
107#define paranoid_check_peb_vid_hdr(ubi, pnum) 0 108#define paranoid_check_peb_vid_hdr(ubi, pnum) 0
108#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0 109#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0
109#define paranoid_check_all_ff(ubi, pnum, offset, len) 0 110#define paranoid_check_all_ff(ubi, pnum, offset, len) 0
111#define paranoid_check_empty(ubi, pnum) 0
110#endif 112#endif
111 113
112/** 114/**
@@ -670,11 +672,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
670 if (read_err != -EBADMSG && 672 if (read_err != -EBADMSG &&
671 check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { 673 check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
672 /* The physical eraseblock is supposedly empty */ 674 /* The physical eraseblock is supposedly empty */
673
674 /*
675 * The below is just a paranoid check, it has to be
676 * compiled out if paranoid checks are disabled.
677 */
678 err = paranoid_check_all_ff(ubi, pnum, 0, 675 err = paranoid_check_all_ff(ubi, pnum, 0,
679 ubi->peb_size); 676 ubi->peb_size);
680 if (err) 677 if (err)
@@ -902,7 +899,7 @@ bad:
902 * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected 899 * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
903 * and corrected by the flash driver; this is harmless but may indicate that 900 * and corrected by the flash driver; this is harmless but may indicate that
904 * this eraseblock may become bad soon; 901 * this eraseblock may become bad soon;
905 * o %UBI_IO_BAD_VID_HRD if the volume identifier header is corrupted (a CRC 902 * o %UBI_IO_BAD_VID_HDR if the volume identifier header is corrupted (a CRC
906 * error detected); 903 * error detected);
907 * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID 904 * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID
908 * header there); 905 * header there);
@@ -955,8 +952,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
955 * The below is just a paranoid check, it has to be 952 * The below is just a paranoid check, it has to be
956 * compiled out if paranoid checks are disabled. 953 * compiled out if paranoid checks are disabled.
957 */ 954 */
958 err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start, 955 err = paranoid_check_empty(ubi, pnum);
959 ubi->leb_size);
960 if (err) 956 if (err)
961 return err > 0 ? UBI_IO_BAD_VID_HDR : err; 957 return err > 0 ? UBI_IO_BAD_VID_HDR : err;
962 958
@@ -1280,4 +1276,74 @@ error:
1280 return err; 1276 return err;
1281} 1277}
1282 1278
1279/**
1280 * paranoid_check_empty - whether a PEB is empty.
1281 * @ubi: UBI device description object
1282 * @pnum: the physical eraseblock number to check
1283 *
1284 * This function makes sure PEB @pnum is empty, which means it contains only
1285 * %0xFF data bytes. Returns zero if the PEB is empty, %1 if not, and a
1286 * negative error code in case of failure.
1287 *
1288 * Empty PEBs have the EC header, and do not have the VID header. The caller of
1289 * this function should have already made sure the PEB does not have the VID
1290 * header. However, this function re-checks that, because it is possible that
1291 * the header and data has already been written to the PEB.
1292 *
1293 * Let's consider a possible scenario. Suppose there are 2 tasks - A and B.
1294 * Task A is in 'wear_leveling_worker()'. It is reading VID header of PEB X to
1295 * find which LEB it corresponds to. PEB X is currently unmapped, and has no
1296 * VID header. Task B is trying to write to PEB X.
1297 *
1298 * Task A: in 'ubi_io_read_vid_hdr()': reads the VID header from PEB X. The
1299 * read data contain all 0xFF bytes;
1300 * Task B: writes VID header and some data to PEB X;
1301 * Task A: assumes PEB X is empty, calls 'paranoid_check_empty()'. And if we
1302 * do not re-read the VID header, and do not cancel the checking if it
1303 * is there, we fail.
1304 */
1305static int paranoid_check_empty(struct ubi_device *ubi, int pnum)
1306{
1307 int err, offs = ubi->vid_hdr_aloffset, len = ubi->vid_hdr_alsize;
1308 size_t read;
1309 uint32_t magic;
1310 const struct ubi_vid_hdr *vid_hdr;
1311
1312 mutex_lock(&ubi->dbg_buf_mutex);
1313 err = ubi->mtd->read(ubi->mtd, offs, len, &read, ubi->dbg_peb_buf);
1314 if (err && err != -EUCLEAN) {
1315 ubi_err("error %d while reading %d bytes from PEB %d:%d, "
1316 "read %zd bytes", err, len, pnum, offs, read);
1317 goto error;
1318 }
1319
1320 vid_hdr = ubi->dbg_peb_buf;
1321 magic = be32_to_cpu(vid_hdr->magic);
1322 if (magic == UBI_VID_HDR_MAGIC)
1323 /* The PEB contains VID header, so it is not empty */
1324 goto out;
1325
1326 err = check_pattern(ubi->dbg_peb_buf, 0xFF, len);
1327 if (err == 0) {
1328 ubi_err("flash region at PEB %d:%d, length %d does not "
1329 "contain all 0xFF bytes", pnum, offs, len);
1330 goto fail;
1331 }
1332
1333out:
1334 mutex_unlock(&ubi->dbg_buf_mutex);
1335 return 0;
1336
1337fail:
1338 ubi_err("paranoid check failed for PEB %d", pnum);
1339 ubi_msg("hex dump of the %d-%d region", offs, offs + len);
1340 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1341 ubi->dbg_peb_buf, len, 1);
1342 err = 1;
1343error:
1344 ubi_dbg_dump_stack();
1345 mutex_unlock(&ubi->dbg_buf_mutex);
1346 return err;
1347}
1348
1283#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ 1349#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 4abbe573fa40..88a72e9c8beb 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -26,6 +26,24 @@
26#include "ubi.h" 26#include "ubi.h"
27 27
28/** 28/**
29 * ubi_do_get_device_info - get information about UBI device.
30 * @ubi: UBI device description object
31 * @di: the information is stored here
32 *
33 * This function is the same as 'ubi_get_device_info()', but it assumes the UBI
34 * device is locked and cannot disappear.
35 */
36void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di)
37{
38 di->ubi_num = ubi->ubi_num;
39 di->leb_size = ubi->leb_size;
40 di->min_io_size = ubi->min_io_size;
41 di->ro_mode = ubi->ro_mode;
42 di->cdev = ubi->cdev.dev;
43}
44EXPORT_SYMBOL_GPL(ubi_do_get_device_info);
45
46/**
29 * ubi_get_device_info - get information about UBI device. 47 * ubi_get_device_info - get information about UBI device.
30 * @ubi_num: UBI device number 48 * @ubi_num: UBI device number
31 * @di: the information is stored here 49 * @di: the information is stored here
@@ -39,33 +57,24 @@ int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
39 57
40 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 58 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
41 return -EINVAL; 59 return -EINVAL;
42
43 ubi = ubi_get_device(ubi_num); 60 ubi = ubi_get_device(ubi_num);
44 if (!ubi) 61 if (!ubi)
45 return -ENODEV; 62 return -ENODEV;
46 63 ubi_do_get_device_info(ubi, di);
47 di->ubi_num = ubi->ubi_num;
48 di->leb_size = ubi->leb_size;
49 di->min_io_size = ubi->min_io_size;
50 di->ro_mode = ubi->ro_mode;
51 di->cdev = ubi->cdev.dev;
52
53 ubi_put_device(ubi); 64 ubi_put_device(ubi);
54 return 0; 65 return 0;
55} 66}
56EXPORT_SYMBOL_GPL(ubi_get_device_info); 67EXPORT_SYMBOL_GPL(ubi_get_device_info);
57 68
58/** 69/**
59 * ubi_get_volume_info - get information about UBI volume. 70 * ubi_do_get_volume_info - get information about UBI volume.
60 * @desc: volume descriptor 71 * @ubi: UBI device description object
72 * @vol: volume description object
61 * @vi: the information is stored here 73 * @vi: the information is stored here
62 */ 74 */
63void ubi_get_volume_info(struct ubi_volume_desc *desc, 75void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
64 struct ubi_volume_info *vi) 76 struct ubi_volume_info *vi)
65{ 77{
66 const struct ubi_volume *vol = desc->vol;
67 const struct ubi_device *ubi = vol->ubi;
68
69 vi->vol_id = vol->vol_id; 78 vi->vol_id = vol->vol_id;
70 vi->ubi_num = ubi->ubi_num; 79 vi->ubi_num = ubi->ubi_num;
71 vi->size = vol->reserved_pebs; 80 vi->size = vol->reserved_pebs;
@@ -79,6 +88,17 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc,
79 vi->name = vol->name; 88 vi->name = vol->name;
80 vi->cdev = vol->cdev.dev; 89 vi->cdev = vol->cdev.dev;
81} 90}
91
92/**
93 * ubi_get_volume_info - get information about UBI volume.
94 * @desc: volume descriptor
95 * @vi: the information is stored here
96 */
97void ubi_get_volume_info(struct ubi_volume_desc *desc,
98 struct ubi_volume_info *vi)
99{
100 ubi_do_get_volume_info(desc->vol->ubi, desc->vol, vi);
101}
82EXPORT_SYMBOL_GPL(ubi_get_volume_info); 102EXPORT_SYMBOL_GPL(ubi_get_volume_info);
83 103
84/** 104/**
@@ -106,7 +126,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
106 struct ubi_device *ubi; 126 struct ubi_device *ubi;
107 struct ubi_volume *vol; 127 struct ubi_volume *vol;
108 128
109 dbg_gen("open device %d volume %d, mode %d", ubi_num, vol_id, mode); 129 dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode);
110 130
111 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 131 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
112 return ERR_PTR(-EINVAL); 132 return ERR_PTR(-EINVAL);
@@ -196,6 +216,8 @@ out_free:
196 kfree(desc); 216 kfree(desc);
197out_put_ubi: 217out_put_ubi:
198 ubi_put_device(ubi); 218 ubi_put_device(ubi);
219 dbg_err("cannot open device %d, volume %d, error %d",
220 ubi_num, vol_id, err);
199 return ERR_PTR(err); 221 return ERR_PTR(err);
200} 222}
201EXPORT_SYMBOL_GPL(ubi_open_volume); 223EXPORT_SYMBOL_GPL(ubi_open_volume);
@@ -215,7 +237,7 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
215 struct ubi_device *ubi; 237 struct ubi_device *ubi;
216 struct ubi_volume_desc *ret; 238 struct ubi_volume_desc *ret;
217 239
218 dbg_gen("open volume %s, mode %d", name, mode); 240 dbg_gen("open device %d, volume %s, mode %d", ubi_num, name, mode);
219 241
220 if (!name) 242 if (!name)
221 return ERR_PTR(-EINVAL); 243 return ERR_PTR(-EINVAL);
@@ -266,7 +288,8 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
266 struct ubi_volume *vol = desc->vol; 288 struct ubi_volume *vol = desc->vol;
267 struct ubi_device *ubi = vol->ubi; 289 struct ubi_device *ubi = vol->ubi;
268 290
269 dbg_gen("close volume %d, mode %d", vol->vol_id, desc->mode); 291 dbg_gen("close device %d, volume %d, mode %d",
292 ubi->ubi_num, vol->vol_id, desc->mode);
270 293
271 spin_lock(&ubi->volumes_lock); 294 spin_lock(&ubi->volumes_lock);
272 switch (desc->mode) { 295 switch (desc->mode) {
@@ -558,7 +581,7 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
558EXPORT_SYMBOL_GPL(ubi_leb_unmap); 581EXPORT_SYMBOL_GPL(ubi_leb_unmap);
559 582
560/** 583/**
561 * ubi_leb_map - map logical erasblock to a physical eraseblock. 584 * ubi_leb_map - map logical eraseblock to a physical eraseblock.
562 * @desc: volume descriptor 585 * @desc: volume descriptor
563 * @lnum: logical eraseblock number 586 * @lnum: logical eraseblock number
564 * @dtype: expected data type 587 * @dtype: expected data type
@@ -656,3 +679,59 @@ int ubi_sync(int ubi_num)
656 return 0; 679 return 0;
657} 680}
658EXPORT_SYMBOL_GPL(ubi_sync); 681EXPORT_SYMBOL_GPL(ubi_sync);
682
683BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
684
685/**
686 * ubi_register_volume_notifier - register a volume notifier.
687 * @nb: the notifier description object
688 * @ignore_existing: if non-zero, do not send "added" notification for all
689 * already existing volumes
690 *
691 * This function registers a volume notifier, which means that
692 * 'nb->notifier_call()' will be invoked when an UBI volume is created,
693 * removed, re-sized, re-named, or updated. The first argument of the function
694 * is the notification type. The second argument is pointer to a
695 * &struct ubi_notification object which describes the notification event.
696 * Using UBI API from the volume notifier is prohibited.
697 *
698 * This function returns zero in case of success and a negative error code
699 * in case of failure.
700 */
701int ubi_register_volume_notifier(struct notifier_block *nb,
702 int ignore_existing)
703{
704 int err;
705
706 err = blocking_notifier_chain_register(&ubi_notifiers, nb);
707 if (err != 0)
708 return err;
709 if (ignore_existing)
710 return 0;
711
712 /*
713 * We are going to walk all UBI devices and all volumes, and
714 * notify the user about existing volumes by the %UBI_VOLUME_ADDED
715 * event. We have to lock the @ubi_devices_mutex to make sure UBI
716 * devices do not disappear.
717 */
718 mutex_lock(&ubi_devices_mutex);
719 ubi_enumerate_volumes(nb);
720 mutex_unlock(&ubi_devices_mutex);
721
722 return err;
723}
724EXPORT_SYMBOL_GPL(ubi_register_volume_notifier);
725
726/**
727 * ubi_unregister_volume_notifier - unregister the volume notifier.
728 * @nb: the notifier description object
729 *
730 * This function unregisters volume notifier @nm and returns zero in case of
731 * success and a negative error code in case of failure.
732 */
733int ubi_unregister_volume_notifier(struct notifier_block *nb)
734{
735 return blocking_notifier_chain_unregister(&ubi_notifiers, nb);
736}
737EXPORT_SYMBOL_GPL(ubi_unregister_volume_notifier);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c055511bb1b2..28acd133c997 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -36,6 +36,7 @@
36#include <linux/device.h> 36#include <linux/device.h>
37#include <linux/string.h> 37#include <linux/string.h>
38#include <linux/vmalloc.h> 38#include <linux/vmalloc.h>
39#include <linux/notifier.h>
39#include <linux/mtd/mtd.h> 40#include <linux/mtd/mtd.h>
40#include <linux/mtd/ubi.h> 41#include <linux/mtd/ubi.h>
41 42
@@ -100,6 +101,28 @@ enum {
100 UBI_IO_BITFLIPS 101 UBI_IO_BITFLIPS
101}; 102};
102 103
104/*
105 * Return codes of the 'ubi_eba_copy_leb()' function.
106 *
107 * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source
108 * PEB was put meanwhile, or there is I/O on the source PEB
109 * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source
110 * PEB
111 * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target
112 * PEB
113 * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
114 * PEB
115 * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
116 * target PEB
117 */
118enum {
119 MOVE_CANCEL_RACE = 1,
120 MOVE_SOURCE_RD_ERR,
121 MOVE_TARGET_RD_ERR,
122 MOVE_TARGET_WR_ERR,
123 MOVE_CANCEL_BITFLIPS,
124};
125
103/** 126/**
104 * struct ubi_wl_entry - wear-leveling entry. 127 * struct ubi_wl_entry - wear-leveling entry.
105 * @u.rb: link in the corresponding (free/used) RB-tree 128 * @u.rb: link in the corresponding (free/used) RB-tree
@@ -208,10 +231,6 @@ struct ubi_volume_desc;
208 * @changing_leb: %1 if the atomic LEB change ioctl command is in progress 231 * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
209 * @direct_writes: %1 if direct writes are enabled for this volume 232 * @direct_writes: %1 if direct writes are enabled for this volume
210 * 233 *
211 * @gluebi_desc: gluebi UBI volume descriptor
212 * @gluebi_refcount: reference count of the gluebi MTD device
213 * @gluebi_mtd: MTD device description object of the gluebi MTD device
214 *
215 * The @corrupted field indicates that the volume's contents is corrupted. 234 * The @corrupted field indicates that the volume's contents is corrupted.
216 * Since UBI protects only static volumes, this field is not relevant to 235 * Since UBI protects only static volumes, this field is not relevant to
217 * dynamic volumes - it is user's responsibility to assure their data 236 * dynamic volumes - it is user's responsibility to assure their data
@@ -255,17 +274,6 @@ struct ubi_volume {
255 unsigned int updating:1; 274 unsigned int updating:1;
256 unsigned int changing_leb:1; 275 unsigned int changing_leb:1;
257 unsigned int direct_writes:1; 276 unsigned int direct_writes:1;
258
259#ifdef CONFIG_MTD_UBI_GLUEBI
260 /*
261 * Gluebi-related stuff may be compiled out.
262 * Note: this should not be built into UBI but should be a separate
263 * ubimtd driver which works on top of UBI and emulates MTD devices.
264 */
265 struct ubi_volume_desc *gluebi_desc;
266 int gluebi_refcount;
267 struct mtd_info gluebi_mtd;
268#endif
269}; 277};
270 278
271/** 279/**
@@ -305,9 +313,9 @@ struct ubi_wl_entry;
305 * @vtbl_slots: how many slots are available in the volume table 313 * @vtbl_slots: how many slots are available in the volume table
306 * @vtbl_size: size of the volume table in bytes 314 * @vtbl_size: size of the volume table in bytes
307 * @vtbl: in-RAM volume table copy 315 * @vtbl: in-RAM volume table copy
308 * @volumes_mutex: protects on-flash volume table and serializes volume 316 * @device_mutex: protects on-flash volume table and serializes volume
309 * changes, like creation, deletion, update, re-size, 317 * creation, deletion, update, re-size, re-name and set
310 * re-name and set property 318 * property
311 * 319 *
312 * @max_ec: current highest erase counter value 320 * @max_ec: current highest erase counter value
313 * @mean_ec: current mean erase counter value 321 * @mean_ec: current mean erase counter value
@@ -318,14 +326,15 @@ struct ubi_wl_entry;
318 * @alc_mutex: serializes "atomic LEB change" operations 326 * @alc_mutex: serializes "atomic LEB change" operations
319 * 327 *
320 * @used: RB-tree of used physical eraseblocks 328 * @used: RB-tree of used physical eraseblocks
329 * @erroneous: RB-tree of erroneous used physical eraseblocks
321 * @free: RB-tree of free physical eraseblocks 330 * @free: RB-tree of free physical eraseblocks
322 * @scrub: RB-tree of physical eraseblocks which need scrubbing 331 * @scrub: RB-tree of physical eraseblocks which need scrubbing
323 * @pq: protection queue (contain physical eraseblocks which are temporarily 332 * @pq: protection queue (contain physical eraseblocks which are temporarily
324 * protected from the wear-leveling worker) 333 * protected from the wear-leveling worker)
325 * @pq_head: protection queue head 334 * @pq_head: protection queue head
326 * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, 335 * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
327 * @move_to, @move_to_put @erase_pending, @wl_scheduled and @works 336 * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
328 * fields 337 * @erroneous, and @erroneous_peb_count fields
329 * @move_mutex: serializes eraseblock moves 338 * @move_mutex: serializes eraseblock moves
330 * @work_sem: synchronizes the WL worker with use tasks 339 * @work_sem: synchronizes the WL worker with use tasks
331 * @wl_scheduled: non-zero if the wear-leveling was scheduled 340 * @wl_scheduled: non-zero if the wear-leveling was scheduled
@@ -339,12 +348,15 @@ struct ubi_wl_entry;
339 * @bgt_thread: background thread description object 348 * @bgt_thread: background thread description object
340 * @thread_enabled: if the background thread is enabled 349 * @thread_enabled: if the background thread is enabled
341 * @bgt_name: background thread name 350 * @bgt_name: background thread name
351 * @reboot_notifier: notifier to terminate background thread before rebooting
342 * 352 *
343 * @flash_size: underlying MTD device size (in bytes) 353 * @flash_size: underlying MTD device size (in bytes)
344 * @peb_count: count of physical eraseblocks on the MTD device 354 * @peb_count: count of physical eraseblocks on the MTD device
345 * @peb_size: physical eraseblock size 355 * @peb_size: physical eraseblock size
346 * @bad_peb_count: count of bad physical eraseblocks 356 * @bad_peb_count: count of bad physical eraseblocks
347 * @good_peb_count: count of good physical eraseblocks 357 * @good_peb_count: count of good physical eraseblocks
358 * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
359 * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks
348 * @min_io_size: minimal input/output unit size of the underlying MTD device 360 * @min_io_size: minimal input/output unit size of the underlying MTD device
349 * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers 361 * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
350 * @ro_mode: if the UBI device is in read-only mode 362 * @ro_mode: if the UBI device is in read-only mode
@@ -366,7 +378,6 @@ struct ubi_wl_entry;
366 * @peb_buf2: another buffer of PEB size used for different purposes 378 * @peb_buf2: another buffer of PEB size used for different purposes
367 * @buf_mutex: protects @peb_buf1 and @peb_buf2 379 * @buf_mutex: protects @peb_buf1 and @peb_buf2
368 * @ckvol_mutex: serializes static volume checking when opening 380 * @ckvol_mutex: serializes static volume checking when opening
369 * @mult_mutex: serializes operations on multiple volumes, like re-naming
370 * @dbg_peb_buf: buffer of PEB size used for debugging 381 * @dbg_peb_buf: buffer of PEB size used for debugging
371 * @dbg_buf_mutex: protects @dbg_peb_buf 382 * @dbg_buf_mutex: protects @dbg_peb_buf
372 */ 383 */
@@ -389,7 +400,7 @@ struct ubi_device {
389 int vtbl_slots; 400 int vtbl_slots;
390 int vtbl_size; 401 int vtbl_size;
391 struct ubi_vtbl_record *vtbl; 402 struct ubi_vtbl_record *vtbl;
392 struct mutex volumes_mutex; 403 struct mutex device_mutex;
393 404
394 int max_ec; 405 int max_ec;
395 /* Note, mean_ec is not updated run-time - should be fixed */ 406 /* Note, mean_ec is not updated run-time - should be fixed */
@@ -403,6 +414,7 @@ struct ubi_device {
403 414
404 /* Wear-leveling sub-system's stuff */ 415 /* Wear-leveling sub-system's stuff */
405 struct rb_root used; 416 struct rb_root used;
417 struct rb_root erroneous;
406 struct rb_root free; 418 struct rb_root free;
407 struct rb_root scrub; 419 struct rb_root scrub;
408 struct list_head pq[UBI_PROT_QUEUE_LEN]; 420 struct list_head pq[UBI_PROT_QUEUE_LEN];
@@ -420,6 +432,7 @@ struct ubi_device {
420 struct task_struct *bgt_thread; 432 struct task_struct *bgt_thread;
421 int thread_enabled; 433 int thread_enabled;
422 char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; 434 char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
435 struct notifier_block reboot_notifier;
423 436
424 /* I/O sub-system's stuff */ 437 /* I/O sub-system's stuff */
425 long long flash_size; 438 long long flash_size;
@@ -427,6 +440,8 @@ struct ubi_device {
427 int peb_size; 440 int peb_size;
428 int bad_peb_count; 441 int bad_peb_count;
429 int good_peb_count; 442 int good_peb_count;
443 int erroneous_peb_count;
444 int max_erroneous;
430 int min_io_size; 445 int min_io_size;
431 int hdrs_min_io_size; 446 int hdrs_min_io_size;
432 int ro_mode; 447 int ro_mode;
@@ -444,8 +459,7 @@ struct ubi_device {
444 void *peb_buf2; 459 void *peb_buf2;
445 struct mutex buf_mutex; 460 struct mutex buf_mutex;
446 struct mutex ckvol_mutex; 461 struct mutex ckvol_mutex;
447 struct mutex mult_mutex; 462#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
448#ifdef CONFIG_MTD_UBI_DEBUG
449 void *dbg_peb_buf; 463 void *dbg_peb_buf;
450 struct mutex dbg_buf_mutex; 464 struct mutex dbg_buf_mutex;
451#endif 465#endif
@@ -457,6 +471,7 @@ extern const struct file_operations ubi_cdev_operations;
457extern const struct file_operations ubi_vol_cdev_operations; 471extern const struct file_operations ubi_vol_cdev_operations;
458extern struct class *ubi_class; 472extern struct class *ubi_class;
459extern struct mutex ubi_devices_mutex; 473extern struct mutex ubi_devices_mutex;
474extern struct blocking_notifier_head ubi_notifiers;
460 475
461/* vtbl.c */ 476/* vtbl.c */
462int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, 477int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@@ -489,17 +504,6 @@ int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
489int ubi_check_volume(struct ubi_device *ubi, int vol_id); 504int ubi_check_volume(struct ubi_device *ubi, int vol_id);
490void ubi_calculate_reserved(struct ubi_device *ubi); 505void ubi_calculate_reserved(struct ubi_device *ubi);
491 506
492/* gluebi.c */
493#ifdef CONFIG_MTD_UBI_GLUEBI
494int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol);
495int ubi_destroy_gluebi(struct ubi_volume *vol);
496void ubi_gluebi_updated(struct ubi_volume *vol);
497#else
498#define ubi_create_gluebi(ubi, vol) 0
499#define ubi_destroy_gluebi(vol) 0
500#define ubi_gluebi_updated(vol)
501#endif
502
503/* eba.c */ 507/* eba.c */
504int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, 508int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
505 int lnum); 509 int lnum);
@@ -549,6 +553,16 @@ struct ubi_device *ubi_get_device(int ubi_num);
549void ubi_put_device(struct ubi_device *ubi); 553void ubi_put_device(struct ubi_device *ubi);
550struct ubi_device *ubi_get_by_major(int major); 554struct ubi_device *ubi_get_by_major(int major);
551int ubi_major2num(int major); 555int ubi_major2num(int major);
556int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
557 int ntype);
558int ubi_notify_all(struct ubi_device *ubi, int ntype,
559 struct notifier_block *nb);
560int ubi_enumerate_volumes(struct notifier_block *nb);
561
562/* kapi.c */
563void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
564void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
565 struct ubi_volume_info *vi);
552 566
553/* 567/*
554 * ubi_rb_for_each_entry - walk an RB-tree. 568 * ubi_rb_for_each_entry - walk an RB-tree.
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 6b4d1ae891ae..74fdc40c8627 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -68,10 +68,10 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
68 sizeof(struct ubi_vtbl_record)); 68 sizeof(struct ubi_vtbl_record));
69 vtbl_rec.upd_marker = 1; 69 vtbl_rec.upd_marker = 1;
70 70
71 mutex_lock(&ubi->volumes_mutex); 71 mutex_lock(&ubi->device_mutex);
72 err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); 72 err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
73 mutex_unlock(&ubi->volumes_mutex);
74 vol->upd_marker = 1; 73 vol->upd_marker = 1;
74 mutex_unlock(&ubi->device_mutex);
75 return err; 75 return err;
76} 76}
77 77
@@ -109,10 +109,10 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
109 vol->last_eb_bytes = vol->usable_leb_size; 109 vol->last_eb_bytes = vol->usable_leb_size;
110 } 110 }
111 111
112 mutex_lock(&ubi->volumes_mutex); 112 mutex_lock(&ubi->device_mutex);
113 err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); 113 err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
114 mutex_unlock(&ubi->volumes_mutex);
115 vol->upd_marker = 0; 114 vol->upd_marker = 0;
115 mutex_unlock(&ubi->device_mutex);
116 return err; 116 return err;
117} 117}
118 118
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index df5483562b7a..ab64cb56df6e 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -198,7 +198,7 @@ static void volume_sysfs_close(struct ubi_volume *vol)
198 * %UBI_VOL_NUM_AUTO, this function automatically assign ID to the new volume 198 * %UBI_VOL_NUM_AUTO, this function automatically assign ID to the new volume
199 * and saves it in @req->vol_id. Returns zero in case of success and a negative 199 * and saves it in @req->vol_id. Returns zero in case of success and a negative
200 * error code in case of failure. Note, the caller has to have the 200 * error code in case of failure. Note, the caller has to have the
201 * @ubi->volumes_mutex locked. 201 * @ubi->device_mutex locked.
202 */ 202 */
203int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) 203int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
204{ 204{
@@ -232,8 +232,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
232 req->vol_id = vol_id; 232 req->vol_id = vol_id;
233 } 233 }
234 234
235 dbg_gen("volume ID %d, %llu bytes, type %d, name %s", 235 dbg_gen("create device %d, volume %d, %llu bytes, type %d, name %s",
236 vol_id, (unsigned long long)req->bytes, 236 ubi->ubi_num, vol_id, (unsigned long long)req->bytes,
237 (int)req->vol_type, req->name); 237 (int)req->vol_type, req->name);
238 238
239 /* Ensure that this volume does not exist */ 239 /* Ensure that this volume does not exist */
@@ -317,10 +317,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
317 goto out_mapping; 317 goto out_mapping;
318 } 318 }
319 319
320 err = ubi_create_gluebi(ubi, vol);
321 if (err)
322 goto out_cdev;
323
324 vol->dev.release = vol_release; 320 vol->dev.release = vol_release;
325 vol->dev.parent = &ubi->dev; 321 vol->dev.parent = &ubi->dev;
326 vol->dev.devt = dev; 322 vol->dev.devt = dev;
@@ -330,7 +326,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
330 err = device_register(&vol->dev); 326 err = device_register(&vol->dev);
331 if (err) { 327 if (err) {
332 ubi_err("cannot register device"); 328 ubi_err("cannot register device");
333 goto out_gluebi; 329 goto out_cdev;
334 } 330 }
335 331
336 err = volume_sysfs_init(ubi, vol); 332 err = volume_sysfs_init(ubi, vol);
@@ -358,7 +354,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
358 ubi->vol_count += 1; 354 ubi->vol_count += 1;
359 spin_unlock(&ubi->volumes_lock); 355 spin_unlock(&ubi->volumes_lock);
360 356
361 err = paranoid_check_volumes(ubi); 357 ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
358 if (paranoid_check_volumes(ubi))
359 dbg_err("check failed while creating volume %d", vol_id);
362 return err; 360 return err;
363 361
364out_sysfs: 362out_sysfs:
@@ -373,10 +371,6 @@ out_sysfs:
373 do_free = 0; 371 do_free = 0;
374 get_device(&vol->dev); 372 get_device(&vol->dev);
375 volume_sysfs_close(vol); 373 volume_sysfs_close(vol);
376out_gluebi:
377 if (ubi_destroy_gluebi(vol))
378 dbg_err("cannot destroy gluebi for volume %d:%d",
379 ubi->ubi_num, vol_id);
380out_cdev: 374out_cdev:
381 cdev_del(&vol->cdev); 375 cdev_del(&vol->cdev);
382out_mapping: 376out_mapping:
@@ -403,7 +397,7 @@ out_unlock:
403 * 397 *
404 * This function removes volume described by @desc. The volume has to be opened 398 * This function removes volume described by @desc. The volume has to be opened
405 * in "exclusive" mode. Returns zero in case of success and a negative error 399 * in "exclusive" mode. Returns zero in case of success and a negative error
406 * code in case of failure. The caller has to have the @ubi->volumes_mutex 400 * code in case of failure. The caller has to have the @ubi->device_mutex
407 * locked. 401 * locked.
408 */ 402 */
409int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) 403int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
@@ -412,7 +406,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
412 struct ubi_device *ubi = vol->ubi; 406 struct ubi_device *ubi = vol->ubi;
413 int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs; 407 int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
414 408
415 dbg_gen("remove UBI volume %d", vol_id); 409 dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id);
416 ubi_assert(desc->mode == UBI_EXCLUSIVE); 410 ubi_assert(desc->mode == UBI_EXCLUSIVE);
417 ubi_assert(vol == ubi->volumes[vol_id]); 411 ubi_assert(vol == ubi->volumes[vol_id]);
418 412
@@ -431,10 +425,6 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
431 ubi->volumes[vol_id] = NULL; 425 ubi->volumes[vol_id] = NULL;
432 spin_unlock(&ubi->volumes_lock); 426 spin_unlock(&ubi->volumes_lock);
433 427
434 err = ubi_destroy_gluebi(vol);
435 if (err)
436 goto out_err;
437
438 if (!no_vtbl) { 428 if (!no_vtbl) {
439 err = ubi_change_vtbl_record(ubi, vol_id, NULL); 429 err = ubi_change_vtbl_record(ubi, vol_id, NULL);
440 if (err) 430 if (err)
@@ -465,8 +455,10 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
465 ubi->vol_count -= 1; 455 ubi->vol_count -= 1;
466 spin_unlock(&ubi->volumes_lock); 456 spin_unlock(&ubi->volumes_lock);
467 457
468 if (!no_vtbl) 458 ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
469 err = paranoid_check_volumes(ubi); 459 if (!no_vtbl && paranoid_check_volumes(ubi))
460 dbg_err("check failed while removing volume %d", vol_id);
461
470 return err; 462 return err;
471 463
472out_err: 464out_err:
@@ -485,7 +477,7 @@ out_unlock:
485 * 477 *
486 * This function re-sizes the volume and returns zero in case of success, and a 478 * This function re-sizes the volume and returns zero in case of success, and a
487 * negative error code in case of failure. The caller has to have the 479 * negative error code in case of failure. The caller has to have the
488 * @ubi->volumes_mutex locked. 480 * @ubi->device_mutex locked.
489 */ 481 */
490int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) 482int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
491{ 483{
@@ -498,8 +490,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
498 if (ubi->ro_mode) 490 if (ubi->ro_mode)
499 return -EROFS; 491 return -EROFS;
500 492
501 dbg_gen("re-size volume %d to from %d to %d PEBs", 493 dbg_gen("re-size device %d, volume %d to from %d to %d PEBs",
502 vol_id, vol->reserved_pebs, reserved_pebs); 494 ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs);
503 495
504 if (vol->vol_type == UBI_STATIC_VOLUME && 496 if (vol->vol_type == UBI_STATIC_VOLUME &&
505 reserved_pebs < vol->used_ebs) { 497 reserved_pebs < vol->used_ebs) {
@@ -587,7 +579,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
587 (long long)vol->used_ebs * vol->usable_leb_size; 579 (long long)vol->used_ebs * vol->usable_leb_size;
588 } 580 }
589 581
590 err = paranoid_check_volumes(ubi); 582 ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
583 if (paranoid_check_volumes(ubi))
584 dbg_err("check failed while re-sizing volume %d", vol_id);
591 return err; 585 return err;
592 586
593out_acc: 587out_acc:
@@ -632,11 +626,12 @@ int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
632 vol->name_len = re->new_name_len; 626 vol->name_len = re->new_name_len;
633 memcpy(vol->name, re->new_name, re->new_name_len + 1); 627 memcpy(vol->name, re->new_name, re->new_name_len + 1);
634 spin_unlock(&ubi->volumes_lock); 628 spin_unlock(&ubi->volumes_lock);
629 ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED);
635 } 630 }
636 } 631 }
637 632
638 if (!err) 633 if (!err && paranoid_check_volumes(ubi))
639 err = paranoid_check_volumes(ubi); 634 ;
640 return err; 635 return err;
641} 636}
642 637
@@ -667,10 +662,6 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
667 return err; 662 return err;
668 } 663 }
669 664
670 err = ubi_create_gluebi(ubi, vol);
671 if (err)
672 goto out_cdev;
673
674 vol->dev.release = vol_release; 665 vol->dev.release = vol_release;
675 vol->dev.parent = &ubi->dev; 666 vol->dev.parent = &ubi->dev;
676 vol->dev.devt = dev; 667 vol->dev.devt = dev;
@@ -678,21 +669,19 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
678 dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id); 669 dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
679 err = device_register(&vol->dev); 670 err = device_register(&vol->dev);
680 if (err) 671 if (err)
681 goto out_gluebi; 672 goto out_cdev;
682 673
683 err = volume_sysfs_init(ubi, vol); 674 err = volume_sysfs_init(ubi, vol);
684 if (err) { 675 if (err) {
685 cdev_del(&vol->cdev); 676 cdev_del(&vol->cdev);
686 err = ubi_destroy_gluebi(vol);
687 volume_sysfs_close(vol); 677 volume_sysfs_close(vol);
688 return err; 678 return err;
689 } 679 }
690 680
691 err = paranoid_check_volumes(ubi); 681 if (paranoid_check_volumes(ubi))
682 dbg_err("check failed while adding volume %d", vol_id);
692 return err; 683 return err;
693 684
694out_gluebi:
695 err = ubi_destroy_gluebi(vol);
696out_cdev: 685out_cdev:
697 cdev_del(&vol->cdev); 686 cdev_del(&vol->cdev);
698 return err; 687 return err;
@@ -708,12 +697,9 @@ out_cdev:
708 */ 697 */
709void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol) 698void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
710{ 699{
711 int err;
712
713 dbg_gen("free volume %d", vol->vol_id); 700 dbg_gen("free volume %d", vol->vol_id);
714 701
715 ubi->volumes[vol->vol_id] = NULL; 702 ubi->volumes[vol->vol_id] = NULL;
716 err = ubi_destroy_gluebi(vol);
717 cdev_del(&vol->cdev); 703 cdev_del(&vol->cdev);
718 volume_sysfs_close(vol); 704 volume_sysfs_close(vol);
719} 705}
@@ -868,6 +854,7 @@ fail:
868 if (vol) 854 if (vol)
869 ubi_dbg_dump_vol_info(vol); 855 ubi_dbg_dump_vol_info(vol);
870 ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); 856 ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
857 dump_stack();
871 spin_unlock(&ubi->volumes_lock); 858 spin_unlock(&ubi->volumes_lock);
872 return -EINVAL; 859 return -EINVAL;
873} 860}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 891534f8210d..2b2472300610 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -55,8 +55,8 @@
55 * 55 *
56 * As it was said, for the UBI sub-system all physical eraseblocks are either 56 * As it was said, for the UBI sub-system all physical eraseblocks are either
57 * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while 57 * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
58 * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or 58 * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
59 * (temporarily) in the @wl->pq queue. 59 * RB-trees, as well as (temporarily) in the @wl->pq queue.
60 * 60 *
61 * When the WL sub-system returns a physical eraseblock, the physical 61 * When the WL sub-system returns a physical eraseblock, the physical
62 * eraseblock is protected from being moved for some "time". For this reason, 62 * eraseblock is protected from being moved for some "time". For this reason,
@@ -83,6 +83,8 @@
83 * used. The former state corresponds to the @wl->free tree. The latter state 83 * used. The former state corresponds to the @wl->free tree. The latter state
84 * is split up on several sub-states: 84 * is split up on several sub-states:
85 * o the WL movement is allowed (@wl->used tree); 85 * o the WL movement is allowed (@wl->used tree);
86 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
87 * erroneous - e.g., there was a read error;
86 * o the WL movement is temporarily prohibited (@wl->pq queue); 88 * o the WL movement is temporarily prohibited (@wl->pq queue);
87 * o scrubbing is needed (@wl->scrub tree). 89 * o scrubbing is needed (@wl->scrub tree).
88 * 90 *
@@ -653,7 +655,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
653static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, 655static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
654 int cancel) 656 int cancel)
655{ 657{
656 int err, scrubbing = 0, torture = 0; 658 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
659 int vol_id = -1, uninitialized_var(lnum);
657 struct ubi_wl_entry *e1, *e2; 660 struct ubi_wl_entry *e1, *e2;
658 struct ubi_vid_hdr *vid_hdr; 661 struct ubi_vid_hdr *vid_hdr;
659 662
@@ -738,68 +741,78 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
738 /* 741 /*
739 * We are trying to move PEB without a VID header. UBI 742 * We are trying to move PEB without a VID header. UBI
740 * always write VID headers shortly after the PEB was 743 * always write VID headers shortly after the PEB was
741 * given, so we have a situation when it did not have 744 * given, so we have a situation when it has not yet
742 * chance to write it down because it was preempted. 745 * had a chance to write it, because it was preempted.
743 * Just re-schedule the work, so that next time it will 746 * So add this PEB to the protection queue so far,
744 * likely have the VID header in place. 747 * because presumably more data will be written there
748 * (including the missing VID header), and then we'll
749 * move it.
745 */ 750 */
746 dbg_wl("PEB %d has no VID header", e1->pnum); 751 dbg_wl("PEB %d has no VID header", e1->pnum);
752 protect = 1;
747 goto out_not_moved; 753 goto out_not_moved;
748 } 754 }
749 755
750 ubi_err("error %d while reading VID header from PEB %d", 756 ubi_err("error %d while reading VID header from PEB %d",
751 err, e1->pnum); 757 err, e1->pnum);
752 if (err > 0)
753 err = -EIO;
754 goto out_error; 758 goto out_error;
755 } 759 }
756 760
761 vol_id = be32_to_cpu(vid_hdr->vol_id);
762 lnum = be32_to_cpu(vid_hdr->lnum);
763
757 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); 764 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
758 if (err) { 765 if (err) {
759 if (err == -EAGAIN) 766 if (err == MOVE_CANCEL_RACE) {
767 /*
768 * The LEB has not been moved because the volume is
769 * being deleted or the PEB has been put meanwhile. We
770 * should prevent this PEB from being selected for
771 * wear-leveling movement again, so put it to the
772 * protection queue.
773 */
774 protect = 1;
760 goto out_not_moved; 775 goto out_not_moved;
761 if (err < 0) 776 }
762 goto out_error; 777
763 if (err == 2) { 778 if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
764 /* Target PEB write error, torture it */ 779 err == MOVE_TARGET_RD_ERR) {
780 /*
781 * Target PEB had bit-flips or write error - torture it.
782 */
765 torture = 1; 783 torture = 1;
766 goto out_not_moved; 784 goto out_not_moved;
767 } 785 }
768 786
769 /* 787 if (err == MOVE_SOURCE_RD_ERR) {
770 * The LEB has not been moved because the volume is being 788 /*
771 * deleted or the PEB has been put meanwhile. We should prevent 789 * An error happened while reading the source PEB. Do
772 * this PEB from being selected for wear-leveling movement 790 * not switch to R/O mode in this case, and give the
773 * again, so put it to the protection queue. 791 * upper layers a possibility to recover from this,
774 */ 792 * e.g. by unmapping corresponding LEB. Instead, just
775 793 * put this PEB to the @ubi->erroneous list to prevent
776 dbg_wl("canceled moving PEB %d", e1->pnum); 794 * UBI from trying to move it over and over again.
777 ubi_assert(err == 1); 795 */
778 796 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
779 ubi_free_vid_hdr(ubi, vid_hdr); 797 ubi_err("too many erroneous eraseblocks (%d)",
780 vid_hdr = NULL; 798 ubi->erroneous_peb_count);
781 799 goto out_error;
782 spin_lock(&ubi->wl_lock); 800 }
783 prot_queue_add(ubi, e1); 801 erroneous = 1;
784 ubi_assert(!ubi->move_to_put); 802 goto out_not_moved;
785 ubi->move_from = ubi->move_to = NULL; 803 }
786 ubi->wl_scheduled = 0;
787 spin_unlock(&ubi->wl_lock);
788 804
789 e1 = NULL; 805 if (err < 0)
790 err = schedule_erase(ubi, e2, 0);
791 if (err)
792 goto out_error; 806 goto out_error;
793 mutex_unlock(&ubi->move_mutex); 807
794 return 0; 808 ubi_assert(0);
795 } 809 }
796 810
797 /* The PEB has been successfully moved */ 811 /* The PEB has been successfully moved */
798 ubi_free_vid_hdr(ubi, vid_hdr);
799 vid_hdr = NULL;
800 if (scrubbing) 812 if (scrubbing)
801 ubi_msg("scrubbed PEB %d, data moved to PEB %d", 813 ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
802 e1->pnum, e2->pnum); 814 e1->pnum, vol_id, lnum, e2->pnum);
815 ubi_free_vid_hdr(ubi, vid_hdr);
803 816
804 spin_lock(&ubi->wl_lock); 817 spin_lock(&ubi->wl_lock);
805 if (!ubi->move_to_put) { 818 if (!ubi->move_to_put) {
@@ -812,8 +825,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
812 825
813 err = schedule_erase(ubi, e1, 0); 826 err = schedule_erase(ubi, e1, 0);
814 if (err) { 827 if (err) {
815 e1 = NULL; 828 kmem_cache_free(ubi_wl_entry_slab, e1);
816 goto out_error; 829 if (e2)
830 kmem_cache_free(ubi_wl_entry_slab, e2);
831 goto out_ro;
817 } 832 }
818 833
819 if (e2) { 834 if (e2) {
@@ -821,10 +836,13 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
821 * Well, the target PEB was put meanwhile, schedule it for 836 * Well, the target PEB was put meanwhile, schedule it for
822 * erasure. 837 * erasure.
823 */ 838 */
824 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum); 839 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
840 e2->pnum, vol_id, lnum);
825 err = schedule_erase(ubi, e2, 0); 841 err = schedule_erase(ubi, e2, 0);
826 if (err) 842 if (err) {
827 goto out_error; 843 kmem_cache_free(ubi_wl_entry_slab, e2);
844 goto out_ro;
845 }
828 } 846 }
829 847
830 dbg_wl("done"); 848 dbg_wl("done");
@@ -837,11 +855,19 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
837 * have been changed, schedule it for erasure. 855 * have been changed, schedule it for erasure.
838 */ 856 */
839out_not_moved: 857out_not_moved:
840 dbg_wl("canceled moving PEB %d", e1->pnum); 858 if (vol_id != -1)
841 ubi_free_vid_hdr(ubi, vid_hdr); 859 dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
842 vid_hdr = NULL; 860 e1->pnum, vol_id, lnum, e2->pnum, err);
861 else
862 dbg_wl("cancel moving PEB %d to PEB %d (%d)",
863 e1->pnum, e2->pnum, err);
843 spin_lock(&ubi->wl_lock); 864 spin_lock(&ubi->wl_lock);
844 if (scrubbing) 865 if (protect)
866 prot_queue_add(ubi, e1);
867 else if (erroneous) {
868 wl_tree_add(e1, &ubi->erroneous);
869 ubi->erroneous_peb_count += 1;
870 } else if (scrubbing)
845 wl_tree_add(e1, &ubi->scrub); 871 wl_tree_add(e1, &ubi->scrub);
846 else 872 else
847 wl_tree_add(e1, &ubi->used); 873 wl_tree_add(e1, &ubi->used);
@@ -850,32 +876,36 @@ out_not_moved:
850 ubi->wl_scheduled = 0; 876 ubi->wl_scheduled = 0;
851 spin_unlock(&ubi->wl_lock); 877 spin_unlock(&ubi->wl_lock);
852 878
853 e1 = NULL; 879 ubi_free_vid_hdr(ubi, vid_hdr);
854 err = schedule_erase(ubi, e2, torture); 880 err = schedule_erase(ubi, e2, torture);
855 if (err) 881 if (err) {
856 goto out_error; 882 kmem_cache_free(ubi_wl_entry_slab, e2);
857 883 goto out_ro;
884 }
858 mutex_unlock(&ubi->move_mutex); 885 mutex_unlock(&ubi->move_mutex);
859 return 0; 886 return 0;
860 887
861out_error: 888out_error:
862 ubi_err("error %d while moving PEB %d to PEB %d", 889 if (vol_id != -1)
863 err, e1->pnum, e2->pnum); 890 ubi_err("error %d while moving PEB %d to PEB %d",
864 891 err, e1->pnum, e2->pnum);
865 ubi_free_vid_hdr(ubi, vid_hdr); 892 else
893 ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
894 err, e1->pnum, vol_id, lnum, e2->pnum);
866 spin_lock(&ubi->wl_lock); 895 spin_lock(&ubi->wl_lock);
867 ubi->move_from = ubi->move_to = NULL; 896 ubi->move_from = ubi->move_to = NULL;
868 ubi->move_to_put = ubi->wl_scheduled = 0; 897 ubi->move_to_put = ubi->wl_scheduled = 0;
869 spin_unlock(&ubi->wl_lock); 898 spin_unlock(&ubi->wl_lock);
870 899
871 if (e1) 900 ubi_free_vid_hdr(ubi, vid_hdr);
872 kmem_cache_free(ubi_wl_entry_slab, e1); 901 kmem_cache_free(ubi_wl_entry_slab, e1);
873 if (e2) 902 kmem_cache_free(ubi_wl_entry_slab, e2);
874 kmem_cache_free(ubi_wl_entry_slab, e2);
875 ubi_ro_mode(ubi);
876 903
904out_ro:
905 ubi_ro_mode(ubi);
877 mutex_unlock(&ubi->move_mutex); 906 mutex_unlock(&ubi->move_mutex);
878 return err; 907 ubi_assert(err != 0);
908 return err < 0 ? err : -EIO;
879 909
880out_cancel: 910out_cancel:
881 ubi->wl_scheduled = 0; 911 ubi->wl_scheduled = 0;
@@ -1015,7 +1045,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1015 /* 1045 /*
1016 * If this is not %-EIO, we have no idea what to do. Scheduling 1046 * If this is not %-EIO, we have no idea what to do. Scheduling
1017 * this physical eraseblock for erasure again would cause 1047 * this physical eraseblock for erasure again would cause
1018 * errors again and again. Well, lets switch to RO mode. 1048 * errors again and again. Well, lets switch to R/O mode.
1019 */ 1049 */
1020 goto out_ro; 1050 goto out_ro;
1021 } 1051 }
@@ -1043,10 +1073,9 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1043 ubi_err("no reserved physical eraseblocks"); 1073 ubi_err("no reserved physical eraseblocks");
1044 goto out_ro; 1074 goto out_ro;
1045 } 1075 }
1046
1047 spin_unlock(&ubi->volumes_lock); 1076 spin_unlock(&ubi->volumes_lock);
1048 ubi_msg("mark PEB %d as bad", pnum);
1049 1077
1078 ubi_msg("mark PEB %d as bad", pnum);
1050 err = ubi_io_mark_bad(ubi, pnum); 1079 err = ubi_io_mark_bad(ubi, pnum);
1051 if (err) 1080 if (err)
1052 goto out_ro; 1081 goto out_ro;
@@ -1056,7 +1085,9 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1056 ubi->bad_peb_count += 1; 1085 ubi->bad_peb_count += 1;
1057 ubi->good_peb_count -= 1; 1086 ubi->good_peb_count -= 1;
1058 ubi_calculate_reserved(ubi); 1087 ubi_calculate_reserved(ubi);
1059 if (ubi->beb_rsvd_pebs == 0) 1088 if (ubi->beb_rsvd_pebs)
1089 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1090 else
1060 ubi_warn("last PEB from the reserved pool was used"); 1091 ubi_warn("last PEB from the reserved pool was used");
1061 spin_unlock(&ubi->volumes_lock); 1092 spin_unlock(&ubi->volumes_lock);
1062 1093
@@ -1125,6 +1156,13 @@ retry:
1125 } else if (in_wl_tree(e, &ubi->scrub)) { 1156 } else if (in_wl_tree(e, &ubi->scrub)) {
1126 paranoid_check_in_wl_tree(e, &ubi->scrub); 1157 paranoid_check_in_wl_tree(e, &ubi->scrub);
1127 rb_erase(&e->u.rb, &ubi->scrub); 1158 rb_erase(&e->u.rb, &ubi->scrub);
1159 } else if (in_wl_tree(e, &ubi->erroneous)) {
1160 paranoid_check_in_wl_tree(e, &ubi->erroneous);
1161 rb_erase(&e->u.rb, &ubi->erroneous);
1162 ubi->erroneous_peb_count -= 1;
1163 ubi_assert(ubi->erroneous_peb_count >= 0);
1164 /* Erroneous PEBs should be tortured */
1165 torture = 1;
1128 } else { 1166 } else {
1129 err = prot_queue_del(ubi, e->pnum); 1167 err = prot_queue_del(ubi, e->pnum);
1130 if (err) { 1168 if (err) {
@@ -1373,7 +1411,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1373 struct ubi_scan_leb *seb, *tmp; 1411 struct ubi_scan_leb *seb, *tmp;
1374 struct ubi_wl_entry *e; 1412 struct ubi_wl_entry *e;
1375 1413
1376 ubi->used = ubi->free = ubi->scrub = RB_ROOT; 1414 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1377 spin_lock_init(&ubi->wl_lock); 1415 spin_lock_init(&ubi->wl_lock);
1378 mutex_init(&ubi->move_mutex); 1416 mutex_init(&ubi->move_mutex);
1379 init_rwsem(&ubi->work_sem); 1417 init_rwsem(&ubi->work_sem);
@@ -1511,6 +1549,7 @@ void ubi_wl_close(struct ubi_device *ubi)
1511 cancel_pending(ubi); 1549 cancel_pending(ubi);
1512 protection_queue_destroy(ubi); 1550 protection_queue_destroy(ubi);
1513 tree_destroy(&ubi->used); 1551 tree_destroy(&ubi->used);
1552 tree_destroy(&ubi->erroneous);
1514 tree_destroy(&ubi->free); 1553 tree_destroy(&ubi->free);
1515 tree_destroy(&ubi->scrub); 1554 tree_destroy(&ubi->scrub);
1516 kfree(ubi->lookuptbl); 1555 kfree(ubi->lookuptbl);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 01f282cd0989..892a9e4e275f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2206,7 +2206,7 @@ config SKGE_DEBUG
2206 depends on SKGE && DEBUG_FS 2206 depends on SKGE && DEBUG_FS
2207 help 2207 help
2208 This option adds the ability to dump driver state for debugging. 2208 This option adds the ability to dump driver state for debugging.
2209 The file debugfs/skge/ethX displays the state of the internal 2209 The file /sys/kernel/debug/skge/ethX displays the state of the internal
2210 transmit and receive rings. 2210 transmit and receive rings.
2211 2211
2212 If unsure, say N. 2212 If unsure, say N.
@@ -2232,7 +2232,7 @@ config SKY2_DEBUG
2232 depends on SKY2 && DEBUG_FS 2232 depends on SKY2 && DEBUG_FS
2233 help 2233 help
2234 This option adds the ability to dump driver state for debugging. 2234 This option adds the ability to dump driver state for debugging.
2235 The file debugfs/sky2/ethX displays the state of the internal 2235 The file /sys/kernel/debug/sky2/ethX displays the state of the internal
2236 transmit and receive rings. 2236 transmit and receive rings.
2237 2237
2238 If unsure, say N. 2238 If unsure, say N.
@@ -2272,8 +2272,9 @@ config BNX2
2272 2272
2273config CNIC 2273config CNIC
2274 tristate "Broadcom CNIC support" 2274 tristate "Broadcom CNIC support"
2275 depends on BNX2 2275 depends on PCI
2276 depends on UIO 2276 select BNX2
2277 select UIO
2277 help 2278 help
2278 This driver supports offload features of Broadcom NetXtremeII 2279 This driver supports offload features of Broadcom NetXtremeII
2279 gigabit Ethernet cards. 2280 gigabit Ethernet cards.
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 811d3517fce0..11a0ba47b677 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1366,6 +1366,7 @@ static const struct file_operations tun_fops = {
1366static struct miscdevice tun_miscdev = { 1366static struct miscdevice tun_miscdev = {
1367 .minor = TUN_MINOR, 1367 .minor = TUN_MINOR,
1368 .name = "tun", 1368 .name = "tun",
1369 .devnode = "net/tun",
1369 .fops = &tun_fops, 1370 .fops = &tun_fops,
1370}; 1371};
1371 1372
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 1fe5da4cf0a0..60330f313f27 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -432,7 +432,7 @@ struct i2400m {
432 unsigned ready:1; /* all probing steps done */ 432 unsigned ready:1; /* all probing steps done */
433 unsigned rx_reorder:1; /* RX reorder is enabled */ 433 unsigned rx_reorder:1; /* RX reorder is enabled */
434 u8 trace_msg_from_user; /* echo rx msgs to 'trace' pipe */ 434 u8 trace_msg_from_user; /* echo rx msgs to 'trace' pipe */
435 /* typed u8 so debugfs/u8 can tweak */ 435 /* typed u8 so /sys/kernel/debug/u8 can tweak */
436 enum i2400m_system_state state; 436 enum i2400m_system_state state;
437 wait_queue_head_t state_wq; /* Woken up when on state updates */ 437 wait_queue_head_t state_wq; /* Woken up when on state updates */
438 438
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 509b6f94f73b..daf0c83527d8 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -28,11 +28,10 @@ config ATH5K_DEBUG
28 Say Y, if and you will get debug options for ath5k. 28 Say Y, if and you will get debug options for ath5k.
29 To use this, you need to mount debugfs: 29 To use this, you need to mount debugfs:
30 30
31 mkdir /debug/ 31 mount -t debugfs debug /sys/kernel/debug
32 mount -t debugfs debug /debug/
33 32
34 You will get access to files under: 33 You will get access to files under:
35 /debug/ath5k/phy0/ 34 /sys/kernel/debug/ath5k/phy0/
36 35
37 To enable debug, pass the debug level to the debug module 36 To enable debug, pass the debug level to the debug module
38 parameter. For example: 37 parameter. For example:
diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README
index d860fc375752..ab6a2d518af0 100644
--- a/drivers/net/wireless/libertas/README
+++ b/drivers/net/wireless/libertas/README
@@ -72,7 +72,7 @@ rdrf
72 location that is to be read. This parameter must be specified in 72 location that is to be read. This parameter must be specified in
73 hexadecimal (its possible to preceed preceding the number with a "0x"). 73 hexadecimal (its possible to preceed preceding the number with a "0x").
74 74
75 Path: /debugfs/libertas_wireless/ethX/registers/ 75 Path: /sys/kernel/debug/libertas_wireless/ethX/registers/
76 76
77 Usage: 77 Usage:
78 echo "0xa123" > rdmac ; cat rdmac 78 echo "0xa123" > rdmac ; cat rdmac
@@ -95,7 +95,7 @@ wrrf
95sleepparams 95sleepparams
96 This command is used to set the sleepclock configurations 96 This command is used to set the sleepclock configurations
97 97
98 Path: /debugfs/libertas_wireless/ethX/ 98 Path: /sys/kernel/debug/libertas_wireless/ethX/
99 99
100 Usage: 100 Usage:
101 cat sleepparams: reads the current sleepclock configuration 101 cat sleepparams: reads the current sleepclock configuration
@@ -115,7 +115,7 @@ subscribed_events
115 The subscribed_events directory contains the interface for the 115 The subscribed_events directory contains the interface for the
116 subscribed events API. 116 subscribed events API.
117 117
118 Path: /debugfs/libertas_wireless/ethX/subscribed_events/ 118 Path: /sys/kernel/debug/libertas_wireless/ethX/subscribed_events/
119 119
120 Each event is represented by a filename. Each filename consists of the 120 Each event is represented by a filename. Each filename consists of the
121 following three fields: 121 following three fields:
@@ -165,7 +165,7 @@ subscribed_events
165extscan 165extscan
166 This command is used to do a specific scan. 166 This command is used to do a specific scan.
167 167
168 Path: /debugfs/libertas_wireless/ethX/ 168 Path: /sys/kernel/debug/libertas_wireless/ethX/
169 169
170 Usage: echo "SSID" > extscan 170 Usage: echo "SSID" > extscan
171 171
@@ -179,7 +179,7 @@ getscantable
179 Display the current contents of the driver scan table (ie. get the 179 Display the current contents of the driver scan table (ie. get the
180 scan results). 180 scan results).
181 181
182 Path: /debugfs/libertas_wireless/ethX/ 182 Path: /sys/kernel/debug/libertas_wireless/ethX/
183 183
184 Usage: 184 Usage:
185 cat getscantable 185 cat getscantable
@@ -188,7 +188,7 @@ setuserscan
188 Initiate a customized scan and retrieve the results 188 Initiate a customized scan and retrieve the results
189 189
190 190
191 Path: /debugfs/libertas_wireless/ethX/ 191 Path: /sys/kernel/debug/libertas_wireless/ethX/
192 192
193 Usage: 193 Usage:
194 echo "[ARGS]" > setuserscan 194 echo "[ARGS]" > setuserscan
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index f8c2898d82b0..06a46d7b3d6c 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -43,8 +43,8 @@ struct if_spi_card {
43 struct lbs_private *priv; 43 struct lbs_private *priv;
44 struct libertas_spi_platform_data *pdata; 44 struct libertas_spi_platform_data *pdata;
45 45
46 char helper_fw_name[FIRMWARE_NAME_MAX]; 46 char helper_fw_name[IF_SPI_FW_NAME_MAX];
47 char main_fw_name[FIRMWARE_NAME_MAX]; 47 char main_fw_name[IF_SPI_FW_NAME_MAX];
48 48
49 /* The card ID and card revision, as reported by the hardware. */ 49 /* The card ID and card revision, as reported by the hardware. */
50 u16 card_id; 50 u16 card_id;
@@ -1019,9 +1019,9 @@ static int if_spi_calculate_fw_names(u16 card_id,
1019 lbs_pr_err("Unsupported chip_id: 0x%02x\n", card_id); 1019 lbs_pr_err("Unsupported chip_id: 0x%02x\n", card_id);
1020 return -EAFNOSUPPORT; 1020 return -EAFNOSUPPORT;
1021 } 1021 }
1022 snprintf(helper_fw, FIRMWARE_NAME_MAX, "libertas/gspi%d_hlp.bin", 1022 snprintf(helper_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d_hlp.bin",
1023 chip_id_to_device_name[i].name); 1023 chip_id_to_device_name[i].name);
1024 snprintf(main_fw, FIRMWARE_NAME_MAX, "libertas/gspi%d.bin", 1024 snprintf(main_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d.bin",
1025 chip_id_to_device_name[i].name); 1025 chip_id_to_device_name[i].name);
1026 return 0; 1026 return 0;
1027} 1027}
diff --git a/drivers/net/wireless/libertas/if_spi.h b/drivers/net/wireless/libertas/if_spi.h
index 2103869cc5b0..f87eec410848 100644
--- a/drivers/net/wireless/libertas/if_spi.h
+++ b/drivers/net/wireless/libertas/if_spi.h
@@ -22,6 +22,9 @@
22#define IF_SPI_CMD_BUF_SIZE 2400 22#define IF_SPI_CMD_BUF_SIZE 2400
23 23
24/***************** Firmware *****************/ 24/***************** Firmware *****************/
25
26#define IF_SPI_FW_NAME_MAX 30
27
25struct chip_ident { 28struct chip_ident {
26 u16 chip_id; 29 u16 chip_id;
27 u16 name; 30 u16 name;
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index d649caebf08a..1844c5adf6e9 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -61,11 +61,9 @@ static ssize_t if_usb_firmware_set(struct device *dev,
61{ 61{
62 struct lbs_private *priv = to_net_dev(dev)->ml_priv; 62 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
63 struct if_usb_card *cardp = priv->card; 63 struct if_usb_card *cardp = priv->card;
64 char fwname[FIRMWARE_NAME_MAX];
65 int ret; 64 int ret;
66 65
67 sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */ 66 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW);
68 ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_FW);
69 if (ret == 0) 67 if (ret == 0)
70 return count; 68 return count;
71 69
@@ -88,11 +86,9 @@ static ssize_t if_usb_boot2_set(struct device *dev,
88{ 86{
89 struct lbs_private *priv = to_net_dev(dev)->ml_priv; 87 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
90 struct if_usb_card *cardp = priv->card; 88 struct if_usb_card *cardp = priv->card;
91 char fwname[FIRMWARE_NAME_MAX];
92 int ret; 89 int ret;
93 90
94 sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */ 91 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2);
95 ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_BOOT2);
96 if (ret == 0) 92 if (ret == 0)
97 return count; 93 return count;
98 94
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f67325387902..8d88daeed0c6 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1212,7 +1212,7 @@ static int __devinit netfront_probe(struct xenbus_device *dev,
1212 } 1212 }
1213 1213
1214 info = netdev_priv(netdev); 1214 info = netdev_priv(netdev);
1215 dev->dev.driver_data = info; 1215 dev_set_drvdata(&dev->dev, info);
1216 1216
1217 err = register_netdev(info->netdev); 1217 err = register_netdev(info->netdev);
1218 if (err) { 1218 if (err) {
@@ -1233,7 +1233,7 @@ static int __devinit netfront_probe(struct xenbus_device *dev,
1233 1233
1234 fail: 1234 fail:
1235 free_netdev(netdev); 1235 free_netdev(netdev);
1236 dev->dev.driver_data = NULL; 1236 dev_set_drvdata(&dev->dev, NULL);
1237 return err; 1237 return err;
1238} 1238}
1239 1239
@@ -1275,7 +1275,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1275 */ 1275 */
1276static int netfront_resume(struct xenbus_device *dev) 1276static int netfront_resume(struct xenbus_device *dev)
1277{ 1277{
1278 struct netfront_info *info = dev->dev.driver_data; 1278 struct netfront_info *info = dev_get_drvdata(&dev->dev);
1279 1279
1280 dev_dbg(&dev->dev, "%s\n", dev->nodename); 1280 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1281 1281
@@ -1600,7 +1600,7 @@ static int xennet_connect(struct net_device *dev)
1600static void backend_changed(struct xenbus_device *dev, 1600static void backend_changed(struct xenbus_device *dev,
1601 enum xenbus_state backend_state) 1601 enum xenbus_state backend_state)
1602{ 1602{
1603 struct netfront_info *np = dev->dev.driver_data; 1603 struct netfront_info *np = dev_get_drvdata(&dev->dev);
1604 struct net_device *netdev = np->netdev; 1604 struct net_device *netdev = np->netdev;
1605 1605
1606 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); 1606 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
@@ -1774,7 +1774,7 @@ static struct xenbus_device_id netfront_ids[] = {
1774 1774
1775static int __devexit xennet_remove(struct xenbus_device *dev) 1775static int __devexit xennet_remove(struct xenbus_device *dev)
1776{ 1776{
1777 struct netfront_info *info = dev->dev.driver_data; 1777 struct netfront_info *info = dev_get_drvdata(&dev->dev);
1778 1778
1779 dev_dbg(&dev->dev, "%s\n", dev->nodename); 1779 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1780 1780
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index f415fdd9a885..5b89f404e668 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -373,7 +373,7 @@ static int __init eisa_probe(struct parisc_device *dev)
373 if (result >= 0) { 373 if (result >= 0) {
374 /* FIXME : Don't enumerate the bus twice. */ 374 /* FIXME : Don't enumerate the bus twice. */
375 eisa_dev.root.dev = &dev->dev; 375 eisa_dev.root.dev = &dev->dev;
376 dev->dev.driver_data = &eisa_dev.root; 376 dev_set_drvdata(&dev->dev, &eisa_dev.root);
377 eisa_dev.root.bus_base_addr = 0; 377 eisa_dev.root.bus_base_addr = 0;
378 eisa_dev.root.res = &eisa_dev.hba.io_space; 378 eisa_dev.root.res = &eisa_dev.hba.io_space;
379 eisa_dev.root.slots = result; 379 eisa_dev.root.slots = result;
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index e5999c4cedc8..d46dd57450ac 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -2010,7 +2010,7 @@ void __init sba_init(void)
2010void * sba_get_iommu(struct parisc_device *pci_hba) 2010void * sba_get_iommu(struct parisc_device *pci_hba)
2011{ 2011{
2012 struct parisc_device *sba_dev = parisc_parent(pci_hba); 2012 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2013 struct sba_device *sba = sba_dev->dev.driver_data; 2013 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2014 char t = sba_dev->id.hw_type; 2014 char t = sba_dev->id.hw_type;
2015 int iocnum = (pci_hba->hw_path >> 3); /* rope # */ 2015 int iocnum = (pci_hba->hw_path >> 3); /* rope # */
2016 2016
@@ -2031,7 +2031,7 @@ void * sba_get_iommu(struct parisc_device *pci_hba)
2031void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r) 2031void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
2032{ 2032{
2033 struct parisc_device *sba_dev = parisc_parent(pci_hba); 2033 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2034 struct sba_device *sba = sba_dev->dev.driver_data; 2034 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2035 char t = sba_dev->id.hw_type; 2035 char t = sba_dev->id.hw_type;
2036 int i; 2036 int i;
2037 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */ 2037 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
@@ -2073,7 +2073,7 @@ void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
2073void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r ) 2073void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
2074{ 2074{
2075 struct parisc_device *sba_dev = parisc_parent(pci_hba); 2075 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2076 struct sba_device *sba = sba_dev->dev.driver_data; 2076 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2077 char t = sba_dev->id.hw_type; 2077 char t = sba_dev->id.hw_type;
2078 int base, size; 2078 int base, size;
2079 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */ 2079 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index ea31a452b153..5d6de380e42b 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -376,14 +376,14 @@ static int __devinit parport_init_chip(struct parisc_device *dev)
376 /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, NULL); 376 /* PARPORT_IRQ_NONE */ PARPORT_DMA_NONE, NULL);
377 if (p) 377 if (p)
378 parport_count++; 378 parport_count++;
379 dev->dev.driver_data = p; 379 dev_set_drvdata(&dev->dev, p);
380 380
381 return 0; 381 return 0;
382} 382}
383 383
384static int __devexit parport_remove_chip(struct parisc_device *dev) 384static int __devexit parport_remove_chip(struct parisc_device *dev)
385{ 385{
386 struct parport *p = dev->dev.driver_data; 386 struct parport *p = dev_get_drvdata(&dev->dev);
387 if (p) { 387 if (p) {
388 struct parport_gsc_private *priv = p->private_data; 388 struct parport_gsc_private *priv = p->private_data;
389 struct parport_operations *ops = p->ops; 389 struct parport_operations *ops = p->ops;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1a91bf9687af..07bbb9b3b93f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -24,6 +24,11 @@
24#include <asm/setup.h> 24#include <asm/setup.h>
25#include "pci.h" 25#include "pci.h"
26 26
27const char *pci_power_names[] = {
28 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
29};
30EXPORT_SYMBOL_GPL(pci_power_names);
31
27unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT; 32unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
28 33
29#ifdef CONFIG_PCI_DOMAINS 34#ifdef CONFIG_PCI_DOMAINS
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e39982503863..13ffdc35ea0e 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -275,7 +275,7 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
275 memset(device, 0, sizeof(struct device)); 275 memset(device, 0, sizeof(struct device));
276 device->bus = &pcie_port_bus_type; 276 device->bus = &pcie_port_bus_type;
277 device->driver = NULL; 277 device->driver = NULL;
278 device->driver_data = NULL; 278 dev_set_drvdata(device, NULL);
279 device->release = release_pcie_device; /* callback to free pcie dev */ 279 device->release = release_pcie_device; /* callback to free pcie dev */
280 dev_set_name(device, "%s:pcie%02x", 280 dev_set_name(device, "%s:pcie%02x",
281 pci_name(parent), get_descriptor_id(port_type, service_type)); 281 pci_name(parent), get_descriptor_id(port_type, service_type));
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 47cab31ff6e4..304ff6d5cf3b 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -394,7 +394,7 @@ static int pcmcia_device_probe(struct device * dev)
394 p_drv = to_pcmcia_drv(dev->driver); 394 p_drv = to_pcmcia_drv(dev->driver);
395 s = p_dev->socket; 395 s = p_dev->socket;
396 396
397 /* The PCMCIA code passes the match data in via dev->driver_data 397 /* The PCMCIA code passes the match data in via dev_set_drvdata(dev)
398 * which is an ugly hack. Once the driver probe is called it may 398 * which is an ugly hack. Once the driver probe is called it may
399 * and often will overwrite the match data so we must save it first 399 * and often will overwrite the match data so we must save it first
400 * 400 *
@@ -404,7 +404,7 @@ static int pcmcia_device_probe(struct device * dev)
404 * call which will then check whether there are two 404 * call which will then check whether there are two
405 * pseudo devices, and if not, add the second one. 405 * pseudo devices, and if not, add the second one.
406 */ 406 */
407 did = p_dev->dev.driver_data; 407 did = dev_get_drvdata(&p_dev->dev);
408 408
409 ds_dev_dbg(1, dev, "trying to bind to %s\n", p_drv->drv.name); 409 ds_dev_dbg(1, dev, "trying to bind to %s\n", p_drv->drv.name);
410 410
@@ -499,7 +499,7 @@ static int pcmcia_device_remove(struct device * dev)
499 * pseudo multi-function card, we need to unbind 499 * pseudo multi-function card, we need to unbind
500 * all devices 500 * all devices
501 */ 501 */
502 did = p_dev->dev.driver_data; 502 did = dev_get_drvdata(&p_dev->dev);
503 if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) && 503 if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
504 (p_dev->socket->device_count != 0) && 504 (p_dev->socket->device_count != 0) &&
505 (p_dev->device_no == 0)) 505 (p_dev->device_no == 0))
@@ -828,7 +828,6 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
828{ 828{
829 struct pcmcia_socket *s = dev->socket; 829 struct pcmcia_socket *s = dev->socket;
830 const struct firmware *fw; 830 const struct firmware *fw;
831 char path[FIRMWARE_NAME_MAX];
832 int ret = -ENOMEM; 831 int ret = -ENOMEM;
833 int no_funcs; 832 int no_funcs;
834 int old_funcs; 833 int old_funcs;
@@ -839,16 +838,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
839 838
840 ds_dev_dbg(1, &dev->dev, "trying to load CIS file %s\n", filename); 839 ds_dev_dbg(1, &dev->dev, "trying to load CIS file %s\n", filename);
841 840
842 if (strlen(filename) > (FIRMWARE_NAME_MAX - 1)) { 841 if (request_firmware(&fw, filename, &dev->dev) == 0) {
843 dev_printk(KERN_WARNING, &dev->dev,
844 "pcmcia: CIS filename is too long [%s]\n",
845 filename);
846 return -EINVAL;
847 }
848
849 snprintf(path, sizeof(path), "%s", filename);
850
851 if (request_firmware(&fw, path, &dev->dev) == 0) {
852 if (fw->size >= CISTPL_MAX_CIS_SIZE) { 842 if (fw->size >= CISTPL_MAX_CIS_SIZE) {
853 ret = -EINVAL; 843 ret = -EINVAL;
854 dev_printk(KERN_ERR, &dev->dev, 844 dev_printk(KERN_ERR, &dev->dev,
@@ -988,7 +978,7 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
988 return 0; 978 return 0;
989 } 979 }
990 980
991 dev->dev.driver_data = (void *) did; 981 dev_set_drvdata(&dev->dev, did);
992 982
993 return 1; 983 return 1;
994} 984}
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 1703b20cad5d..6095f8daecd7 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -915,12 +915,9 @@ static int ds_ioctl(struct inode * inode, struct file * file,
915 err = -EPERM; 915 err = -EPERM;
916 goto free_out; 916 goto free_out;
917 } else { 917 } else {
918 static int printed = 0; 918 printk_once(KERN_WARNING
919 if (!printed) { 919 "2.6. kernels use pcmciamtd instead of memory_cs.c and do not require special\n");
920 printk(KERN_WARNING "2.6. kernels use pcmciamtd instead of memory_cs.c and do not require special\n"); 920 printk_once(KERN_WARNING "MTD handling any more.\n");
921 printk(KERN_WARNING "MTD handling any more.\n");
922 printed++;
923 }
924 } 921 }
925 err = -EINVAL; 922 err = -EINVAL;
926 goto free_out; 923 goto free_out;
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index b79f31add39c..04dc734805c6 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -364,7 +364,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
364 int cstat, dstat; 364 int cstat, dstat;
365 int count; 365 int count;
366 366
367 raw = cdev->dev.driver_data; 367 raw = dev_get_drvdata(&cdev->dev);
368 req = (struct raw3215_req *) intparm; 368 req = (struct raw3215_req *) intparm;
369 cstat = irb->scsw.cmd.cstat; 369 cstat = irb->scsw.cmd.cstat;
370 dstat = irb->scsw.cmd.dstat; 370 dstat = irb->scsw.cmd.dstat;
@@ -652,7 +652,7 @@ static int raw3215_probe (struct ccw_device *cdev)
652 int line; 652 int line;
653 653
654 /* Console is special. */ 654 /* Console is special. */
655 if (raw3215[0] && (cdev->dev.driver_data == raw3215[0])) 655 if (raw3215[0] && (raw3215[0] == dev_get_drvdata(&cdev->dev)))
656 return 0; 656 return 0;
657 raw = kmalloc(sizeof(struct raw3215_info) + 657 raw = kmalloc(sizeof(struct raw3215_info) +
658 RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA); 658 RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA);
@@ -686,7 +686,7 @@ static int raw3215_probe (struct ccw_device *cdev)
686 } 686 }
687 init_waitqueue_head(&raw->empty_wait); 687 init_waitqueue_head(&raw->empty_wait);
688 688
689 cdev->dev.driver_data = raw; 689 dev_set_drvdata(&cdev->dev, raw);
690 cdev->handler = raw3215_irq; 690 cdev->handler = raw3215_irq;
691 691
692 return 0; 692 return 0;
@@ -697,9 +697,9 @@ static void raw3215_remove (struct ccw_device *cdev)
697 struct raw3215_info *raw; 697 struct raw3215_info *raw;
698 698
699 ccw_device_set_offline(cdev); 699 ccw_device_set_offline(cdev);
700 raw = cdev->dev.driver_data; 700 raw = dev_get_drvdata(&cdev->dev);
701 if (raw) { 701 if (raw) {
702 cdev->dev.driver_data = NULL; 702 dev_set_drvdata(&cdev->dev, NULL);
703 kfree(raw->buffer); 703 kfree(raw->buffer);
704 kfree(raw); 704 kfree(raw);
705 } 705 }
@@ -709,7 +709,7 @@ static int raw3215_set_online (struct ccw_device *cdev)
709{ 709{
710 struct raw3215_info *raw; 710 struct raw3215_info *raw;
711 711
712 raw = cdev->dev.driver_data; 712 raw = dev_get_drvdata(&cdev->dev);
713 if (!raw) 713 if (!raw)
714 return -ENODEV; 714 return -ENODEV;
715 715
@@ -720,7 +720,7 @@ static int raw3215_set_offline (struct ccw_device *cdev)
720{ 720{
721 struct raw3215_info *raw; 721 struct raw3215_info *raw;
722 722
723 raw = cdev->dev.driver_data; 723 raw = dev_get_drvdata(&cdev->dev);
724 if (!raw) 724 if (!raw)
725 return -ENODEV; 725 return -ENODEV;
726 726
@@ -898,7 +898,7 @@ static int __init con3215_init(void)
898 raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE); 898 raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
899 raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE); 899 raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
900 raw->cdev = cdev; 900 raw->cdev = cdev;
901 cdev->dev.driver_data = raw; 901 dev_set_drvdata(&cdev->dev, raw);
902 cdev->handler = raw3215_irq; 902 cdev->handler = raw3215_irq;
903 903
904 raw->flags |= RAW3215_FIXED; 904 raw->flags |= RAW3215_FIXED;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 81c151b5f0ac..acab7b2dfe8a 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -357,7 +357,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
357 struct raw3270_request *rq; 357 struct raw3270_request *rq;
358 int rc; 358 int rc;
359 359
360 rp = (struct raw3270 *) cdev->dev.driver_data; 360 rp = dev_get_drvdata(&cdev->dev);
361 if (!rp) 361 if (!rp)
362 return; 362 return;
363 rq = (struct raw3270_request *) intparm; 363 rq = (struct raw3270_request *) intparm;
@@ -831,7 +831,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
831 if (rp->minor == -1) 831 if (rp->minor == -1)
832 return -EUSERS; 832 return -EUSERS;
833 rp->cdev = cdev; 833 rp->cdev = cdev;
834 cdev->dev.driver_data = rp; 834 dev_set_drvdata(&cdev->dev, rp);
835 cdev->handler = raw3270_irq; 835 cdev->handler = raw3270_irq;
836 return 0; 836 return 0;
837} 837}
@@ -1112,7 +1112,7 @@ raw3270_delete_device(struct raw3270 *rp)
1112 /* Disconnect from ccw_device. */ 1112 /* Disconnect from ccw_device. */
1113 cdev = rp->cdev; 1113 cdev = rp->cdev;
1114 rp->cdev = NULL; 1114 rp->cdev = NULL;
1115 cdev->dev.driver_data = NULL; 1115 dev_set_drvdata(&cdev->dev, NULL);
1116 cdev->handler = NULL; 1116 cdev->handler = NULL;
1117 1117
1118 /* Put ccw_device structure. */ 1118 /* Put ccw_device structure. */
@@ -1136,7 +1136,7 @@ static ssize_t
1136raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf) 1136raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
1137{ 1137{
1138 return snprintf(buf, PAGE_SIZE, "%i\n", 1138 return snprintf(buf, PAGE_SIZE, "%i\n",
1139 ((struct raw3270 *) dev->driver_data)->model); 1139 ((struct raw3270 *) dev_get_drvdata(dev))->model);
1140} 1140}
1141static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL); 1141static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
1142 1142
@@ -1144,7 +1144,7 @@ static ssize_t
1144raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf) 1144raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
1145{ 1145{
1146 return snprintf(buf, PAGE_SIZE, "%i\n", 1146 return snprintf(buf, PAGE_SIZE, "%i\n",
1147 ((struct raw3270 *) dev->driver_data)->rows); 1147 ((struct raw3270 *) dev_get_drvdata(dev))->rows);
1148} 1148}
1149static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL); 1149static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
1150 1150
@@ -1152,7 +1152,7 @@ static ssize_t
1152raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf) 1152raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
1153{ 1153{
1154 return snprintf(buf, PAGE_SIZE, "%i\n", 1154 return snprintf(buf, PAGE_SIZE, "%i\n",
1155 ((struct raw3270 *) dev->driver_data)->cols); 1155 ((struct raw3270 *) dev_get_drvdata(dev))->cols);
1156} 1156}
1157static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL); 1157static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
1158 1158
@@ -1289,7 +1289,7 @@ raw3270_remove (struct ccw_device *cdev)
1289 struct raw3270_view *v; 1289 struct raw3270_view *v;
1290 struct raw3270_notifier *np; 1290 struct raw3270_notifier *np;
1291 1291
1292 rp = cdev->dev.driver_data; 1292 rp = dev_get_drvdata(&cdev->dev);
1293 /* 1293 /*
1294 * _remove is the opposite of _probe; it's probe that 1294 * _remove is the opposite of _probe; it's probe that
1295 * should set up rp. raw3270_remove gets entered for 1295 * should set up rp. raw3270_remove gets entered for
@@ -1337,7 +1337,7 @@ raw3270_set_offline (struct ccw_device *cdev)
1337{ 1337{
1338 struct raw3270 *rp; 1338 struct raw3270 *rp;
1339 1339
1340 rp = cdev->dev.driver_data; 1340 rp = dev_get_drvdata(&cdev->dev);
1341 if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) 1341 if (test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags))
1342 return -EBUSY; 1342 return -EBUSY;
1343 raw3270_remove(cdev); 1343 raw3270_remove(cdev);
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 144d2a5e1a92..5a519fac37b7 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1289,7 +1289,7 @@ static int
1289tape_34xx_online(struct ccw_device *cdev) 1289tape_34xx_online(struct ccw_device *cdev)
1290{ 1290{
1291 return tape_generic_online( 1291 return tape_generic_online(
1292 cdev->dev.driver_data, 1292 dev_get_drvdata(&cdev->dev),
1293 &tape_discipline_34xx 1293 &tape_discipline_34xx
1294 ); 1294 );
1295} 1295}
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 23e6598bc4b5..418f72dd39b4 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1703,7 +1703,7 @@ static struct ccw_device_id tape_3590_ids[] = {
1703static int 1703static int
1704tape_3590_online(struct ccw_device *cdev) 1704tape_3590_online(struct ccw_device *cdev)
1705{ 1705{
1706 return tape_generic_online(cdev->dev.driver_data, 1706 return tape_generic_online(dev_get_drvdata(&cdev->dev),
1707 &tape_discipline_3590); 1707 &tape_discipline_3590);
1708} 1708}
1709 1709
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 3ebaa8eb5c86..595aa04cfd01 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -92,7 +92,7 @@ tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *
92{ 92{
93 struct tape_device *tdev; 93 struct tape_device *tdev;
94 94
95 tdev = (struct tape_device *) dev->driver_data; 95 tdev = dev_get_drvdata(dev);
96 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state); 96 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
97} 97}
98 98
@@ -104,7 +104,7 @@ tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *b
104{ 104{
105 struct tape_device *tdev; 105 struct tape_device *tdev;
106 106
107 tdev = (struct tape_device *) dev->driver_data; 107 tdev = dev_get_drvdata(dev);
108 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor); 108 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
109} 109}
110 110
@@ -116,7 +116,7 @@ tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
116{ 116{
117 struct tape_device *tdev; 117 struct tape_device *tdev;
118 118
119 tdev = (struct tape_device *) dev->driver_data; 119 tdev = dev_get_drvdata(dev);
120 return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ? 120 return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
121 "OFFLINE" : tape_state_verbose[tdev->tape_state]); 121 "OFFLINE" : tape_state_verbose[tdev->tape_state]);
122} 122}
@@ -130,7 +130,7 @@ tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf
130 struct tape_device *tdev; 130 struct tape_device *tdev;
131 ssize_t rc; 131 ssize_t rc;
132 132
133 tdev = (struct tape_device *) dev->driver_data; 133 tdev = dev_get_drvdata(dev);
134 if (tdev->first_minor < 0) 134 if (tdev->first_minor < 0)
135 return scnprintf(buf, PAGE_SIZE, "N/A\n"); 135 return scnprintf(buf, PAGE_SIZE, "N/A\n");
136 136
@@ -156,7 +156,7 @@ tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf
156{ 156{
157 struct tape_device *tdev; 157 struct tape_device *tdev;
158 158
159 tdev = (struct tape_device *) dev->driver_data; 159 tdev = dev_get_drvdata(dev);
160 160
161 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size); 161 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
162} 162}
@@ -440,7 +440,7 @@ tape_generic_offline(struct ccw_device *cdev)
440{ 440{
441 struct tape_device *device; 441 struct tape_device *device;
442 442
443 device = cdev->dev.driver_data; 443 device = dev_get_drvdata(&cdev->dev);
444 if (!device) { 444 if (!device) {
445 return -ENODEV; 445 return -ENODEV;
446 } 446 }
@@ -583,7 +583,7 @@ tape_generic_probe(struct ccw_device *cdev)
583 tape_put_device(device); 583 tape_put_device(device);
584 return ret; 584 return ret;
585 } 585 }
586 cdev->dev.driver_data = device; 586 dev_set_drvdata(&cdev->dev, device);
587 cdev->handler = __tape_do_irq; 587 cdev->handler = __tape_do_irq;
588 device->cdev = cdev; 588 device->cdev = cdev;
589 ccw_device_get_id(cdev, &dev_id); 589 ccw_device_get_id(cdev, &dev_id);
@@ -622,7 +622,7 @@ tape_generic_remove(struct ccw_device *cdev)
622{ 622{
623 struct tape_device * device; 623 struct tape_device * device;
624 624
625 device = cdev->dev.driver_data; 625 device = dev_get_drvdata(&cdev->dev);
626 if (!device) { 626 if (!device) {
627 return; 627 return;
628 } 628 }
@@ -662,9 +662,9 @@ tape_generic_remove(struct ccw_device *cdev)
662 tape_cleanup_device(device); 662 tape_cleanup_device(device);
663 } 663 }
664 664
665 if (cdev->dev.driver_data != NULL) { 665 if (!dev_get_drvdata(&cdev->dev)) {
666 sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group); 666 sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
667 cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data); 667 dev_set_drvdata(&cdev->dev, tape_put_device(dev_get_drvdata(&cdev->dev)));
668 } 668 }
669} 669}
670 670
@@ -1060,7 +1060,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1060 struct tape_request *request; 1060 struct tape_request *request;
1061 int rc; 1061 int rc;
1062 1062
1063 device = (struct tape_device *) cdev->dev.driver_data; 1063 device = dev_get_drvdata(&cdev->dev);
1064 if (device == NULL) { 1064 if (device == NULL) {
1065 return; 1065 return;
1066 } 1066 }
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index e925808c2149..411cfa3c7719 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -504,7 +504,7 @@ static ssize_t vmlogrdr_autopurge_store(struct device * dev,
504 struct device_attribute *attr, 504 struct device_attribute *attr,
505 const char * buf, size_t count) 505 const char * buf, size_t count)
506{ 506{
507 struct vmlogrdr_priv_t *priv = dev->driver_data; 507 struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
508 ssize_t ret = count; 508 ssize_t ret = count;
509 509
510 switch (buf[0]) { 510 switch (buf[0]) {
@@ -525,7 +525,7 @@ static ssize_t vmlogrdr_autopurge_show(struct device *dev,
525 struct device_attribute *attr, 525 struct device_attribute *attr,
526 char *buf) 526 char *buf)
527{ 527{
528 struct vmlogrdr_priv_t *priv = dev->driver_data; 528 struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
529 return sprintf(buf, "%u\n", priv->autopurge); 529 return sprintf(buf, "%u\n", priv->autopurge);
530} 530}
531 531
@@ -541,7 +541,7 @@ static ssize_t vmlogrdr_purge_store(struct device * dev,
541 541
542 char cp_command[80]; 542 char cp_command[80];
543 char cp_response[80]; 543 char cp_response[80];
544 struct vmlogrdr_priv_t *priv = dev->driver_data; 544 struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
545 545
546 if (buf[0] != '1') 546 if (buf[0] != '1')
547 return -EINVAL; 547 return -EINVAL;
@@ -578,7 +578,7 @@ static ssize_t vmlogrdr_autorecording_store(struct device *dev,
578 struct device_attribute *attr, 578 struct device_attribute *attr,
579 const char *buf, size_t count) 579 const char *buf, size_t count)
580{ 580{
581 struct vmlogrdr_priv_t *priv = dev->driver_data; 581 struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
582 ssize_t ret = count; 582 ssize_t ret = count;
583 583
584 switch (buf[0]) { 584 switch (buf[0]) {
@@ -599,7 +599,7 @@ static ssize_t vmlogrdr_autorecording_show(struct device *dev,
599 struct device_attribute *attr, 599 struct device_attribute *attr,
600 char *buf) 600 char *buf)
601{ 601{
602 struct vmlogrdr_priv_t *priv = dev->driver_data; 602 struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
603 return sprintf(buf, "%u\n", priv->autorecording); 603 return sprintf(buf, "%u\n", priv->autorecording);
604} 604}
605 605
@@ -612,7 +612,7 @@ static ssize_t vmlogrdr_recording_store(struct device * dev,
612 struct device_attribute *attr, 612 struct device_attribute *attr,
613 const char * buf, size_t count) 613 const char * buf, size_t count)
614{ 614{
615 struct vmlogrdr_priv_t *priv = dev->driver_data; 615 struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
616 ssize_t ret; 616 ssize_t ret;
617 617
618 switch (buf[0]) { 618 switch (buf[0]) {
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 92458219a9e9..7d9e67cb6471 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -80,11 +80,11 @@ static DEFINE_MUTEX(vmur_mutex);
80 * 80 *
81 * Each ur device (urd) contains a reference to its corresponding ccw device 81 * Each ur device (urd) contains a reference to its corresponding ccw device
82 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the 82 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
83 * ur device using the cdev->dev.driver_data pointer. 83 * ur device using dev_get_drvdata(&cdev->dev) pointer.
84 * 84 *
85 * urd references: 85 * urd references:
86 * - ur_probe gets a urd reference, ur_remove drops the reference 86 * - ur_probe gets a urd reference, ur_remove drops the reference
87 * (cdev->dev.driver_data) 87 * dev_get_drvdata(&cdev->dev)
88 * - ur_open gets a urd reference, ur_relase drops the reference 88 * - ur_open gets a urd reference, ur_relase drops the reference
89 * (urf->urd) 89 * (urf->urd)
90 * 90 *
@@ -92,7 +92,7 @@ static DEFINE_MUTEX(vmur_mutex);
92 * - urdev_alloc get a cdev reference (urd->cdev) 92 * - urdev_alloc get a cdev reference (urd->cdev)
93 * - urdev_free drops the cdev reference (urd->cdev) 93 * - urdev_free drops the cdev reference (urd->cdev)
94 * 94 *
95 * Setting and clearing of cdev->dev.driver_data is protected by the ccwdev lock 95 * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the ccwdev lock
96 */ 96 */
97static struct urdev *urdev_alloc(struct ccw_device *cdev) 97static struct urdev *urdev_alloc(struct ccw_device *cdev)
98{ 98{
@@ -131,7 +131,7 @@ static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
131 unsigned long flags; 131 unsigned long flags;
132 132
133 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 133 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
134 urd = cdev->dev.driver_data; 134 urd = dev_get_drvdata(&cdev->dev);
135 if (urd) 135 if (urd)
136 urdev_get(urd); 136 urdev_get(urd);
137 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 137 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
@@ -310,7 +310,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
310 TRACE("ur_int_handler: unsolicited interrupt\n"); 310 TRACE("ur_int_handler: unsolicited interrupt\n");
311 return; 311 return;
312 } 312 }
313 urd = cdev->dev.driver_data; 313 urd = dev_get_drvdata(&cdev->dev);
314 BUG_ON(!urd); 314 BUG_ON(!urd);
315 /* On special conditions irb is an error pointer */ 315 /* On special conditions irb is an error pointer */
316 if (IS_ERR(irb)) 316 if (IS_ERR(irb))
@@ -856,7 +856,7 @@ static int ur_probe(struct ccw_device *cdev)
856 goto fail_remove_attr; 856 goto fail_remove_attr;
857 } 857 }
858 spin_lock_irq(get_ccwdev_lock(cdev)); 858 spin_lock_irq(get_ccwdev_lock(cdev));
859 cdev->dev.driver_data = urd; 859 dev_set_drvdata(&cdev->dev, urd);
860 spin_unlock_irq(get_ccwdev_lock(cdev)); 860 spin_unlock_irq(get_ccwdev_lock(cdev));
861 861
862 mutex_unlock(&vmur_mutex); 862 mutex_unlock(&vmur_mutex);
@@ -996,8 +996,8 @@ static void ur_remove(struct ccw_device *cdev)
996 ur_remove_attributes(&cdev->dev); 996 ur_remove_attributes(&cdev->dev);
997 997
998 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 998 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
999 urdev_put(cdev->dev.driver_data); 999 urdev_put(dev_get_drvdata(&cdev->dev));
1000 cdev->dev.driver_data = NULL; 1000 dev_set_drvdata(&cdev->dev, NULL);
1001 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 1001 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1002 1002
1003 mutex_unlock(&vmur_mutex); 1003 mutex_unlock(&vmur_mutex);
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index d40f7a934f94..f370f8d460a7 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -290,7 +290,7 @@ claw_probe(struct ccwgroup_device *cgdev)
290 if (!get_device(&cgdev->dev)) 290 if (!get_device(&cgdev->dev))
291 return -ENODEV; 291 return -ENODEV;
292 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); 292 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
293 cgdev->dev.driver_data = privptr; 293 dev_set_drvdata(&cgdev->dev, privptr);
294 if (privptr == NULL) { 294 if (privptr == NULL) {
295 probe_error(cgdev); 295 probe_error(cgdev);
296 put_device(&cgdev->dev); 296 put_device(&cgdev->dev);
@@ -597,14 +597,14 @@ claw_irq_handler(struct ccw_device *cdev,
597 597
598 CLAW_DBF_TEXT(4, trace, "clawirq"); 598 CLAW_DBF_TEXT(4, trace, "clawirq");
599 /* Bypass all 'unsolicited interrupts' */ 599 /* Bypass all 'unsolicited interrupts' */
600 if (!cdev->dev.driver_data) { 600 privptr = dev_get_drvdata(&cdev->dev);
601 if (!privptr) {
601 dev_warn(&cdev->dev, "An uninitialized CLAW device received an" 602 dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
602 " IRQ, c-%02x d-%02x\n", 603 " IRQ, c-%02x d-%02x\n",
603 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); 604 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
604 CLAW_DBF_TEXT(2, trace, "badirq"); 605 CLAW_DBF_TEXT(2, trace, "badirq");
605 return; 606 return;
606 } 607 }
607 privptr = (struct claw_privbk *)cdev->dev.driver_data;
608 608
609 /* Try to extract channel from driver data. */ 609 /* Try to extract channel from driver data. */
610 if (privptr->channel[READ].cdev == cdev) 610 if (privptr->channel[READ].cdev == cdev)
@@ -1986,9 +1986,9 @@ probe_error( struct ccwgroup_device *cgdev)
1986 struct claw_privbk *privptr; 1986 struct claw_privbk *privptr;
1987 1987
1988 CLAW_DBF_TEXT(4, trace, "proberr"); 1988 CLAW_DBF_TEXT(4, trace, "proberr");
1989 privptr = (struct claw_privbk *) cgdev->dev.driver_data; 1989 privptr = dev_get_drvdata(&cgdev->dev);
1990 if (privptr != NULL) { 1990 if (privptr != NULL) {
1991 cgdev->dev.driver_data = NULL; 1991 dev_set_drvdata(&cgdev->dev, NULL);
1992 kfree(privptr->p_env); 1992 kfree(privptr->p_env);
1993 kfree(privptr->p_mtc_envelope); 1993 kfree(privptr->p_mtc_envelope);
1994 kfree(privptr); 1994 kfree(privptr);
@@ -2917,9 +2917,9 @@ claw_new_device(struct ccwgroup_device *cgdev)
2917 dev_info(&cgdev->dev, "add for %s\n", 2917 dev_info(&cgdev->dev, "add for %s\n",
2918 dev_name(&cgdev->cdev[READ]->dev)); 2918 dev_name(&cgdev->cdev[READ]->dev));
2919 CLAW_DBF_TEXT(2, setup, "new_dev"); 2919 CLAW_DBF_TEXT(2, setup, "new_dev");
2920 privptr = cgdev->dev.driver_data; 2920 privptr = dev_get_drvdata(&cgdev->dev);
2921 cgdev->cdev[READ]->dev.driver_data = privptr; 2921 dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
2922 cgdev->cdev[WRITE]->dev.driver_data = privptr; 2922 dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
2923 if (!privptr) 2923 if (!privptr)
2924 return -ENODEV; 2924 return -ENODEV;
2925 p_env = privptr->p_env; 2925 p_env = privptr->p_env;
@@ -2956,9 +2956,9 @@ claw_new_device(struct ccwgroup_device *cgdev)
2956 goto out; 2956 goto out;
2957 } 2957 }
2958 dev->ml_priv = privptr; 2958 dev->ml_priv = privptr;
2959 cgdev->dev.driver_data = privptr; 2959 dev_set_drvdata(&cgdev->dev, privptr);
2960 cgdev->cdev[READ]->dev.driver_data = privptr; 2960 dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
2961 cgdev->cdev[WRITE]->dev.driver_data = privptr; 2961 dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
2962 /* sysfs magic */ 2962 /* sysfs magic */
2963 SET_NETDEV_DEV(dev, &cgdev->dev); 2963 SET_NETDEV_DEV(dev, &cgdev->dev);
2964 if (register_netdev(dev) != 0) { 2964 if (register_netdev(dev) != 0) {
@@ -3024,7 +3024,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
3024 int ret; 3024 int ret;
3025 3025
3026 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); 3026 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3027 priv = cgdev->dev.driver_data; 3027 priv = dev_get_drvdata(&cgdev->dev);
3028 if (!priv) 3028 if (!priv)
3029 return -ENODEV; 3029 return -ENODEV;
3030 ndev = priv->channel[READ].ndev; 3030 ndev = priv->channel[READ].ndev;
@@ -3054,7 +3054,7 @@ claw_remove_device(struct ccwgroup_device *cgdev)
3054 3054
3055 BUG_ON(!cgdev); 3055 BUG_ON(!cgdev);
3056 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); 3056 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3057 priv = cgdev->dev.driver_data; 3057 priv = dev_get_drvdata(&cgdev->dev);
3058 BUG_ON(!priv); 3058 BUG_ON(!priv);
3059 dev_info(&cgdev->dev, " will be removed.\n"); 3059 dev_info(&cgdev->dev, " will be removed.\n");
3060 if (cgdev->state == CCWGROUP_ONLINE) 3060 if (cgdev->state == CCWGROUP_ONLINE)
@@ -3069,9 +3069,9 @@ claw_remove_device(struct ccwgroup_device *cgdev)
3069 kfree(priv->channel[1].irb); 3069 kfree(priv->channel[1].irb);
3070 priv->channel[1].irb=NULL; 3070 priv->channel[1].irb=NULL;
3071 kfree(priv); 3071 kfree(priv);
3072 cgdev->dev.driver_data=NULL; 3072 dev_set_drvdata(&cgdev->dev, NULL);
3073 cgdev->cdev[READ]->dev.driver_data = NULL; 3073 dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL);
3074 cgdev->cdev[WRITE]->dev.driver_data = NULL; 3074 dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL);
3075 put_device(&cgdev->dev); 3075 put_device(&cgdev->dev);
3076 3076
3077 return; 3077 return;
@@ -3087,7 +3087,7 @@ claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
3087 struct claw_privbk *priv; 3087 struct claw_privbk *priv;
3088 struct claw_env * p_env; 3088 struct claw_env * p_env;
3089 3089
3090 priv = dev->driver_data; 3090 priv = dev_get_drvdata(dev);
3091 if (!priv) 3091 if (!priv)
3092 return -ENODEV; 3092 return -ENODEV;
3093 p_env = priv->p_env; 3093 p_env = priv->p_env;
@@ -3101,7 +3101,7 @@ claw_hname_write(struct device *dev, struct device_attribute *attr,
3101 struct claw_privbk *priv; 3101 struct claw_privbk *priv;
3102 struct claw_env * p_env; 3102 struct claw_env * p_env;
3103 3103
3104 priv = dev->driver_data; 3104 priv = dev_get_drvdata(dev);
3105 if (!priv) 3105 if (!priv)
3106 return -ENODEV; 3106 return -ENODEV;
3107 p_env = priv->p_env; 3107 p_env = priv->p_env;
@@ -3125,7 +3125,7 @@ claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
3125 struct claw_privbk *priv; 3125 struct claw_privbk *priv;
3126 struct claw_env * p_env; 3126 struct claw_env * p_env;
3127 3127
3128 priv = dev->driver_data; 3128 priv = dev_get_drvdata(dev);
3129 if (!priv) 3129 if (!priv)
3130 return -ENODEV; 3130 return -ENODEV;
3131 p_env = priv->p_env; 3131 p_env = priv->p_env;
@@ -3139,7 +3139,7 @@ claw_adname_write(struct device *dev, struct device_attribute *attr,
3139 struct claw_privbk *priv; 3139 struct claw_privbk *priv;
3140 struct claw_env * p_env; 3140 struct claw_env * p_env;
3141 3141
3142 priv = dev->driver_data; 3142 priv = dev_get_drvdata(dev);
3143 if (!priv) 3143 if (!priv)
3144 return -ENODEV; 3144 return -ENODEV;
3145 p_env = priv->p_env; 3145 p_env = priv->p_env;
@@ -3163,7 +3163,7 @@ claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
3163 struct claw_privbk *priv; 3163 struct claw_privbk *priv;
3164 struct claw_env * p_env; 3164 struct claw_env * p_env;
3165 3165
3166 priv = dev->driver_data; 3166 priv = dev_get_drvdata(dev);
3167 if (!priv) 3167 if (!priv)
3168 return -ENODEV; 3168 return -ENODEV;
3169 p_env = priv->p_env; 3169 p_env = priv->p_env;
@@ -3178,7 +3178,7 @@ claw_apname_write(struct device *dev, struct device_attribute *attr,
3178 struct claw_privbk *priv; 3178 struct claw_privbk *priv;
3179 struct claw_env * p_env; 3179 struct claw_env * p_env;
3180 3180
3181 priv = dev->driver_data; 3181 priv = dev_get_drvdata(dev);
3182 if (!priv) 3182 if (!priv)
3183 return -ENODEV; 3183 return -ENODEV;
3184 p_env = priv->p_env; 3184 p_env = priv->p_env;
@@ -3212,7 +3212,7 @@ claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3212 struct claw_privbk *priv; 3212 struct claw_privbk *priv;
3213 struct claw_env * p_env; 3213 struct claw_env * p_env;
3214 3214
3215 priv = dev->driver_data; 3215 priv = dev_get_drvdata(dev);
3216 if (!priv) 3216 if (!priv)
3217 return -ENODEV; 3217 return -ENODEV;
3218 p_env = priv->p_env; 3218 p_env = priv->p_env;
@@ -3227,7 +3227,7 @@ claw_wbuff_write(struct device *dev, struct device_attribute *attr,
3227 struct claw_env * p_env; 3227 struct claw_env * p_env;
3228 int nnn,max; 3228 int nnn,max;
3229 3229
3230 priv = dev->driver_data; 3230 priv = dev_get_drvdata(dev);
3231 if (!priv) 3231 if (!priv)
3232 return -ENODEV; 3232 return -ENODEV;
3233 p_env = priv->p_env; 3233 p_env = priv->p_env;
@@ -3254,7 +3254,7 @@ claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3254 struct claw_privbk *priv; 3254 struct claw_privbk *priv;
3255 struct claw_env * p_env; 3255 struct claw_env * p_env;
3256 3256
3257 priv = dev->driver_data; 3257 priv = dev_get_drvdata(dev);
3258 if (!priv) 3258 if (!priv)
3259 return -ENODEV; 3259 return -ENODEV;
3260 p_env = priv->p_env; 3260 p_env = priv->p_env;
@@ -3269,7 +3269,7 @@ claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3269 struct claw_env *p_env; 3269 struct claw_env *p_env;
3270 int nnn,max; 3270 int nnn,max;
3271 3271
3272 priv = dev->driver_data; 3272 priv = dev_get_drvdata(dev);
3273 if (!priv) 3273 if (!priv)
3274 return -ENODEV; 3274 return -ENODEV;
3275 p_env = priv->p_env; 3275 p_env = priv->p_env;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 07a25c3f94b6..8c675905448b 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1936,7 +1936,7 @@ lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf)
1936{ 1936{
1937 struct lcs_card *card; 1937 struct lcs_card *card;
1938 1938
1939 card = (struct lcs_card *)dev->driver_data; 1939 card = dev_get_drvdata(dev);
1940 1940
1941 if (!card) 1941 if (!card)
1942 return 0; 1942 return 0;
@@ -1953,7 +1953,7 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
1953 struct lcs_card *card; 1953 struct lcs_card *card;
1954 int value; 1954 int value;
1955 1955
1956 card = (struct lcs_card *)dev->driver_data; 1956 card = dev_get_drvdata(dev);
1957 1957
1958 if (!card) 1958 if (!card)
1959 return 0; 1959 return 0;
@@ -1987,7 +1987,7 @@ lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
1987{ 1987{
1988 struct lcs_card *card; 1988 struct lcs_card *card;
1989 1989
1990 card = (struct lcs_card *)dev->driver_data; 1990 card = dev_get_drvdata(dev);
1991 1991
1992 return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0; 1992 return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
1993} 1993}
@@ -1998,7 +1998,7 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char
1998 struct lcs_card *card; 1998 struct lcs_card *card;
1999 int value; 1999 int value;
2000 2000
2001 card = (struct lcs_card *)dev->driver_data; 2001 card = dev_get_drvdata(dev);
2002 2002
2003 if (!card) 2003 if (!card)
2004 return 0; 2004 return 0;
@@ -2017,7 +2017,7 @@ static ssize_t
2017lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, 2017lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
2018 const char *buf, size_t count) 2018 const char *buf, size_t count)
2019{ 2019{
2020 struct lcs_card *card = dev->driver_data; 2020 struct lcs_card *card = dev_get_drvdata(dev);
2021 char *tmp; 2021 char *tmp;
2022 int i; 2022 int i;
2023 2023
@@ -2070,7 +2070,7 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev)
2070 put_device(&ccwgdev->dev); 2070 put_device(&ccwgdev->dev);
2071 return ret; 2071 return ret;
2072 } 2072 }
2073 ccwgdev->dev.driver_data = card; 2073 dev_set_drvdata(&ccwgdev->dev, card);
2074 ccwgdev->cdev[0]->handler = lcs_irq; 2074 ccwgdev->cdev[0]->handler = lcs_irq;
2075 ccwgdev->cdev[1]->handler = lcs_irq; 2075 ccwgdev->cdev[1]->handler = lcs_irq;
2076 card->gdev = ccwgdev; 2076 card->gdev = ccwgdev;
@@ -2087,7 +2087,7 @@ lcs_register_netdev(struct ccwgroup_device *ccwgdev)
2087 struct lcs_card *card; 2087 struct lcs_card *card;
2088 2088
2089 LCS_DBF_TEXT(2, setup, "regnetdv"); 2089 LCS_DBF_TEXT(2, setup, "regnetdv");
2090 card = (struct lcs_card *)ccwgdev->dev.driver_data; 2090 card = dev_get_drvdata(&ccwgdev->dev);
2091 if (card->dev->reg_state != NETREG_UNINITIALIZED) 2091 if (card->dev->reg_state != NETREG_UNINITIALIZED)
2092 return 0; 2092 return 0;
2093 SET_NETDEV_DEV(card->dev, &ccwgdev->dev); 2093 SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
@@ -2120,7 +2120,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2120 enum lcs_dev_states recover_state; 2120 enum lcs_dev_states recover_state;
2121 int rc; 2121 int rc;
2122 2122
2123 card = (struct lcs_card *)ccwgdev->dev.driver_data; 2123 card = dev_get_drvdata(&ccwgdev->dev);
2124 if (!card) 2124 if (!card)
2125 return -ENODEV; 2125 return -ENODEV;
2126 2126
@@ -2226,7 +2226,7 @@ __lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
2226 int ret; 2226 int ret;
2227 2227
2228 LCS_DBF_TEXT(3, setup, "shtdndev"); 2228 LCS_DBF_TEXT(3, setup, "shtdndev");
2229 card = (struct lcs_card *)ccwgdev->dev.driver_data; 2229 card = dev_get_drvdata(&ccwgdev->dev);
2230 if (!card) 2230 if (!card)
2231 return -ENODEV; 2231 return -ENODEV;
2232 if (recovery_mode == 0) { 2232 if (recovery_mode == 0) {
@@ -2293,7 +2293,7 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev)
2293{ 2293{
2294 struct lcs_card *card; 2294 struct lcs_card *card;
2295 2295
2296 card = (struct lcs_card *)ccwgdev->dev.driver_data; 2296 card = dev_get_drvdata(&ccwgdev->dev);
2297 if (!card) 2297 if (!card)
2298 return; 2298 return;
2299 2299
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index d58fea52557d..6d668642af27 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -34,8 +34,8 @@ static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
34 * sysfs related stuff 34 * sysfs related stuff
35 */ 35 */
36#define CARD_FROM_DEV(cdev) \ 36#define CARD_FROM_DEV(cdev) \
37 (struct lcs_card *) \ 37 (struct lcs_card *) dev_get_drvdata( \
38 ((struct ccwgroup_device *)cdev->dev.driver_data)->dev.driver_data; 38 &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
39/** 39/**
40 * CCW commands used in this driver 40 * CCW commands used in this driver
41 */ 41 */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index fdb02d043d3e..52574ce797b2 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1452,7 +1452,7 @@ static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1452static ssize_t user_show(struct device *dev, struct device_attribute *attr, 1452static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1453 char *buf) 1453 char *buf)
1454{ 1454{
1455 struct netiucv_priv *priv = dev->driver_data; 1455 struct netiucv_priv *priv = dev_get_drvdata(dev);
1456 1456
1457 IUCV_DBF_TEXT(trace, 5, __func__); 1457 IUCV_DBF_TEXT(trace, 5, __func__);
1458 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); 1458 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
@@ -1461,7 +1461,7 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1461static ssize_t user_write(struct device *dev, struct device_attribute *attr, 1461static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1462 const char *buf, size_t count) 1462 const char *buf, size_t count)
1463{ 1463{
1464 struct netiucv_priv *priv = dev->driver_data; 1464 struct netiucv_priv *priv = dev_get_drvdata(dev);
1465 struct net_device *ndev = priv->conn->netdev; 1465 struct net_device *ndev = priv->conn->netdev;
1466 char *p; 1466 char *p;
1467 char *tmp; 1467 char *tmp;
@@ -1518,7 +1518,8 @@ static DEVICE_ATTR(user, 0644, user_show, user_write);
1518 1518
1519static ssize_t buffer_show (struct device *dev, struct device_attribute *attr, 1519static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1520 char *buf) 1520 char *buf)
1521{ struct netiucv_priv *priv = dev->driver_data; 1521{
1522 struct netiucv_priv *priv = dev_get_drvdata(dev);
1522 1523
1523 IUCV_DBF_TEXT(trace, 5, __func__); 1524 IUCV_DBF_TEXT(trace, 5, __func__);
1524 return sprintf(buf, "%d\n", priv->conn->max_buffsize); 1525 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
@@ -1527,7 +1528,7 @@ static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1527static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, 1528static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1528 const char *buf, size_t count) 1529 const char *buf, size_t count)
1529{ 1530{
1530 struct netiucv_priv *priv = dev->driver_data; 1531 struct netiucv_priv *priv = dev_get_drvdata(dev);
1531 struct net_device *ndev = priv->conn->netdev; 1532 struct net_device *ndev = priv->conn->netdev;
1532 char *e; 1533 char *e;
1533 int bs1; 1534 int bs1;
@@ -1575,7 +1576,7 @@ static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1575static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr, 1576static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1576 char *buf) 1577 char *buf)
1577{ 1578{
1578 struct netiucv_priv *priv = dev->driver_data; 1579 struct netiucv_priv *priv = dev_get_drvdata(dev);
1579 1580
1580 IUCV_DBF_TEXT(trace, 5, __func__); 1581 IUCV_DBF_TEXT(trace, 5, __func__);
1581 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm)); 1582 return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
@@ -1586,7 +1587,7 @@ static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1586static ssize_t conn_fsm_show (struct device *dev, 1587static ssize_t conn_fsm_show (struct device *dev,
1587 struct device_attribute *attr, char *buf) 1588 struct device_attribute *attr, char *buf)
1588{ 1589{
1589 struct netiucv_priv *priv = dev->driver_data; 1590 struct netiucv_priv *priv = dev_get_drvdata(dev);
1590 1591
1591 IUCV_DBF_TEXT(trace, 5, __func__); 1592 IUCV_DBF_TEXT(trace, 5, __func__);
1592 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm)); 1593 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
@@ -1597,7 +1598,7 @@ static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1597static ssize_t maxmulti_show (struct device *dev, 1598static ssize_t maxmulti_show (struct device *dev,
1598 struct device_attribute *attr, char *buf) 1599 struct device_attribute *attr, char *buf)
1599{ 1600{
1600 struct netiucv_priv *priv = dev->driver_data; 1601 struct netiucv_priv *priv = dev_get_drvdata(dev);
1601 1602
1602 IUCV_DBF_TEXT(trace, 5, __func__); 1603 IUCV_DBF_TEXT(trace, 5, __func__);
1603 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); 1604 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
@@ -1607,7 +1608,7 @@ static ssize_t maxmulti_write (struct device *dev,
1607 struct device_attribute *attr, 1608 struct device_attribute *attr,
1608 const char *buf, size_t count) 1609 const char *buf, size_t count)
1609{ 1610{
1610 struct netiucv_priv *priv = dev->driver_data; 1611 struct netiucv_priv *priv = dev_get_drvdata(dev);
1611 1612
1612 IUCV_DBF_TEXT(trace, 4, __func__); 1613 IUCV_DBF_TEXT(trace, 4, __func__);
1613 priv->conn->prof.maxmulti = 0; 1614 priv->conn->prof.maxmulti = 0;
@@ -1619,7 +1620,7 @@ static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1619static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr, 1620static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1620 char *buf) 1621 char *buf)
1621{ 1622{
1622 struct netiucv_priv *priv = dev->driver_data; 1623 struct netiucv_priv *priv = dev_get_drvdata(dev);
1623 1624
1624 IUCV_DBF_TEXT(trace, 5, __func__); 1625 IUCV_DBF_TEXT(trace, 5, __func__);
1625 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); 1626 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
@@ -1628,7 +1629,7 @@ static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1628static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr, 1629static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1629 const char *buf, size_t count) 1630 const char *buf, size_t count)
1630{ 1631{
1631 struct netiucv_priv *priv = dev->driver_data; 1632 struct netiucv_priv *priv = dev_get_drvdata(dev);
1632 1633
1633 IUCV_DBF_TEXT(trace, 4, __func__); 1634 IUCV_DBF_TEXT(trace, 4, __func__);
1634 priv->conn->prof.maxcqueue = 0; 1635 priv->conn->prof.maxcqueue = 0;
@@ -1640,7 +1641,7 @@ static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1640static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr, 1641static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1641 char *buf) 1642 char *buf)
1642{ 1643{
1643 struct netiucv_priv *priv = dev->driver_data; 1644 struct netiucv_priv *priv = dev_get_drvdata(dev);
1644 1645
1645 IUCV_DBF_TEXT(trace, 5, __func__); 1646 IUCV_DBF_TEXT(trace, 5, __func__);
1646 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); 1647 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
@@ -1649,7 +1650,7 @@ static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1649static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr, 1650static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1650 const char *buf, size_t count) 1651 const char *buf, size_t count)
1651{ 1652{
1652 struct netiucv_priv *priv = dev->driver_data; 1653 struct netiucv_priv *priv = dev_get_drvdata(dev);
1653 1654
1654 IUCV_DBF_TEXT(trace, 4, __func__); 1655 IUCV_DBF_TEXT(trace, 4, __func__);
1655 priv->conn->prof.doios_single = 0; 1656 priv->conn->prof.doios_single = 0;
@@ -1661,7 +1662,7 @@ static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1661static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr, 1662static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1662 char *buf) 1663 char *buf)
1663{ 1664{
1664 struct netiucv_priv *priv = dev->driver_data; 1665 struct netiucv_priv *priv = dev_get_drvdata(dev);
1665 1666
1666 IUCV_DBF_TEXT(trace, 5, __func__); 1667 IUCV_DBF_TEXT(trace, 5, __func__);
1667 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); 1668 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
@@ -1670,7 +1671,7 @@ static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1670static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr, 1671static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1671 const char *buf, size_t count) 1672 const char *buf, size_t count)
1672{ 1673{
1673 struct netiucv_priv *priv = dev->driver_data; 1674 struct netiucv_priv *priv = dev_get_drvdata(dev);
1674 1675
1675 IUCV_DBF_TEXT(trace, 5, __func__); 1676 IUCV_DBF_TEXT(trace, 5, __func__);
1676 priv->conn->prof.doios_multi = 0; 1677 priv->conn->prof.doios_multi = 0;
@@ -1682,7 +1683,7 @@ static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1682static ssize_t txlen_show (struct device *dev, struct device_attribute *attr, 1683static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1683 char *buf) 1684 char *buf)
1684{ 1685{
1685 struct netiucv_priv *priv = dev->driver_data; 1686 struct netiucv_priv *priv = dev_get_drvdata(dev);
1686 1687
1687 IUCV_DBF_TEXT(trace, 5, __func__); 1688 IUCV_DBF_TEXT(trace, 5, __func__);
1688 return sprintf(buf, "%ld\n", priv->conn->prof.txlen); 1689 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
@@ -1691,7 +1692,7 @@ static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1691static ssize_t txlen_write (struct device *dev, struct device_attribute *attr, 1692static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1692 const char *buf, size_t count) 1693 const char *buf, size_t count)
1693{ 1694{
1694 struct netiucv_priv *priv = dev->driver_data; 1695 struct netiucv_priv *priv = dev_get_drvdata(dev);
1695 1696
1696 IUCV_DBF_TEXT(trace, 4, __func__); 1697 IUCV_DBF_TEXT(trace, 4, __func__);
1697 priv->conn->prof.txlen = 0; 1698 priv->conn->prof.txlen = 0;
@@ -1703,7 +1704,7 @@ static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1703static ssize_t txtime_show (struct device *dev, struct device_attribute *attr, 1704static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1704 char *buf) 1705 char *buf)
1705{ 1706{
1706 struct netiucv_priv *priv = dev->driver_data; 1707 struct netiucv_priv *priv = dev_get_drvdata(dev);
1707 1708
1708 IUCV_DBF_TEXT(trace, 5, __func__); 1709 IUCV_DBF_TEXT(trace, 5, __func__);
1709 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); 1710 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
@@ -1712,7 +1713,7 @@ static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1712static ssize_t txtime_write (struct device *dev, struct device_attribute *attr, 1713static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1713 const char *buf, size_t count) 1714 const char *buf, size_t count)
1714{ 1715{
1715 struct netiucv_priv *priv = dev->driver_data; 1716 struct netiucv_priv *priv = dev_get_drvdata(dev);
1716 1717
1717 IUCV_DBF_TEXT(trace, 4, __func__); 1718 IUCV_DBF_TEXT(trace, 4, __func__);
1718 priv->conn->prof.tx_time = 0; 1719 priv->conn->prof.tx_time = 0;
@@ -1724,7 +1725,7 @@ static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1724static ssize_t txpend_show (struct device *dev, struct device_attribute *attr, 1725static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1725 char *buf) 1726 char *buf)
1726{ 1727{
1727 struct netiucv_priv *priv = dev->driver_data; 1728 struct netiucv_priv *priv = dev_get_drvdata(dev);
1728 1729
1729 IUCV_DBF_TEXT(trace, 5, __func__); 1730 IUCV_DBF_TEXT(trace, 5, __func__);
1730 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); 1731 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
@@ -1733,7 +1734,7 @@ static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1733static ssize_t txpend_write (struct device *dev, struct device_attribute *attr, 1734static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1734 const char *buf, size_t count) 1735 const char *buf, size_t count)
1735{ 1736{
1736 struct netiucv_priv *priv = dev->driver_data; 1737 struct netiucv_priv *priv = dev_get_drvdata(dev);
1737 1738
1738 IUCV_DBF_TEXT(trace, 4, __func__); 1739 IUCV_DBF_TEXT(trace, 4, __func__);
1739 priv->conn->prof.tx_pending = 0; 1740 priv->conn->prof.tx_pending = 0;
@@ -1745,7 +1746,7 @@ static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1745static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr, 1746static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1746 char *buf) 1747 char *buf)
1747{ 1748{
1748 struct netiucv_priv *priv = dev->driver_data; 1749 struct netiucv_priv *priv = dev_get_drvdata(dev);
1749 1750
1750 IUCV_DBF_TEXT(trace, 5, __func__); 1751 IUCV_DBF_TEXT(trace, 5, __func__);
1751 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); 1752 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
@@ -1754,7 +1755,7 @@ static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1754static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr, 1755static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1755 const char *buf, size_t count) 1756 const char *buf, size_t count)
1756{ 1757{
1757 struct netiucv_priv *priv = dev->driver_data; 1758 struct netiucv_priv *priv = dev_get_drvdata(dev);
1758 1759
1759 IUCV_DBF_TEXT(trace, 4, __func__); 1760 IUCV_DBF_TEXT(trace, 4, __func__);
1760 priv->conn->prof.tx_max_pending = 0; 1761 priv->conn->prof.tx_max_pending = 0;
@@ -1845,7 +1846,7 @@ static int netiucv_register_device(struct net_device *ndev)
1845 if (ret) 1846 if (ret)
1846 goto out_unreg; 1847 goto out_unreg;
1847 priv->dev = dev; 1848 priv->dev = dev;
1848 dev->driver_data = priv; 1849 dev_set_drvdata(dev, priv);
1849 return 0; 1850 return 0;
1850 1851
1851out_unreg: 1852out_unreg:
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 3ac27ee47396..2ccbd185a5fb 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -470,6 +470,12 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
470 if (!adapter) 470 if (!adapter)
471 return -ENOMEM; 471 return -ENOMEM;
472 472
473 adapter->gs = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL);
474 if (!adapter->gs) {
475 kfree(adapter);
476 return -ENOMEM;
477 }
478
473 ccw_device->handler = NULL; 479 ccw_device->handler = NULL;
474 adapter->ccw_device = ccw_device; 480 adapter->ccw_device = ccw_device;
475 atomic_set(&adapter->refcount, 0); 481 atomic_set(&adapter->refcount, 0);
@@ -523,8 +529,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
523 goto sysfs_failed; 529 goto sysfs_failed;
524 530
525 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 531 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
526 532 zfcp_fc_wka_ports_init(adapter);
527 zfcp_fc_nameserver_init(adapter);
528 533
529 if (!zfcp_adapter_scsi_register(adapter)) 534 if (!zfcp_adapter_scsi_register(adapter))
530 return 0; 535 return 0;
@@ -571,6 +576,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
571 kfree(adapter->req_list); 576 kfree(adapter->req_list);
572 kfree(adapter->fc_stats); 577 kfree(adapter->fc_stats);
573 kfree(adapter->stats_reset_data); 578 kfree(adapter->stats_reset_data);
579 kfree(adapter->gs);
574 kfree(adapter); 580 kfree(adapter);
575} 581}
576 582
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 2074d45dbf6c..49d0532bca1c 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -22,6 +22,8 @@
22#include <linux/syscalls.h> 22#include <linux/syscalls.h>
23#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
24#include <linux/ioctl.h> 24#include <linux/ioctl.h>
25#include <scsi/fc/fc_fs.h>
26#include <scsi/fc/fc_gs.h>
25#include <scsi/scsi.h> 27#include <scsi/scsi.h>
26#include <scsi/scsi_tcq.h> 28#include <scsi/scsi_tcq.h>
27#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
@@ -29,6 +31,7 @@
29#include <scsi/scsi_host.h> 31#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport.h> 32#include <scsi/scsi_transport.h>
31#include <scsi/scsi_transport_fc.h> 33#include <scsi/scsi_transport_fc.h>
34#include <scsi/scsi_bsg_fc.h>
32#include <asm/ccwdev.h> 35#include <asm/ccwdev.h>
33#include <asm/qdio.h> 36#include <asm/qdio.h>
34#include <asm/debug.h> 37#include <asm/debug.h>
@@ -228,11 +231,6 @@ struct zfcp_ls_adisc {
228 231
229/* FC-PH/FC-GS well-known address identifiers for generic services */ 232/* FC-PH/FC-GS well-known address identifiers for generic services */
230#define ZFCP_DID_WKA 0xFFFFF0 233#define ZFCP_DID_WKA 0xFFFFF0
231#define ZFCP_DID_MANAGEMENT_SERVICE 0xFFFFFA
232#define ZFCP_DID_TIME_SERVICE 0xFFFFFB
233#define ZFCP_DID_DIRECTORY_SERVICE 0xFFFFFC
234#define ZFCP_DID_ALIAS_SERVICE 0xFFFFF8
235#define ZFCP_DID_KEY_DISTRIBUTION_SERVICE 0xFFFFF7
236 234
237/* remote port status */ 235/* remote port status */
238#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 236#define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
@@ -376,6 +374,14 @@ struct zfcp_wka_port {
376 struct delayed_work work; 374 struct delayed_work work;
377}; 375};
378 376
377struct zfcp_wka_ports {
378 struct zfcp_wka_port ms; /* management service */
379 struct zfcp_wka_port ts; /* time service */
380 struct zfcp_wka_port ds; /* directory service */
381 struct zfcp_wka_port as; /* alias service */
382 struct zfcp_wka_port ks; /* key distribution service */
383};
384
379struct zfcp_qdio_queue { 385struct zfcp_qdio_queue {
380 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; 386 struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
381 u8 first; /* index of next free bfr in queue */ 387 u8 first; /* index of next free bfr in queue */
@@ -461,7 +467,7 @@ struct zfcp_adapter {
461 actions */ 467 actions */
462 u32 erp_low_mem_count; /* nr of erp actions waiting 468 u32 erp_low_mem_count; /* nr of erp actions waiting
463 for memory */ 469 for memory */
464 struct zfcp_wka_port nsp; /* adapter's nameserver */ 470 struct zfcp_wka_ports *gs; /* generic services */
465 debug_info_t *rec_dbf; 471 debug_info_t *rec_dbf;
466 debug_info_t *hba_dbf; 472 debug_info_t *hba_dbf;
467 debug_info_t *san_dbf; /* debug feature areas */ 473 debug_info_t *san_dbf; /* debug feature areas */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e50ea465bc2b..8030e25152fb 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -719,7 +719,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
719 zfcp_qdio_close(adapter); 719 zfcp_qdio_close(adapter);
720 zfcp_fsf_req_dismiss_all(adapter); 720 zfcp_fsf_req_dismiss_all(adapter);
721 adapter->fsf_req_seq_no = 0; 721 adapter->fsf_req_seq_no = 0;
722 zfcp_fc_wka_port_force_offline(&adapter->nsp); 722 zfcp_fc_wka_port_force_offline(&adapter->gs->ds);
723 /* all ports and units are closed */ 723 /* all ports and units are closed */
724 zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, 724 zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL,
725 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); 725 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 120a9a1c81f7..3044c6010306 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -106,8 +106,12 @@ extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
106extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); 106extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
107extern void zfcp_test_link(struct zfcp_port *); 107extern void zfcp_test_link(struct zfcp_port *);
108extern void zfcp_fc_link_test_work(struct work_struct *); 108extern void zfcp_fc_link_test_work(struct work_struct *);
109extern void zfcp_fc_nameserver_init(struct zfcp_adapter *);
110extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *); 109extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
110extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *);
111extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *);
112extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *);
113extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
114
111 115
112/* zfcp_fsf.c */ 116/* zfcp_fsf.c */
113extern int zfcp_fsf_open_port(struct zfcp_erp_action *); 117extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 35493a82d2a8..2f0705d76b72 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -120,14 +120,13 @@ static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
120 schedule_delayed_work(&wka_port->work, HZ / 100); 120 schedule_delayed_work(&wka_port->work, HZ / 100);
121} 121}
122 122
123void zfcp_fc_nameserver_init(struct zfcp_adapter *adapter) 123static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id,
124 struct zfcp_adapter *adapter)
124{ 125{
125 struct zfcp_wka_port *wka_port = &adapter->nsp;
126
127 init_waitqueue_head(&wka_port->completion_wq); 126 init_waitqueue_head(&wka_port->completion_wq);
128 127
129 wka_port->adapter = adapter; 128 wka_port->adapter = adapter;
130 wka_port->d_id = ZFCP_DID_DIRECTORY_SERVICE; 129 wka_port->d_id = d_id;
131 130
132 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 131 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
133 atomic_set(&wka_port->refcount, 0); 132 atomic_set(&wka_port->refcount, 0);
@@ -143,6 +142,17 @@ void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
143 mutex_unlock(&wka->mutex); 142 mutex_unlock(&wka->mutex);
144} 143}
145 144
145void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter)
146{
147 struct zfcp_wka_ports *gs = adapter->gs;
148
149 zfcp_fc_wka_port_init(&gs->ms, FC_FID_MGMT_SERV, adapter);
150 zfcp_fc_wka_port_init(&gs->ts, FC_FID_TIME_SERV, adapter);
151 zfcp_fc_wka_port_init(&gs->ds, FC_FID_DIR_SERV, adapter);
152 zfcp_fc_wka_port_init(&gs->as, FC_FID_ALIASES, adapter);
153 zfcp_fc_wka_port_init(&gs->ks, FC_FID_SEC_KEY, adapter);
154}
155
146static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, 156static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
147 struct fcp_rscn_element *elem) 157 struct fcp_rscn_element *elem)
148{ 158{
@@ -282,7 +292,7 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
282 292
283 /* setup parameters for send generic command */ 293 /* setup parameters for send generic command */
284 gid_pn->port = erp_action->port; 294 gid_pn->port = erp_action->port;
285 gid_pn->ct.wka_port = &adapter->nsp; 295 gid_pn->ct.wka_port = &adapter->gs->ds;
286 gid_pn->ct.handler = zfcp_fc_ns_handler; 296 gid_pn->ct.handler = zfcp_fc_ns_handler;
287 gid_pn->ct.handler_data = (unsigned long) &compl_rec; 297 gid_pn->ct.handler_data = (unsigned long) &compl_rec;
288 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; 298 gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
@@ -329,13 +339,13 @@ int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action)
329 339
330 memset(gid_pn, 0, sizeof(*gid_pn)); 340 memset(gid_pn, 0, sizeof(*gid_pn));
331 341
332 ret = zfcp_wka_port_get(&adapter->nsp); 342 ret = zfcp_wka_port_get(&adapter->gs->ds);
333 if (ret) 343 if (ret)
334 goto out; 344 goto out;
335 345
336 ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn); 346 ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn);
337 347
338 zfcp_wka_port_put(&adapter->nsp); 348 zfcp_wka_port_put(&adapter->gs->ds);
339out: 349out:
340 mempool_free(gid_pn, adapter->pool.data_gid_pn); 350 mempool_free(gid_pn, adapter->pool.data_gid_pn);
341 return ret; 351 return ret;
@@ -525,7 +535,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
525 req->fc4_type = ZFCP_CT_SCSI_FCP; 535 req->fc4_type = ZFCP_CT_SCSI_FCP;
526 536
527 /* prepare zfcp_send_ct */ 537 /* prepare zfcp_send_ct */
528 ct->wka_port = &adapter->nsp; 538 ct->wka_port = &adapter->gs->ds;
529 ct->handler = zfcp_fc_ns_handler; 539 ct->handler = zfcp_fc_ns_handler;
530 ct->handler_data = (unsigned long)&compl_rec; 540 ct->handler_data = (unsigned long)&compl_rec;
531 ct->timeout = 10; 541 ct->timeout = 10;
@@ -644,7 +654,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
644 fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) 654 fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
645 return 0; 655 return 0;
646 656
647 ret = zfcp_wka_port_get(&adapter->nsp); 657 ret = zfcp_wka_port_get(&adapter->gs->ds);
648 if (ret) 658 if (ret)
649 return ret; 659 return ret;
650 660
@@ -666,7 +676,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter)
666 } 676 }
667 zfcp_free_sg_env(gpn_ft, buf_num); 677 zfcp_free_sg_env(gpn_ft, buf_num);
668out: 678out:
669 zfcp_wka_port_put(&adapter->nsp); 679 zfcp_wka_port_put(&adapter->gs->ds);
670 return ret; 680 return ret;
671} 681}
672 682
@@ -675,3 +685,158 @@ void _zfcp_scan_ports_later(struct work_struct *work)
675{ 685{
676 zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); 686 zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
677} 687}
688
689struct zfcp_els_fc_job {
690 struct zfcp_send_els els;
691 struct fc_bsg_job *job;
692};
693
694static void zfcp_fc_generic_els_handler(unsigned long data)
695{
696 struct zfcp_els_fc_job *els_fc_job = (struct zfcp_els_fc_job *) data;
697 struct fc_bsg_job *job = els_fc_job->job;
698 struct fc_bsg_reply *reply = job->reply;
699
700 if (els_fc_job->els.status) {
701 /* request rejected or timed out */
702 reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT;
703 goto out;
704 }
705
706 reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
707 reply->reply_payload_rcv_len = job->reply_payload.payload_len;
708
709out:
710 job->state_flags = FC_RQST_STATE_DONE;
711 job->job_done(job);
712 kfree(els_fc_job);
713}
714
715int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job)
716{
717 struct zfcp_els_fc_job *els_fc_job;
718 struct fc_rport *rport = job->rport;
719 struct Scsi_Host *shost;
720 struct zfcp_adapter *adapter;
721 struct zfcp_port *port;
722 u8 *port_did;
723
724 shost = rport ? rport_to_shost(rport) : job->shost;
725 adapter = (struct zfcp_adapter *)shost->hostdata[0];
726
727 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
728 return -EINVAL;
729
730 els_fc_job = kzalloc(sizeof(struct zfcp_els_fc_job), GFP_KERNEL);
731 if (!els_fc_job)
732 return -ENOMEM;
733
734 els_fc_job->els.adapter = adapter;
735 if (rport) {
736 read_lock_irq(&zfcp_data.config_lock);
737 port = rport->dd_data;
738 if (port)
739 els_fc_job->els.d_id = port->d_id;
740 read_unlock_irq(&zfcp_data.config_lock);
741 if (!port) {
742 kfree(els_fc_job);
743 return -EINVAL;
744 }
745 } else {
746 port_did = job->request->rqst_data.h_els.port_id;
747 els_fc_job->els.d_id = (port_did[0] << 16) +
748 (port_did[1] << 8) + port_did[2];
749 }
750
751 els_fc_job->els.req = job->request_payload.sg_list;
752 els_fc_job->els.resp = job->reply_payload.sg_list;
753 els_fc_job->els.handler = zfcp_fc_generic_els_handler;
754 els_fc_job->els.handler_data = (unsigned long) els_fc_job;
755 els_fc_job->job = job;
756
757 return zfcp_fsf_send_els(&els_fc_job->els);
758}
759
760struct zfcp_ct_fc_job {
761 struct zfcp_send_ct ct;
762 struct fc_bsg_job *job;
763};
764
765static void zfcp_fc_generic_ct_handler(unsigned long data)
766{
767 struct zfcp_ct_fc_job *ct_fc_job = (struct zfcp_ct_fc_job *) data;
768 struct fc_bsg_job *job = ct_fc_job->job;
769
770 job->reply->reply_data.ctels_reply.status = ct_fc_job->ct.status ?
771 FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK;
772 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
773 job->state_flags = FC_RQST_STATE_DONE;
774 job->job_done(job);
775
776 zfcp_wka_port_put(ct_fc_job->ct.wka_port);
777
778 kfree(ct_fc_job);
779}
780
781int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job)
782{
783 int ret;
784 u8 gs_type;
785 struct fc_rport *rport = job->rport;
786 struct Scsi_Host *shost;
787 struct zfcp_adapter *adapter;
788 struct zfcp_ct_fc_job *ct_fc_job;
789 u32 preamble_word1;
790
791 shost = rport ? rport_to_shost(rport) : job->shost;
792
793 adapter = (struct zfcp_adapter *)shost->hostdata[0];
794 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
795 return -EINVAL;
796
797 ct_fc_job = kzalloc(sizeof(struct zfcp_ct_fc_job), GFP_KERNEL);
798 if (!ct_fc_job)
799 return -ENOMEM;
800
801 preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
802 gs_type = (preamble_word1 & 0xff000000) >> 24;
803
804 switch (gs_type) {
805 case FC_FST_ALIAS:
806 ct_fc_job->ct.wka_port = &adapter->gs->as;
807 break;
808 case FC_FST_MGMT:
809 ct_fc_job->ct.wka_port = &adapter->gs->ms;
810 break;
811 case FC_FST_TIME:
812 ct_fc_job->ct.wka_port = &adapter->gs->ts;
813 break;
814 case FC_FST_DIR:
815 ct_fc_job->ct.wka_port = &adapter->gs->ds;
816 break;
817 default:
818 kfree(ct_fc_job);
819 return -EINVAL; /* no such service */
820 }
821
822 ret = zfcp_wka_port_get(ct_fc_job->ct.wka_port);
823 if (ret) {
824 kfree(ct_fc_job);
825 return ret;
826 }
827
828 ct_fc_job->ct.req = job->request_payload.sg_list;
829 ct_fc_job->ct.resp = job->reply_payload.sg_list;
830 ct_fc_job->ct.timeout = ZFCP_FSF_REQUEST_TIMEOUT;
831 ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler;
832 ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job;
833 ct_fc_job->ct.completion = NULL;
834 ct_fc_job->job = job;
835
836 ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL, NULL);
837 if (ret) {
838 kfree(ct_fc_job);
839 zfcp_wka_port_put(ct_fc_job->ct.wka_port);
840 }
841 return ret;
842}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e6dae3744e79..c57658f3d34f 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1146,7 +1146,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1146 case FSF_RESPONSE_SIZE_TOO_LARGE: 1146 case FSF_RESPONSE_SIZE_TOO_LARGE:
1147 break; 1147 break;
1148 case FSF_ACCESS_DENIED: 1148 case FSF_ACCESS_DENIED:
1149 zfcp_fsf_access_denied_port(req, port); 1149 if (port)
1150 zfcp_fsf_access_denied_port(req, port);
1150 break; 1151 break;
1151 case FSF_SBAL_MISMATCH: 1152 case FSF_SBAL_MISMATCH:
1152 /* should never occure, avoided in zfcp_fsf_send_els */ 1153 /* should never occure, avoided in zfcp_fsf_send_els */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 7d0da230eb63..967ede73f4c5 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -623,6 +623,20 @@ void zfcp_scsi_scan(struct work_struct *work)
623 zfcp_unit_put(unit); 623 zfcp_unit_put(unit);
624} 624}
625 625
626static int zfcp_execute_fc_job(struct fc_bsg_job *job)
627{
628 switch (job->request->msgcode) {
629 case FC_BSG_RPT_ELS:
630 case FC_BSG_HST_ELS_NOLOGIN:
631 return zfcp_fc_execute_els_fc_job(job);
632 case FC_BSG_RPT_CT:
633 case FC_BSG_HST_CT:
634 return zfcp_fc_execute_ct_fc_job(job);
635 default:
636 return -EINVAL;
637 }
638}
639
626struct fc_function_template zfcp_transport_functions = { 640struct fc_function_template zfcp_transport_functions = {
627 .show_starget_port_id = 1, 641 .show_starget_port_id = 1,
628 .show_starget_port_name = 1, 642 .show_starget_port_name = 1,
@@ -644,6 +658,7 @@ struct fc_function_template zfcp_transport_functions = {
644 .dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk, 658 .dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk,
645 .terminate_rport_io = zfcp_scsi_terminate_rport_io, 659 .terminate_rport_io = zfcp_scsi_terminate_rport_io,
646 .show_host_port_state = 1, 660 .show_host_port_state = 1,
661 .bsg_request = zfcp_execute_fc_job,
647 /* no functions registered for following dynamic attributes but 662 /* no functions registered for following dynamic attributes but
648 directly set by LLDD */ 663 directly set by LLDD */
649 .show_host_port_type = 1, 664 .show_host_port_type = 1,
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index ed0e3e55652a..538135783aab 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -646,7 +646,7 @@ static int aha1740_probe (struct device *dev)
646 646
647static __devexit int aha1740_remove (struct device *dev) 647static __devexit int aha1740_remove (struct device *dev)
648{ 648{
649 struct Scsi_Host *shpnt = dev->driver_data; 649 struct Scsi_Host *shpnt = dev_get_drvdata(dev);
650 struct aha1740_hostdata *host = HOSTDATA (shpnt); 650 struct aha1740_hostdata *host = HOSTDATA (shpnt);
651 651
652 scsi_remove_host(shpnt); 652 scsi_remove_host(shpnt);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 0f829b3b8ab7..75b23317bd26 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -627,19 +627,15 @@ ahd_linux_target_alloc(struct scsi_target *starget)
627 starget->id, &tstate); 627 starget->id, &tstate);
628 628
629 if ((flags & CFPACKETIZED) == 0) { 629 if ((flags & CFPACKETIZED) == 0) {
630 /* Do not negotiate packetized transfers */ 630 /* don't negotiate packetized (IU) transfers */
631 spi_rd_strm(starget) = 0; 631 spi_max_iu(starget) = 0;
632 spi_pcomp_en(starget) = 0;
633 spi_rti(starget) = 0;
634 spi_wr_flow(starget) = 0;
635 spi_hold_mcs(starget) = 0;
636 } else { 632 } else {
637 if ((ahd->features & AHD_RTI) == 0) 633 if ((ahd->features & AHD_RTI) == 0)
638 spi_rti(starget) = 0; 634 spi_rti(starget) = 0;
639 } 635 }
640 636
641 if ((flags & CFQAS) == 0) 637 if ((flags & CFQAS) == 0)
642 spi_qas(starget) = 0; 638 spi_max_qas(starget) = 0;
643 639
644 /* Transinfo values have been set to BIOS settings */ 640 /* Transinfo values have been set to BIOS settings */
645 spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0; 641 spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index 820d428ae839..b62b482e55e7 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -2,6 +2,7 @@ config SCSI_BNX2_ISCSI
2 tristate "Broadcom NetXtreme II iSCSI support" 2 tristate "Broadcom NetXtreme II iSCSI support"
3 select SCSI_ISCSI_ATTRS 3 select SCSI_ISCSI_ATTRS
4 select CNIC 4 select CNIC
5 depends on PCI
5 ---help--- 6 ---help---
6 This driver supports iSCSI offload for the Broadcom NetXtreme II 7 This driver supports iSCSI offload for the Broadcom NetXtreme II
7 devices. 8 devices.
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 11d2602ae88e..869a11bdccbd 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1877,7 +1877,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1877 unsigned long wait_switch = 0; 1877 unsigned long wait_switch = 0;
1878 int rc; 1878 int rc;
1879 1879
1880 vdev->dev.driver_data = NULL; 1880 dev_set_drvdata(&vdev->dev, NULL);
1881 1881
1882 host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); 1882 host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
1883 if (!host) { 1883 if (!host) {
@@ -1949,7 +1949,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1949 scsi_scan_host(host); 1949 scsi_scan_host(host);
1950 } 1950 }
1951 1951
1952 vdev->dev.driver_data = hostdata; 1952 dev_set_drvdata(&vdev->dev, hostdata);
1953 return 0; 1953 return 0;
1954 1954
1955 add_srp_port_failed: 1955 add_srp_port_failed:
@@ -1968,7 +1968,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1968 1968
1969static int ibmvscsi_remove(struct vio_dev *vdev) 1969static int ibmvscsi_remove(struct vio_dev *vdev)
1970{ 1970{
1971 struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; 1971 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
1972 unmap_persist_bufs(hostdata); 1972 unmap_persist_bufs(hostdata);
1973 release_event_pool(&hostdata->pool, hostdata); 1973 release_event_pool(&hostdata->pool, hostdata);
1974 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, 1974 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index e2dd6a45924a..d5eaf9727109 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -892,7 +892,7 @@ free_vport:
892 892
893static int ibmvstgt_remove(struct vio_dev *dev) 893static int ibmvstgt_remove(struct vio_dev *dev)
894{ 894{
895 struct srp_target *target = (struct srp_target *) dev->dev.driver_data; 895 struct srp_target *target = dev_get_drvdata(&dev->dev);
896 struct Scsi_Host *shost = target->shost; 896 struct Scsi_Host *shost = target->shost;
897 struct vio_port *vport = target->ldata; 897 struct vio_port *vport = target->ldata;
898 898
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 15e2d132e8b9..2742ae8a3d09 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -135,7 +135,7 @@ int srp_target_alloc(struct srp_target *target, struct device *dev,
135 INIT_LIST_HEAD(&target->cmd_queue); 135 INIT_LIST_HEAD(&target->cmd_queue);
136 136
137 target->dev = dev; 137 target->dev = dev;
138 target->dev->driver_data = target; 138 dev_set_drvdata(target->dev, target);
139 139
140 target->srp_iu_size = iu_size; 140 target->srp_iu_size = iu_size;
141 target->rx_ring_size = nr; 141 target->rx_ring_size = nr;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 540569849099..1877d9811831 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -457,10 +457,6 @@ struct lpfc_hba {
457 void (*lpfc_scsi_prep_cmnd) 457 void (*lpfc_scsi_prep_cmnd)
458 (struct lpfc_vport *, struct lpfc_scsi_buf *, 458 (struct lpfc_vport *, struct lpfc_scsi_buf *,
459 struct lpfc_nodelist *); 459 struct lpfc_nodelist *);
460 int (*lpfc_scsi_prep_task_mgmt_cmd)
461 (struct lpfc_vport *, struct lpfc_scsi_buf *,
462 unsigned int, uint8_t);
463
464 /* IOCB interface function jump table entries */ 460 /* IOCB interface function jump table entries */
465 int (*__lpfc_sli_issue_iocb) 461 int (*__lpfc_sli_issue_iocb)
466 (struct lpfc_hba *, uint32_t, 462 (struct lpfc_hba *, uint32_t,
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d73e677201f8..fc07be5fbce9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3113,6 +3113,9 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
3113 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3113 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3114 struct lpfc_hba *phba = vport->phba; 3114 struct lpfc_hba *phba = vport->phba;
3115 3115
3116 if (phba->sli_rev >= LPFC_SLI_REV4)
3117 return -EPERM;
3118
3116 if ((off + count) > FF_REG_AREA_SIZE) 3119 if ((off + count) > FF_REG_AREA_SIZE)
3117 return -ERANGE; 3120 return -ERANGE;
3118 3121
@@ -3163,6 +3166,9 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
3163 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3166 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3164 struct lpfc_hba *phba = vport->phba; 3167 struct lpfc_hba *phba = vport->phba;
3165 3168
3169 if (phba->sli_rev >= LPFC_SLI_REV4)
3170 return -EPERM;
3171
3166 if (off > FF_REG_AREA_SIZE) 3172 if (off > FF_REG_AREA_SIZE)
3167 return -ERANGE; 3173 return -ERANGE;
3168 3174
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1dbccfd3d022..0e532f072eb3 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1732,7 +1732,9 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1732 uint32_t *ptr, str[4]; 1732 uint32_t *ptr, str[4];
1733 uint8_t *fwname; 1733 uint8_t *fwname;
1734 1734
1735 if (vp->rev.rBit) { 1735 if (phba->sli_rev == LPFC_SLI_REV4)
1736 sprintf(fwrevision, "%s", vp->rev.opFwName);
1737 else if (vp->rev.rBit) {
1736 if (psli->sli_flag & LPFC_SLI_ACTIVE) 1738 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1737 rev = vp->rev.sli2FwRev; 1739 rev = vp->rev.sli2FwRev;
1738 else 1740 else
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2b02b1fb39a0..8d0f0de76b63 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -53,8 +53,7 @@
53 * debugfs interface 53 * debugfs interface
54 * 54 *
55 * To access this interface the user should: 55 * To access this interface the user should:
56 * # mkdir /debug 56 * # mount -t debugfs none /sys/kernel/debug
57 * # mount -t debugfs none /debug
58 * 57 *
59 * The lpfc debugfs directory hierarchy is: 58 * The lpfc debugfs directory hierarchy is:
60 * lpfc/lpfcX/vportY 59 * lpfc/lpfcX/vportY
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6bdeb14878a2..f72fdf23bf1b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -168,6 +168,19 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
168 if (elsiocb == NULL) 168 if (elsiocb == NULL)
169 return NULL; 169 return NULL;
170 170
171 /*
172 * If this command is for fabric controller and HBA running
173 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
174 */
175 if ((did == Fabric_DID) &&
176 bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags) &&
177 ((elscmd == ELS_CMD_FLOGI) ||
178 (elscmd == ELS_CMD_FDISC) ||
179 (elscmd == ELS_CMD_LOGO)))
180 elsiocb->iocb_flag |= LPFC_FIP_ELS;
181 else
182 elsiocb->iocb_flag &= ~LPFC_FIP_ELS;
183
171 icmd = &elsiocb->iocb; 184 icmd = &elsiocb->iocb;
172 185
173 /* fill in BDEs for command */ 186 /* fill in BDEs for command */
@@ -6108,9 +6121,17 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6108 icmd->un.elsreq64.myID = 0; 6121 icmd->un.elsreq64.myID = 0;
6109 icmd->un.elsreq64.fl = 1; 6122 icmd->un.elsreq64.fl = 1;
6110 6123
6111 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ 6124 if (phba->sli_rev == LPFC_SLI_REV4) {
6112 icmd->ulpCt_h = 1; 6125 /* FDISC needs to be 1 for WQE VPI */
6113 icmd->ulpCt_l = 0; 6126 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
6127 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
6128 /* Set the ulpContext to the vpi */
6129 elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
6130 } else {
6131 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
6132 icmd->ulpCt_h = 1;
6133 icmd->ulpCt_l = 0;
6134 }
6114 6135
6115 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6136 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6116 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 6137 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 35c41ae75be2..ed46b24a3380 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1197,6 +1197,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1197{ 1197{
1198 struct lpfc_fcf_conn_entry *conn_entry; 1198 struct lpfc_fcf_conn_entry *conn_entry;
1199 1199
1200 /* If FCF not available return 0 */
1201 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1202 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
1203 return 0;
1204
1200 if (!phba->cfg_enable_fip) { 1205 if (!phba->cfg_enable_fip) {
1201 *boot_flag = 0; 1206 *boot_flag = 0;
1202 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1207 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
@@ -1216,6 +1221,14 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1216 *boot_flag = 0; 1221 *boot_flag = 0;
1217 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1222 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1218 new_fcf_record); 1223 new_fcf_record);
1224
1225 /*
1226 * When there are no FCF connect entries, use driver's default
1227 * addressing mode - FPMA.
1228 */
1229 if (*addr_mode & LPFC_FCF_FPMA)
1230 *addr_mode = LPFC_FCF_FPMA;
1231
1219 *vlan_id = 0xFFFF; 1232 *vlan_id = 0xFFFF;
1220 return 1; 1233 return 1;
1221 } 1234 }
@@ -1241,6 +1254,14 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1241 } 1254 }
1242 1255
1243 /* 1256 /*
1257 * If connection record does not support any addressing mode,
1258 * skip the FCF record.
1259 */
1260 if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
1261 & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
1262 continue;
1263
1264 /*
1244 * Check if the connection record specifies a required 1265 * Check if the connection record specifies a required
1245 * addressing mode. 1266 * addressing mode.
1246 */ 1267 */
@@ -1272,6 +1293,11 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1272 else 1293 else
1273 *boot_flag = 0; 1294 *boot_flag = 0;
1274 1295
1296 /*
1297 * If user did not specify any addressing mode, or if the
1298 * prefered addressing mode specified by user is not supported
1299 * by FCF, allow fabric to pick the addressing mode.
1300 */
1275 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1301 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1276 new_fcf_record); 1302 new_fcf_record);
1277 /* 1303 /*
@@ -1297,12 +1323,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1297 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && 1323 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1298 (*addr_mode & LPFC_FCF_FPMA)) 1324 (*addr_mode & LPFC_FCF_FPMA))
1299 *addr_mode = LPFC_FCF_FPMA; 1325 *addr_mode = LPFC_FCF_FPMA;
1300 /*
1301 * If user did not specify any addressing mode, use FPMA if
1302 * possible else use SPMA.
1303 */
1304 else if (*addr_mode & LPFC_FCF_FPMA)
1305 *addr_mode = LPFC_FCF_FPMA;
1306 1326
1307 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) 1327 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1308 *vlan_id = conn_entry->conn_rec.vlan_tag; 1328 *vlan_id = conn_entry->conn_rec.vlan_tag;
@@ -1864,7 +1884,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1864 vport->fc_flag &= ~FC_BYPASSED_MODE; 1884 vport->fc_flag &= ~FC_BYPASSED_MODE;
1865 spin_unlock_irq(shost->host_lock); 1885 spin_unlock_irq(shost->host_lock);
1866 1886
1867 if (((phba->fc_eventTag + 1) < la->eventTag) || 1887 if ((phba->fc_eventTag < la->eventTag) ||
1868 (phba->fc_eventTag == la->eventTag)) { 1888 (phba->fc_eventTag == la->eventTag)) {
1869 phba->fc_stat.LinkMultiEvent++; 1889 phba->fc_stat.LinkMultiEvent++;
1870 if (la->attType == AT_LINK_UP) 1890 if (la->attType == AT_LINK_UP)
@@ -2925,6 +2945,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2925 lpfc_no_rpi(phba, ndlp); 2945 lpfc_no_rpi(phba, ndlp);
2926 ndlp->nlp_rpi = 0; 2946 ndlp->nlp_rpi = 0;
2927 ndlp->nlp_flag &= ~NLP_RPI_VALID; 2947 ndlp->nlp_flag &= ~NLP_RPI_VALID;
2948 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2928 return 1; 2949 return 1;
2929 } 2950 }
2930 return 0; 2951 return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 02aa016b93e9..8a3a026667e4 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1183,7 +1183,6 @@ typedef struct {
1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 1183#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2 1184#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704 1185#define PCI_DEVICE_ID_TIGERSHARK 0x0704
1186#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705
1187 1186
1188#define JEDEC_ID_ADDRESS 0x0080001c 1187#define JEDEC_ID_ADDRESS 0x0080001c
1189#define FIREFLY_JEDEC_ID 0x1ACC 1188#define FIREFLY_JEDEC_ID 0x1ACC
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 39c34b3ad29d..2995d128f07f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -422,9 +422,9 @@ struct lpfc_wqe_generic{
422#define lpfc_wqe_gen_pri_WORD word10 422#define lpfc_wqe_gen_pri_WORD word10
423 uint32_t word11; 423 uint32_t word11;
424#define lpfc_wqe_gen_cq_id_SHIFT 16 424#define lpfc_wqe_gen_cq_id_SHIFT 16
425#define lpfc_wqe_gen_cq_id_MASK 0x000003FF 425#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF
426#define lpfc_wqe_gen_cq_id_WORD word11 426#define lpfc_wqe_gen_cq_id_WORD word11
427#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff 427#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
428#define lpfc_wqe_gen_wqec_SHIFT 7 428#define lpfc_wqe_gen_wqec_SHIFT 7
429#define lpfc_wqe_gen_wqec_MASK 0x00000001 429#define lpfc_wqe_gen_wqec_MASK 0x00000001
430#define lpfc_wqe_gen_wqec_WORD word11 430#define lpfc_wqe_gen_wqec_WORD word11
@@ -1128,7 +1128,7 @@ struct fcf_record {
1128#define lpfc_fcf_record_mac_5_WORD word4 1128#define lpfc_fcf_record_mac_5_WORD word4
1129#define lpfc_fcf_record_fcf_avail_SHIFT 16 1129#define lpfc_fcf_record_fcf_avail_SHIFT 16
1130#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF 1130#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF
1131#define lpfc_fcf_record_fc_avail_WORD word4 1131#define lpfc_fcf_record_fcf_avail_WORD word4
1132#define lpfc_fcf_record_mac_addr_prov_SHIFT 24 1132#define lpfc_fcf_record_mac_addr_prov_SHIFT 24
1133#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF 1133#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF
1134#define lpfc_fcf_record_mac_addr_prov_WORD word4 1134#define lpfc_fcf_record_mac_addr_prov_WORD word4
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2f5907f92eea..fc67cc65c63b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -428,7 +428,8 @@ lpfc_config_port_post(struct lpfc_hba *phba)
428 /* Reset the DFT_HBA_Q_DEPTH to the max xri */ 428 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
429 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) 429 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
430 phba->cfg_hba_queue_depth = 430 phba->cfg_hba_queue_depth =
431 mb->un.varRdConfig.max_xri + 1; 431 (mb->un.varRdConfig.max_xri + 1) -
432 lpfc_sli4_get_els_iocb_cnt(phba);
432 433
433 phba->lmt = mb->un.varRdConfig.lmt; 434 phba->lmt = mb->un.varRdConfig.lmt;
434 435
@@ -1646,10 +1647,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1646 oneConnect = 1; 1647 oneConnect = 1;
1647 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; 1648 m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
1648 break; 1649 break;
1649 case PCI_DEVICE_ID_TIGERSHARK_S:
1650 oneConnect = 1;
1651 m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
1652 break;
1653 default: 1650 default:
1654 m = (typeof(m)){ NULL }; 1651 m = (typeof(m)){ NULL };
1655 break; 1652 break;
@@ -3543,6 +3540,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
3543 3540
3544 /* Free the allocated rpi headers. */ 3541 /* Free the allocated rpi headers. */
3545 lpfc_sli4_remove_rpi_hdrs(phba); 3542 lpfc_sli4_remove_rpi_hdrs(phba);
3543 lpfc_sli4_remove_rpis(phba);
3546 3544
3547 /* Free the ELS sgl list */ 3545 /* Free the ELS sgl list */
3548 lpfc_free_active_sgl(phba); 3546 lpfc_free_active_sgl(phba);
@@ -7184,16 +7182,19 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
7184{ 7182{
7185 int max_xri = phba->sli4_hba.max_cfg_param.max_xri; 7183 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
7186 7184
7187 if (max_xri <= 100) 7185 if (phba->sli_rev == LPFC_SLI_REV4) {
7188 return 4; 7186 if (max_xri <= 100)
7189 else if (max_xri <= 256) 7187 return 4;
7190 return 8; 7188 else if (max_xri <= 256)
7191 else if (max_xri <= 512) 7189 return 8;
7192 return 16; 7190 else if (max_xri <= 512)
7193 else if (max_xri <= 1024) 7191 return 16;
7194 return 32; 7192 else if (max_xri <= 1024)
7195 else 7193 return 32;
7196 return 48; 7194 else
7195 return 48;
7196 } else
7197 return 0;
7197} 7198}
7198 7199
7199/** 7200/**
@@ -7642,7 +7643,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
7642 7643
7643 switch (dev_id) { 7644 switch (dev_id) {
7644 case PCI_DEVICE_ID_TIGERSHARK: 7645 case PCI_DEVICE_ID_TIGERSHARK:
7645 case PCI_DEVICE_ID_TIGERSHARK_S:
7646 rc = lpfc_pci_probe_one_s4(pdev, pid); 7646 rc = lpfc_pci_probe_one_s4(pdev, pid);
7647 break; 7647 break;
7648 default: 7648 default:
@@ -7941,8 +7941,6 @@ static struct pci_device_id lpfc_id_table[] = {
7941 PCI_ANY_ID, PCI_ANY_ID, }, 7941 PCI_ANY_ID, PCI_ANY_ID, },
7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, 7942 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
7943 PCI_ANY_ID, PCI_ANY_ID, }, 7943 PCI_ANY_ID, PCI_ANY_ID, },
7944 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
7945 PCI_ANY_ID, PCI_ANY_ID, },
7946 { 0 } 7944 { 0 }
7947}; 7945};
7948 7946
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index b9b451c09010..3423571dd1b3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1631,6 +1631,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1631 /* In case of malloc fails, proceed with whatever we have */ 1631 /* In case of malloc fails, proceed with whatever we have */
1632 if (!viraddr) 1632 if (!viraddr)
1633 break; 1633 break;
1634 memset(viraddr, 0, PAGE_SIZE);
1634 mbox->sge_array->addr[pagen] = viraddr; 1635 mbox->sge_array->addr[pagen] = viraddr;
1635 /* Keep the first page for later sub-header construction */ 1636 /* Keep the first page for later sub-header construction */
1636 if (pagen == 0) 1637 if (pagen == 0)
@@ -1715,8 +1716,10 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
1715 /* Set up host requested features. */ 1716 /* Set up host requested features. */
1716 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); 1717 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
1717 1718
1718 /* Virtual fabrics and FIPs are not supported yet. */ 1719 if (phba->cfg_enable_fip)
1719 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0); 1720 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
1721 else
1722 bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 1);
1720 1723
1721 /* Enable DIF (block guard) only if configured to do so. */ 1724 /* Enable DIF (block guard) only if configured to do so. */
1722 if (phba->cfg_enable_bg) 1725 if (phba->cfg_enable_bg)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 09f659f77bb3..3e74136f1ede 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -497,7 +497,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
497 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); 497 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
498 else 498 else
499 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 499 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
500 if ((ndlp->nlp_type & NLP_FABRIC) && 500 if ((ndlp->nlp_DID == Fabric_DID) &&
501 vport->port_type == LPFC_NPIV_PORT) { 501 vport->port_type == LPFC_NPIV_PORT) {
502 lpfc_linkdown_port(vport); 502 lpfc_linkdown_port(vport);
503 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 503 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7991ba1980ae..da59c4f0168f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -116,6 +116,27 @@ lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
116} 116}
117 117
118/** 118/**
119 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
120 * @phba: Pointer to HBA object.
121 * @lpfc_cmd: lpfc scsi command object pointer.
122 *
123 * This function is called from the lpfc_prep_task_mgmt_cmd function to
124 * set the last bit in the response sge entry.
125 **/
126static void
127lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
128 struct lpfc_scsi_buf *lpfc_cmd)
129{
130 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
131 if (sgl) {
132 sgl += 1;
133 sgl->word2 = le32_to_cpu(sgl->word2);
134 bf_set(lpfc_sli4_sge_last, sgl, 1);
135 sgl->word2 = cpu_to_le32(sgl->word2);
136 }
137}
138
139/**
119 * lpfc_update_stats - Update statistical data for the command completion 140 * lpfc_update_stats - Update statistical data for the command completion
120 * @phba: Pointer to HBA object. 141 * @phba: Pointer to HBA object.
121 * @lpfc_cmd: lpfc scsi command object pointer. 142 * @lpfc_cmd: lpfc scsi command object pointer.
@@ -1978,7 +1999,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1978} 1999}
1979 2000
1980/** 2001/**
1981 * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev 2002 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
1982 * @phba: The HBA for which this call is being executed. 2003 * @phba: The HBA for which this call is being executed.
1983 * @psb: The scsi buffer which is going to be un-mapped. 2004 * @psb: The scsi buffer which is going to be un-mapped.
1984 * 2005 *
@@ -1986,7 +2007,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1986 * field of @lpfc_cmd for device with SLI-3 interface spec. 2007 * field of @lpfc_cmd for device with SLI-3 interface spec.
1987 **/ 2008 **/
1988static void 2009static void
1989lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) 2010lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1990{ 2011{
1991 /* 2012 /*
1992 * There are only two special cases to consider. (1) the scsi command 2013 * There are only two special cases to consider. (1) the scsi command
@@ -2003,36 +2024,6 @@ lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2003} 2024}
2004 2025
2005/** 2026/**
2006 * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
2007 * @phba: The Hba for which this call is being executed.
2008 * @psb: The scsi buffer which is going to be un-mapped.
2009 *
2010 * This routine does DMA un-mapping of scatter gather list of scsi command
2011 * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
2012 * remove the sgl for this scsi buffer then we will do it here. For now
2013 * we should be able to just call the sli3 unprep routine.
2014 **/
2015static void
2016lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2017{
2018 lpfc_scsi_unprep_dma_buf_s3(phba, psb);
2019}
2020
2021/**
2022 * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
2023 * @phba: The Hba for which this call is being executed.
2024 * @psb: The scsi buffer which is going to be un-mapped.
2025 *
2026 * This routine does DMA un-mapping of scatter gather list of scsi command
2027 * field of @lpfc_cmd for device with SLI-4 interface spec.
2028 **/
2029static void
2030lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2031{
2032 phba->lpfc_scsi_unprep_dma_buf(phba, psb);
2033}
2034
2035/**
2036 * lpfc_handler_fcp_err - FCP response handler 2027 * lpfc_handler_fcp_err - FCP response handler
2037 * @vport: The virtual port for which this call is being executed. 2028 * @vport: The virtual port for which this call is being executed.
2038 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2029 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
@@ -2461,7 +2452,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
2461} 2452}
2462 2453
2463/** 2454/**
2464 * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP infor unit for SLI3 dev 2455 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
2465 * @vport: The virtual port for which this call is being executed. 2456 * @vport: The virtual port for which this call is being executed.
2466 * @lpfc_cmd: The scsi command which needs to send. 2457 * @lpfc_cmd: The scsi command which needs to send.
2467 * @pnode: Pointer to lpfc_nodelist. 2458 * @pnode: Pointer to lpfc_nodelist.
@@ -2470,7 +2461,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
2470 * to transfer for device with SLI3 interface spec. 2461 * to transfer for device with SLI3 interface spec.
2471 **/ 2462 **/
2472static void 2463static void
2473lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, 2464lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2474 struct lpfc_nodelist *pnode) 2465 struct lpfc_nodelist *pnode)
2475{ 2466{
2476 struct lpfc_hba *phba = vport->phba; 2467 struct lpfc_hba *phba = vport->phba;
@@ -2558,46 +2549,7 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2558} 2549}
2559 2550
2560/** 2551/**
2561 * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP infor unit for SLI4 dev 2552 * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
2562 * @vport: The virtual port for which this call is being executed.
2563 * @lpfc_cmd: The scsi command which needs to send.
2564 * @pnode: Pointer to lpfc_nodelist.
2565 *
2566 * This routine initializes fcp_cmnd and iocb data structure from scsi command
2567 * to transfer for device with SLI4 interface spec.
2568 **/
2569static void
2570lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2571 struct lpfc_nodelist *pnode)
2572{
2573 /*
2574 * The prep cmnd routines do not touch the sgl or its
2575 * entries. We may not have to do anything different.
2576 * I will leave this function in place until we can
2577 * run some IO through the driver and determine if changes
2578 * are needed.
2579 */
2580 return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
2581}
2582
2583/**
2584 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
2585 * @vport: The virtual port for which this call is being executed.
2586 * @lpfc_cmd: The scsi command which needs to send.
2587 * @pnode: Pointer to lpfc_nodelist.
2588 *
2589 * This routine wraps the actual convert SCSI cmnd function pointer from
2590 * the lpfc_hba struct.
2591 **/
2592static inline void
2593lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2594 struct lpfc_nodelist *pnode)
2595{
2596 vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
2597}
2598
2599/**
2600 * lpfc_scsi_prep_task_mgmt_cmnd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
2601 * @vport: The virtual port for which this call is being executed. 2553 * @vport: The virtual port for which this call is being executed.
2602 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. 2554 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2603 * @lun: Logical unit number. 2555 * @lun: Logical unit number.
@@ -2611,7 +2563,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2611 * 1 - Success 2563 * 1 - Success
2612 **/ 2564 **/
2613static int 2565static int
2614lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, 2566lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2615 struct lpfc_scsi_buf *lpfc_cmd, 2567 struct lpfc_scsi_buf *lpfc_cmd,
2616 unsigned int lun, 2568 unsigned int lun,
2617 uint8_t task_mgmt_cmd) 2569 uint8_t task_mgmt_cmd)
@@ -2653,68 +2605,13 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
2653 * The driver will provide the timeout mechanism. 2605 * The driver will provide the timeout mechanism.
2654 */ 2606 */
2655 piocb->ulpTimeout = 0; 2607 piocb->ulpTimeout = 0;
2656 } else { 2608 } else
2657 piocb->ulpTimeout = lpfc_cmd->timeout; 2609 piocb->ulpTimeout = lpfc_cmd->timeout;
2658 }
2659
2660 return 1;
2661}
2662
2663/**
2664 * lpfc_scsi_prep_task_mgmt_cmnd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
2665 * @vport: The virtual port for which this call is being executed.
2666 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2667 * @lun: Logical unit number.
2668 * @task_mgmt_cmd: SCSI task management command.
2669 *
2670 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2671 * for device with SLI-4 interface spec.
2672 *
2673 * Return codes:
2674 * 0 - Error
2675 * 1 - Success
2676 **/
2677static int
2678lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
2679 struct lpfc_scsi_buf *lpfc_cmd,
2680 unsigned int lun,
2681 uint8_t task_mgmt_cmd)
2682{
2683 /*
2684 * The prep cmnd routines do not touch the sgl or its
2685 * entries. We may not have to do anything different.
2686 * I will leave this function in place until we can
2687 * run some IO through the driver and determine if changes
2688 * are needed.
2689 */
2690 return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
2691 task_mgmt_cmd);
2692}
2693 2610
2694/** 2611 if (vport->phba->sli_rev == LPFC_SLI_REV4)
2695 * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info 2612 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
2696 * @vport: The virtual port for which this call is being executed.
2697 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2698 * @lun: Logical unit number.
2699 * @task_mgmt_cmd: SCSI task management command.
2700 *
2701 * This routine wraps the actual convert SCSI TM to FCP information unit
2702 * function pointer from the lpfc_hba struct.
2703 *
2704 * Return codes:
2705 * 0 - Error
2706 * 1 - Success
2707 **/
2708static inline int
2709lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2710 struct lpfc_scsi_buf *lpfc_cmd,
2711 unsigned int lun,
2712 uint8_t task_mgmt_cmd)
2713{
2714 struct lpfc_hba *phba = vport->phba;
2715 2613
2716 return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, 2614 return 1;
2717 task_mgmt_cmd);
2718} 2615}
2719 2616
2720/** 2617/**
@@ -2730,23 +2627,19 @@ int
2730lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 2627lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2731{ 2628{
2732 2629
2630 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
2631 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
2632 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2633
2733 switch (dev_grp) { 2634 switch (dev_grp) {
2734 case LPFC_PCI_DEV_LP: 2635 case LPFC_PCI_DEV_LP:
2735 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; 2636 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2736 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; 2637 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2737 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
2738 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
2739 phba->lpfc_scsi_prep_task_mgmt_cmd =
2740 lpfc_scsi_prep_task_mgmt_cmd_s3;
2741 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; 2638 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2742 break; 2639 break;
2743 case LPFC_PCI_DEV_OC: 2640 case LPFC_PCI_DEV_OC:
2744 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; 2641 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2745 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; 2642 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2746 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
2747 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
2748 phba->lpfc_scsi_prep_task_mgmt_cmd =
2749 lpfc_scsi_prep_task_mgmt_cmd_s4;
2750 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; 2643 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2751 break; 2644 break;
2752 default: 2645 default:
@@ -2783,72 +2676,6 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2783} 2676}
2784 2677
2785/** 2678/**
2786 * lpfc_scsi_tgt_reset - Target reset handler
2787 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
2788 * @vport: The virtual port for which this call is being executed.
2789 * @tgt_id: Target ID.
2790 * @lun: Lun number.
2791 * @rdata: Pointer to lpfc_rport_data.
2792 *
2793 * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
2794 *
2795 * Return Code:
2796 * 0x2003 - Error
2797 * 0x2002 - Success.
2798 **/
2799static int
2800lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
2801 unsigned tgt_id, unsigned int lun,
2802 struct lpfc_rport_data *rdata)
2803{
2804 struct lpfc_hba *phba = vport->phba;
2805 struct lpfc_iocbq *iocbq;
2806 struct lpfc_iocbq *iocbqrsp;
2807 int ret;
2808 int status;
2809
2810 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
2811 return FAILED;
2812
2813 lpfc_cmd->rdata = rdata;
2814 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
2815 FCP_TARGET_RESET);
2816 if (!status)
2817 return FAILED;
2818
2819 iocbq = &lpfc_cmd->cur_iocbq;
2820 iocbqrsp = lpfc_sli_get_iocbq(phba);
2821
2822 if (!iocbqrsp)
2823 return FAILED;
2824
2825 /* Issue Target Reset to TGT <num> */
2826 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2827 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2828 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2829 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
2830 iocbq, iocbqrsp, lpfc_cmd->timeout);
2831 if (status != IOCB_SUCCESS) {
2832 if (status == IOCB_TIMEDOUT) {
2833 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
2834 ret = TIMEOUT_ERROR;
2835 } else
2836 ret = FAILED;
2837 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2838 } else {
2839 ret = SUCCESS;
2840 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
2841 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
2842 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2843 (lpfc_cmd->result & IOERR_DRVR_MASK))
2844 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2845 }
2846
2847 lpfc_sli_release_iocbq(phba, iocbqrsp);
2848 return ret;
2849}
2850
2851/**
2852 * lpfc_info - Info entry point of scsi_host_template data structure 2679 * lpfc_info - Info entry point of scsi_host_template data structure
2853 * @host: The scsi host for which this call is being executed. 2680 * @host: The scsi host for which this call is being executed.
2854 * 2681 *
@@ -3228,156 +3055,334 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
3228 return ret; 3055 return ret;
3229} 3056}
3230 3057
3058static char *
3059lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
3060{
3061 switch (task_mgmt_cmd) {
3062 case FCP_ABORT_TASK_SET:
3063 return "ABORT_TASK_SET";
3064 case FCP_CLEAR_TASK_SET:
3065 return "FCP_CLEAR_TASK_SET";
3066 case FCP_BUS_RESET:
3067 return "FCP_BUS_RESET";
3068 case FCP_LUN_RESET:
3069 return "FCP_LUN_RESET";
3070 case FCP_TARGET_RESET:
3071 return "FCP_TARGET_RESET";
3072 case FCP_CLEAR_ACA:
3073 return "FCP_CLEAR_ACA";
3074 case FCP_TERMINATE_TASK:
3075 return "FCP_TERMINATE_TASK";
3076 default:
3077 return "unknown";
3078 }
3079}
3080
3231/** 3081/**
3232 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point 3082 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
3233 * @cmnd: Pointer to scsi_cmnd data structure. 3083 * @vport: The virtual port for which this call is being executed.
3084 * @rdata: Pointer to remote port local data
3085 * @tgt_id: Target ID of remote device.
3086 * @lun_id: Lun number for the TMF
3087 * @task_mgmt_cmd: type of TMF to send
3234 * 3088 *
3235 * This routine does a device reset by sending a TARGET_RESET task management 3089 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
3236 * command. 3090 * a remote port.
3237 * 3091 *
3238 * Return code : 3092 * Return Code:
3239 * 0x2003 - Error 3093 * 0x2003 - Error
3240 * 0x2002 - Success 3094 * 0x2002 - Success.
3241 **/ 3095 **/
3242static int 3096static int
3243lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 3097lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3098 unsigned tgt_id, unsigned int lun_id,
3099 uint8_t task_mgmt_cmd)
3244{ 3100{
3245 struct Scsi_Host *shost = cmnd->device->host;
3246 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3247 struct lpfc_hba *phba = vport->phba; 3101 struct lpfc_hba *phba = vport->phba;
3248 struct lpfc_scsi_buf *lpfc_cmd; 3102 struct lpfc_scsi_buf *lpfc_cmd;
3249 struct lpfc_iocbq *iocbq, *iocbqrsp; 3103 struct lpfc_iocbq *iocbq;
3250 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 3104 struct lpfc_iocbq *iocbqrsp;
3251 struct lpfc_nodelist *pnode = rdata->pnode; 3105 int ret;
3252 unsigned long later;
3253 int ret = SUCCESS;
3254 int status; 3106 int status;
3255 int cnt;
3256 struct lpfc_scsi_event_header scsi_event;
3257 3107
3258 lpfc_block_error_handler(cmnd); 3108 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
3259 /*
3260 * If target is not in a MAPPED state, delay the reset until
3261 * target is rediscovered or devloss timeout expires.
3262 */
3263 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3264 while (time_after(later, jiffies)) {
3265 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3266 return FAILED;
3267 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
3268 break;
3269 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
3270 rdata = cmnd->device->hostdata;
3271 if (!rdata)
3272 break;
3273 pnode = rdata->pnode;
3274 }
3275
3276 scsi_event.event_type = FC_REG_SCSI_EVENT;
3277 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
3278 scsi_event.lun = 0;
3279 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3280 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3281
3282 fc_host_post_vendor_event(shost,
3283 fc_get_event_number(),
3284 sizeof(scsi_event),
3285 (char *)&scsi_event,
3286 LPFC_NL_VENDOR_ID);
3287
3288 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
3289 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3290 "0721 LUN Reset rport "
3291 "failure: msec x%x rdata x%p\n",
3292 jiffies_to_msecs(jiffies - later), rdata);
3293 return FAILED; 3109 return FAILED;
3294 } 3110
3295 lpfc_cmd = lpfc_get_scsi_buf(phba); 3111 lpfc_cmd = lpfc_get_scsi_buf(phba);
3296 if (lpfc_cmd == NULL) 3112 if (lpfc_cmd == NULL)
3297 return FAILED; 3113 return FAILED;
3298 lpfc_cmd->timeout = 60; 3114 lpfc_cmd->timeout = 60;
3299 lpfc_cmd->rdata = rdata; 3115 lpfc_cmd->rdata = rdata;
3300 3116
3301 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, 3117 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
3302 cmnd->device->lun, 3118 task_mgmt_cmd);
3303 FCP_TARGET_RESET);
3304 if (!status) { 3119 if (!status) {
3305 lpfc_release_scsi_buf(phba, lpfc_cmd); 3120 lpfc_release_scsi_buf(phba, lpfc_cmd);
3306 return FAILED; 3121 return FAILED;
3307 } 3122 }
3308 iocbq = &lpfc_cmd->cur_iocbq;
3309 3123
3310 /* get a buffer for this IOCB command response */ 3124 iocbq = &lpfc_cmd->cur_iocbq;
3311 iocbqrsp = lpfc_sli_get_iocbq(phba); 3125 iocbqrsp = lpfc_sli_get_iocbq(phba);
3312 if (iocbqrsp == NULL) { 3126 if (iocbqrsp == NULL) {
3313 lpfc_release_scsi_buf(phba, lpfc_cmd); 3127 lpfc_release_scsi_buf(phba, lpfc_cmd);
3314 return FAILED; 3128 return FAILED;
3315 } 3129 }
3130
3316 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 3131 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3317 "0703 Issue target reset to TGT %d LUN %d " 3132 "0702 Issue %s to TGT %d LUN %d "
3318 "rpi x%x nlp_flag x%x\n", cmnd->device->id, 3133 "rpi x%x nlp_flag x%x\n",
3319 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 3134 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
3135 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
3136
3320 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, 3137 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
3321 iocbq, iocbqrsp, lpfc_cmd->timeout); 3138 iocbq, iocbqrsp, lpfc_cmd->timeout);
3322 if (status == IOCB_TIMEDOUT) { 3139 if (status != IOCB_SUCCESS) {
3323 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; 3140 if (status == IOCB_TIMEDOUT) {
3324 ret = TIMEOUT_ERROR; 3141 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
3325 } else { 3142 ret = TIMEOUT_ERROR;
3326 if (status != IOCB_SUCCESS) 3143 } else
3327 ret = FAILED; 3144 ret = FAILED;
3328 lpfc_release_scsi_buf(phba, lpfc_cmd); 3145 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3329 } 3146 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3330 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3147 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
3331 "0713 SCSI layer issued device reset (%d, %d) " 3148 lpfc_taskmgmt_name(task_mgmt_cmd),
3332 "return x%x status x%x result x%x\n", 3149 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
3333 cmnd->device->id, cmnd->device->lun, ret,
3334 iocbqrsp->iocb.ulpStatus,
3335 iocbqrsp->iocb.un.ulpWord[4]); 3150 iocbqrsp->iocb.un.ulpWord[4]);
3151 } else
3152 ret = SUCCESS;
3153
3336 lpfc_sli_release_iocbq(phba, iocbqrsp); 3154 lpfc_sli_release_iocbq(phba, iocbqrsp);
3337 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun, 3155
3338 LPFC_CTX_TGT); 3156 if (ret != TIMEOUT_ERROR)
3157 lpfc_release_scsi_buf(phba, lpfc_cmd);
3158
3159 return ret;
3160}
3161
3162/**
3163 * lpfc_chk_tgt_mapped -
3164 * @vport: The virtual port to check on
3165 * @cmnd: Pointer to scsi_cmnd data structure.
3166 *
3167 * This routine delays until the scsi target (aka rport) for the
3168 * command exists (is present and logged in) or we declare it non-existent.
3169 *
3170 * Return code :
3171 * 0x2003 - Error
3172 * 0x2002 - Success
3173 **/
3174static int
3175lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
3176{
3177 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3178 struct lpfc_nodelist *pnode = rdata->pnode;
3179 unsigned long later;
3180
3181 /*
3182 * If target is not in a MAPPED state, delay until
3183 * target is rediscovered or devloss timeout expires.
3184 */
3185 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3186 while (time_after(later, jiffies)) {
3187 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3188 return FAILED;
3189 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
3190 return SUCCESS;
3191 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
3192 rdata = cmnd->device->hostdata;
3193 if (!rdata)
3194 return FAILED;
3195 pnode = rdata->pnode;
3196 }
3197 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
3198 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3199 return FAILED;
3200 return SUCCESS;
3201}
3202
3203/**
3204 * lpfc_reset_flush_io_context -
3205 * @vport: The virtual port (scsi_host) for the flush context
3206 * @tgt_id: If aborting by Target contect - specifies the target id
3207 * @lun_id: If aborting by Lun context - specifies the lun id
3208 * @context: specifies the context level to flush at.
3209 *
3210 * After a reset condition via TMF, we need to flush orphaned i/o
3211 * contexts from the adapter. This routine aborts any contexts
3212 * outstanding, then waits for their completions. The wait is
3213 * bounded by devloss_tmo though.
3214 *
3215 * Return code :
3216 * 0x2003 - Error
3217 * 0x2002 - Success
3218 **/
3219static int
3220lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
3221 uint64_t lun_id, lpfc_ctx_cmd context)
3222{
3223 struct lpfc_hba *phba = vport->phba;
3224 unsigned long later;
3225 int cnt;
3226
3227 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3339 if (cnt) 3228 if (cnt)
3340 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 3229 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
3341 cmnd->device->id, cmnd->device->lun, 3230 tgt_id, lun_id, context);
3342 LPFC_CTX_TGT);
3343 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; 3231 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3344 while (time_after(later, jiffies) && cnt) { 3232 while (time_after(later, jiffies) && cnt) {
3345 schedule_timeout_uninterruptible(msecs_to_jiffies(20)); 3233 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
3346 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, 3234 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3347 cmnd->device->lun, LPFC_CTX_TGT);
3348 } 3235 }
3349 if (cnt) { 3236 if (cnt) {
3350 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3237 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3351 "0719 device reset I/O flush failure: " 3238 "0724 I/O flush failure for context %s : cnt x%x\n",
3352 "cnt x%x\n", cnt); 3239 ((context == LPFC_CTX_LUN) ? "LUN" :
3353 ret = FAILED; 3240 ((context == LPFC_CTX_TGT) ? "TGT" :
3241 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
3242 cnt);
3243 return FAILED;
3354 } 3244 }
3355 return ret; 3245 return SUCCESS;
3246}
3247
3248/**
3249 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
3250 * @cmnd: Pointer to scsi_cmnd data structure.
3251 *
3252 * This routine does a device reset by sending a LUN_RESET task management
3253 * command.
3254 *
3255 * Return code :
3256 * 0x2003 - Error
3257 * 0x2002 - Success
3258 **/
3259static int
3260lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3261{
3262 struct Scsi_Host *shost = cmnd->device->host;
3263 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3264 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3265 struct lpfc_nodelist *pnode = rdata->pnode;
3266 unsigned tgt_id = cmnd->device->id;
3267 unsigned int lun_id = cmnd->device->lun;
3268 struct lpfc_scsi_event_header scsi_event;
3269 int status;
3270
3271 lpfc_block_error_handler(cmnd);
3272
3273 status = lpfc_chk_tgt_mapped(vport, cmnd);
3274 if (status == FAILED) {
3275 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3276 "0721 Device Reset rport failure: rdata x%p\n", rdata);
3277 return FAILED;
3278 }
3279
3280 scsi_event.event_type = FC_REG_SCSI_EVENT;
3281 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
3282 scsi_event.lun = lun_id;
3283 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3284 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3285
3286 fc_host_post_vendor_event(shost, fc_get_event_number(),
3287 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3288
3289 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3290 FCP_LUN_RESET);
3291
3292 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3293 "0713 SCSI layer issued Device Reset (%d, %d) "
3294 "return x%x\n", tgt_id, lun_id, status);
3295
3296 /*
3297 * We have to clean up i/o as : they may be orphaned by the TMF;
3298 * or if the TMF failed, they may be in an indeterminate state.
3299 * So, continue on.
3300 * We will report success if all the i/o aborts successfully.
3301 */
3302 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3303 LPFC_CTX_LUN);
3304 return status;
3305}
3306
3307/**
3308 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
3309 * @cmnd: Pointer to scsi_cmnd data structure.
3310 *
3311 * This routine does a target reset by sending a TARGET_RESET task management
3312 * command.
3313 *
3314 * Return code :
3315 * 0x2003 - Error
3316 * 0x2002 - Success
3317 **/
3318static int
3319lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3320{
3321 struct Scsi_Host *shost = cmnd->device->host;
3322 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3323 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3324 struct lpfc_nodelist *pnode = rdata->pnode;
3325 unsigned tgt_id = cmnd->device->id;
3326 unsigned int lun_id = cmnd->device->lun;
3327 struct lpfc_scsi_event_header scsi_event;
3328 int status;
3329
3330 lpfc_block_error_handler(cmnd);
3331
3332 status = lpfc_chk_tgt_mapped(vport, cmnd);
3333 if (status == FAILED) {
3334 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3335 "0722 Target Reset rport failure: rdata x%p\n", rdata);
3336 return FAILED;
3337 }
3338
3339 scsi_event.event_type = FC_REG_SCSI_EVENT;
3340 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
3341 scsi_event.lun = 0;
3342 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3343 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3344
3345 fc_host_post_vendor_event(shost, fc_get_event_number(),
3346 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3347
3348 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3349 FCP_TARGET_RESET);
3350
3351 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3352 "0723 SCSI layer issued Target Reset (%d, %d) "
3353 "return x%x\n", tgt_id, lun_id, status);
3354
3355 /*
3356 * We have to clean up i/o as : they may be orphaned by the TMF;
3357 * or if the TMF failed, they may be in an indeterminate state.
3358 * So, continue on.
3359 * We will report success if all the i/o aborts successfully.
3360 */
3361 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3362 LPFC_CTX_TGT);
3363 return status;
3356} 3364}
3357 3365
3358/** 3366/**
3359 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point 3367 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
3360 * @cmnd: Pointer to scsi_cmnd data structure. 3368 * @cmnd: Pointer to scsi_cmnd data structure.
3361 * 3369 *
3362 * This routine does target reset to all target on @cmnd->device->host. 3370 * This routine does target reset to all targets on @cmnd->device->host.
3371 * This emulates Parallel SCSI Bus Reset Semantics.
3363 * 3372 *
3364 * Return Code: 3373 * Return code :
3365 * 0x2003 - Error 3374 * 0x2003 - Error
3366 * 0x2002 - Success 3375 * 0x2002 - Success
3367 **/ 3376 **/
3368static int 3377static int
3369lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 3378lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3370{ 3379{
3371 struct Scsi_Host *shost = cmnd->device->host; 3380 struct Scsi_Host *shost = cmnd->device->host;
3372 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 3381 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3373 struct lpfc_hba *phba = vport->phba;
3374 struct lpfc_nodelist *ndlp = NULL; 3382 struct lpfc_nodelist *ndlp = NULL;
3375 int match;
3376 int ret = SUCCESS, status = SUCCESS, i;
3377 int cnt;
3378 struct lpfc_scsi_buf * lpfc_cmd;
3379 unsigned long later;
3380 struct lpfc_scsi_event_header scsi_event; 3383 struct lpfc_scsi_event_header scsi_event;
3384 int match;
3385 int ret = SUCCESS, status, i;
3381 3386
3382 scsi_event.event_type = FC_REG_SCSI_EVENT; 3387 scsi_event.event_type = FC_REG_SCSI_EVENT;
3383 scsi_event.subcategory = LPFC_EVENT_BUSRESET; 3388 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
@@ -3385,13 +3390,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3385 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name)); 3390 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
3386 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); 3391 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
3387 3392
3388 fc_host_post_vendor_event(shost, 3393 fc_host_post_vendor_event(shost, fc_get_event_number(),
3389 fc_get_event_number(), 3394 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3390 sizeof(scsi_event),
3391 (char *)&scsi_event,
3392 LPFC_NL_VENDOR_ID);
3393 3395
3394 lpfc_block_error_handler(cmnd); 3396 lpfc_block_error_handler(cmnd);
3397
3395 /* 3398 /*
3396 * Since the driver manages a single bus device, reset all 3399 * Since the driver manages a single bus device, reset all
3397 * targets known to the driver. Should any target reset 3400 * targets known to the driver. Should any target reset
@@ -3414,16 +3417,11 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3414 spin_unlock_irq(shost->host_lock); 3417 spin_unlock_irq(shost->host_lock);
3415 if (!match) 3418 if (!match)
3416 continue; 3419 continue;
3417 lpfc_cmd = lpfc_get_scsi_buf(phba); 3420
3418 if (lpfc_cmd) { 3421 status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
3419 lpfc_cmd->timeout = 60; 3422 i, 0, FCP_TARGET_RESET);
3420 status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i, 3423
3421 cmnd->device->lun, 3424 if (status != SUCCESS) {
3422 ndlp->rport->dd_data);
3423 if (status != TIMEOUT_ERROR)
3424 lpfc_release_scsi_buf(phba, lpfc_cmd);
3425 }
3426 if (!lpfc_cmd || status != SUCCESS) {
3427 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3425 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3428 "0700 Bus Reset on target %d failed\n", 3426 "0700 Bus Reset on target %d failed\n",
3429 i); 3427 i);
@@ -3431,25 +3429,16 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3431 } 3429 }
3432 } 3430 }
3433 /* 3431 /*
3434 * All outstanding txcmplq I/Os should have been aborted by 3432 * We have to clean up i/o as : they may be orphaned by the TMFs
3435 * the targets. Unfortunately, some targets do not abide by 3433 * above; or if any of the TMFs failed, they may be in an
3436 * this forcing the driver to double check. 3434 * indeterminate state.
3435 * We will report success if all the i/o aborts successfully.
3437 */ 3436 */
3438 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST); 3437
3439 if (cnt) 3438 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
3440 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 3439 if (status != SUCCESS)
3441 0, 0, LPFC_CTX_HOST);
3442 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3443 while (time_after(later, jiffies) && cnt) {
3444 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
3445 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
3446 }
3447 if (cnt) {
3448 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3449 "0715 Bus Reset I/O flush failure: "
3450 "cnt x%x left x%x\n", cnt, i);
3451 ret = FAILED; 3440 ret = FAILED;
3452 } 3441
3453 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, 3442 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3454 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret); 3443 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
3455 return ret; 3444 return ret;
@@ -3582,7 +3571,8 @@ struct scsi_host_template lpfc_template = {
3582 .info = lpfc_info, 3571 .info = lpfc_info,
3583 .queuecommand = lpfc_queuecommand, 3572 .queuecommand = lpfc_queuecommand,
3584 .eh_abort_handler = lpfc_abort_handler, 3573 .eh_abort_handler = lpfc_abort_handler,
3585 .eh_device_reset_handler= lpfc_device_reset_handler, 3574 .eh_device_reset_handler = lpfc_device_reset_handler,
3575 .eh_target_reset_handler = lpfc_target_reset_handler,
3586 .eh_bus_reset_handler = lpfc_bus_reset_handler, 3576 .eh_bus_reset_handler = lpfc_bus_reset_handler,
3587 .slave_alloc = lpfc_slave_alloc, 3577 .slave_alloc = lpfc_slave_alloc,
3588 .slave_configure = lpfc_slave_configure, 3578 .slave_configure = lpfc_slave_configure,
@@ -3602,7 +3592,8 @@ struct scsi_host_template lpfc_vport_template = {
3602 .info = lpfc_info, 3592 .info = lpfc_info,
3603 .queuecommand = lpfc_queuecommand, 3593 .queuecommand = lpfc_queuecommand,
3604 .eh_abort_handler = lpfc_abort_handler, 3594 .eh_abort_handler = lpfc_abort_handler,
3605 .eh_device_reset_handler= lpfc_device_reset_handler, 3595 .eh_device_reset_handler = lpfc_device_reset_handler,
3596 .eh_target_reset_handler = lpfc_target_reset_handler,
3606 .eh_bus_reset_handler = lpfc_bus_reset_handler, 3597 .eh_bus_reset_handler = lpfc_bus_reset_handler,
3607 .slave_alloc = lpfc_slave_alloc, 3598 .slave_alloc = lpfc_slave_alloc,
3608 .slave_configure = lpfc_slave_configure, 3599 .slave_configure = lpfc_slave_configure,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index ff04daf18f48..acc43b061ba1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4139,8 +4139,11 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
4139 return -EIO; 4139 return -EIO;
4140 } 4140 }
4141 data_length = mqe->un.mb_words[5]; 4141 data_length = mqe->un.mb_words[5];
4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE) 4142 if (data_length > DMP_FCOEPARAM_RGN_SIZE) {
4143 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4144 kfree(mp);
4143 return -EIO; 4145 return -EIO;
4146 }
4144 4147
4145 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4148 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4146 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4149 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -4211,27 +4214,6 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4211 return -EIO; 4214 return -EIO;
4212 } 4215 }
4213 4216
4214 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4215 "(%d):0380 Mailbox cmd x%x Status x%x "
4216 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4217 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4218 "CQ: x%x x%x x%x x%x\n",
4219 mboxq->vport ? mboxq->vport->vpi : 0,
4220 bf_get(lpfc_mqe_command, mqe),
4221 bf_get(lpfc_mqe_status, mqe),
4222 mqe->un.mb_words[0], mqe->un.mb_words[1],
4223 mqe->un.mb_words[2], mqe->un.mb_words[3],
4224 mqe->un.mb_words[4], mqe->un.mb_words[5],
4225 mqe->un.mb_words[6], mqe->un.mb_words[7],
4226 mqe->un.mb_words[8], mqe->un.mb_words[9],
4227 mqe->un.mb_words[10], mqe->un.mb_words[11],
4228 mqe->un.mb_words[12], mqe->un.mb_words[13],
4229 mqe->un.mb_words[14], mqe->un.mb_words[15],
4230 mqe->un.mb_words[16], mqe->un.mb_words[50],
4231 mboxq->mcqe.word0,
4232 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4233 mboxq->mcqe.trailer);
4234
4235 /* 4217 /*
4236 * The available vpd length cannot be bigger than the 4218 * The available vpd length cannot be bigger than the
4237 * DMA buffer passed to the port. Catch the less than 4219 * DMA buffer passed to the port. Catch the less than
@@ -4337,21 +4319,18 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4337 goto out_free_vpd; 4319 goto out_free_vpd;
4338 4320
4339 mqe = &mboxq->u.mqe; 4321 mqe = &mboxq->u.mqe;
4340 if ((bf_get(lpfc_mbx_rd_rev_sli_lvl, 4322 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
4341 &mqe->un.read_rev) != LPFC_SLI_REV4) || 4323 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
4342 (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) { 4324 phba->hba_flag |= HBA_FCOE_SUPPORT;
4325 if (phba->sli_rev != LPFC_SLI_REV4 ||
4326 !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
4343 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4327 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4344 "0376 READ_REV Error. SLI Level %d " 4328 "0376 READ_REV Error. SLI Level %d "
4345 "FCoE enabled %d\n", 4329 "FCoE enabled %d\n",
4346 bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev), 4330 phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
4347 bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
4348 rc = -EIO; 4331 rc = -EIO;
4349 goto out_free_vpd; 4332 goto out_free_vpd;
4350 } 4333 }
4351 /* Single threaded at this point, no need for lock */
4352 spin_lock_irq(&phba->hbalock);
4353 phba->hba_flag |= HBA_FCOE_SUPPORT;
4354 spin_unlock_irq(&phba->hbalock);
4355 /* 4334 /*
4356 * Evaluate the read rev and vpd data. Populate the driver 4335 * Evaluate the read rev and vpd data. Populate the driver
4357 * state with the results. If this routine fails, the failure 4336 * state with the results. If this routine fails, the failure
@@ -4365,8 +4344,32 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4365 rc = 0; 4344 rc = 0;
4366 } 4345 }
4367 4346
4368 /* By now, we should determine the SLI revision, hard code for now */ 4347 /* Save information as VPD data */
4369 phba->sli_rev = LPFC_SLI_REV4; 4348 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
4349 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
4350 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
4351 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
4352 &mqe->un.read_rev);
4353 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
4354 &mqe->un.read_rev);
4355 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
4356 &mqe->un.read_rev);
4357 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
4358 &mqe->un.read_rev);
4359 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
4360 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
4361 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
4362 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
4363 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
4364 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
4365 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4366 "(%d):0380 READ_REV Status x%x "
4367 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
4368 mboxq->vport ? mboxq->vport->vpi : 0,
4369 bf_get(lpfc_mqe_status, mqe),
4370 phba->vpd.rev.opFwName,
4371 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
4372 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
4370 4373
4371 /* 4374 /*
4372 * Discover the port's supported feature set and match it against the 4375 * Discover the port's supported feature set and match it against the
@@ -4491,8 +4494,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4491 rc = -ENODEV; 4494 rc = -ENODEV;
4492 goto out_free_vpd; 4495 goto out_free_vpd;
4493 } 4496 }
4494 /* Temporary initialization of lpfc_fip_flag to non-fip */ 4497 if (phba->cfg_enable_fip)
4495 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); 4498 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
4499 else
4500 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4496 4501
4497 /* Set up all the queues to the device */ 4502 /* Set up all the queues to the device */
4498 rc = lpfc_sli4_queue_setup(phba); 4503 rc = lpfc_sli4_queue_setup(phba);
@@ -5030,6 +5035,92 @@ out_not_finished:
5030} 5035}
5031 5036
5032/** 5037/**
5038 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
5039 * @phba: Pointer to HBA context object.
5040 *
5041 * The function blocks the posting of SLI4 asynchronous mailbox commands from
5042 * the driver internal pending mailbox queue. It will then try to wait out the
5043 * possible outstanding mailbox command before return.
5044 *
5045 * Returns:
5046 * 0 - the outstanding mailbox command completed; otherwise, the wait for
5047 * the outstanding mailbox command timed out.
5048 **/
5049static int
5050lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
5051{
5052 struct lpfc_sli *psli = &phba->sli;
5053 uint8_t actcmd = MBX_HEARTBEAT;
5054 int rc = 0;
5055 unsigned long timeout;
5056
5057 /* Mark the asynchronous mailbox command posting as blocked */
5058 spin_lock_irq(&phba->hbalock);
5059 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5060 if (phba->sli.mbox_active)
5061 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5062 spin_unlock_irq(&phba->hbalock);
5063 /* Determine how long we might wait for the active mailbox
5064 * command to be gracefully completed by firmware.
5065 */
5066 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
5067 jiffies;
5068 /* Wait for the outstnading mailbox command to complete */
5069 while (phba->sli.mbox_active) {
5070 /* Check active mailbox complete status every 2ms */
5071 msleep(2);
5072 if (time_after(jiffies, timeout)) {
5073 /* Timeout, marked the outstanding cmd not complete */
5074 rc = 1;
5075 break;
5076 }
5077 }
5078
5079 /* Can not cleanly block async mailbox command, fails it */
5080 if (rc) {
5081 spin_lock_irq(&phba->hbalock);
5082 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5083 spin_unlock_irq(&phba->hbalock);
5084 }
5085 return rc;
5086}
5087
5088/**
5089 * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command
5090 * @phba: Pointer to HBA context object.
5091 *
5092 * The function unblocks and resume posting of SLI4 asynchronous mailbox
5093 * commands from the driver internal pending mailbox queue. It makes sure
5094 * that there is no outstanding mailbox command before resuming posting
5095 * asynchronous mailbox commands. If, for any reason, there is outstanding
5096 * mailbox command, it will try to wait it out before resuming asynchronous
5097 * mailbox command posting.
5098 **/
5099static void
5100lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
5101{
5102 struct lpfc_sli *psli = &phba->sli;
5103
5104 spin_lock_irq(&phba->hbalock);
5105 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5106 /* Asynchronous mailbox posting is not blocked, do nothing */
5107 spin_unlock_irq(&phba->hbalock);
5108 return;
5109 }
5110
5111 /* Outstanding synchronous mailbox command is guaranteed to be done,
5112 * successful or timeout, after timing-out the outstanding mailbox
5113 * command shall always be removed, so just unblock posting async
5114 * mailbox command and resume
5115 */
5116 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5117 spin_unlock_irq(&phba->hbalock);
5118
5119 /* wake up worker thread to post asynchronlous mailbox command */
5120 lpfc_worker_wake_up(phba);
5121}
5122
5123/**
5033 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 5124 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
5034 * @phba: Pointer to HBA context object. 5125 * @phba: Pointer to HBA context object.
5035 * @mboxq: Pointer to mailbox object. 5126 * @mboxq: Pointer to mailbox object.
@@ -5204,14 +5295,35 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5204 psli->sli_flag, flag); 5295 psli->sli_flag, flag);
5205 return rc; 5296 return rc;
5206 } else if (flag == MBX_POLL) { 5297 } else if (flag == MBX_POLL) {
5207 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5298 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
5208 "(%d):2542 Mailbox command x%x (x%x) " 5299 "(%d):2542 Try to issue mailbox command "
5209 "cannot issue Data: x%x x%x\n", 5300 "x%x (x%x) synchronously ahead of async"
5301 "mailbox command queue: x%x x%x\n",
5210 mboxq->vport ? mboxq->vport->vpi : 0, 5302 mboxq->vport ? mboxq->vport->vpi : 0,
5211 mboxq->u.mb.mbxCommand, 5303 mboxq->u.mb.mbxCommand,
5212 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5304 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5213 psli->sli_flag, flag); 5305 psli->sli_flag, flag);
5214 return -EIO; 5306 /* Try to block the asynchronous mailbox posting */
5307 rc = lpfc_sli4_async_mbox_block(phba);
5308 if (!rc) {
5309 /* Successfully blocked, now issue sync mbox cmd */
5310 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
5311 if (rc != MBX_SUCCESS)
5312 lpfc_printf_log(phba, KERN_ERR,
5313 LOG_MBOX | LOG_SLI,
5314 "(%d):2597 Mailbox command "
5315 "x%x (x%x) cannot issue "
5316 "Data: x%x x%x\n",
5317 mboxq->vport ?
5318 mboxq->vport->vpi : 0,
5319 mboxq->u.mb.mbxCommand,
5320 lpfc_sli4_mbox_opcode_get(phba,
5321 mboxq),
5322 psli->sli_flag, flag);
5323 /* Unblock the async mailbox posting afterward */
5324 lpfc_sli4_async_mbox_unblock(phba);
5325 }
5326 return rc;
5215 } 5327 }
5216 5328
5217 /* Now, interrupt mode asynchrous mailbox command */ 5329 /* Now, interrupt mode asynchrous mailbox command */
@@ -5749,18 +5861,13 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5749 5861
5750 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); 5862 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
5751 /* The fcp commands will set command type */ 5863 /* The fcp commands will set command type */
5752 if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip)) 5864 if (iocbq->iocb_flag & LPFC_IO_FCP)
5753 command_type = ELS_COMMAND_NON_FIP;
5754 else if (!(iocbq->iocb_flag & LPFC_IO_FCP))
5755 command_type = ELS_COMMAND_FIP;
5756 else if (iocbq->iocb_flag & LPFC_IO_FCP)
5757 command_type = FCP_COMMAND; 5865 command_type = FCP_COMMAND;
5758 else { 5866 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS))
5759 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5867 command_type = ELS_COMMAND_FIP;
5760 "2019 Invalid cmd 0x%x\n", 5868 else
5761 iocbq->iocb.ulpCommand); 5869 command_type = ELS_COMMAND_NON_FIP;
5762 return IOCB_ERROR; 5870
5763 }
5764 /* Some of the fields are in the right position already */ 5871 /* Some of the fields are in the right position already */
5765 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 5872 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5766 abort_tag = (uint32_t) iocbq->iotag; 5873 abort_tag = (uint32_t) iocbq->iotag;
@@ -5814,11 +5921,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5814 bf_set(lpfc_wqe_gen_context, &wqe->generic, 5921 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5815 iocbq->iocb.ulpContext); 5922 iocbq->iocb.ulpContext);
5816 5923
5817 if (iocbq->vport->fc_myDID != 0) {
5818 bf_set(els_req64_sid, &wqe->els_req,
5819 iocbq->vport->fc_myDID);
5820 bf_set(els_req64_sp, &wqe->els_req, 1);
5821 }
5822 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); 5924 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5823 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); 5925 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5824 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 5926 /* CCP CCPE PV PRI in word10 were set in the memcpy */
@@ -5877,14 +5979,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5877 * is set and we are sending our 2nd or greater command on 5979 * is set and we are sending our 2nd or greater command on
5878 * this exchange. 5980 * this exchange.
5879 */ 5981 */
5982 /* Always open the exchange */
5983 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5880 5984
5881 /* ALLOW read & write to fall through to ICMD64 */ 5985 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5986 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5987 break;
5882 case CMD_FCP_ICMND64_CR: 5988 case CMD_FCP_ICMND64_CR:
5883 /* Always open the exchange */ 5989 /* Always open the exchange */
5884 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 5990 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5885 5991
5992 wqe->words[4] = 0;
5886 wqe->words[10] &= 0xffff0000; /* zero out ebde count */ 5993 wqe->words[10] &= 0xffff0000; /* zero out ebde count */
5887 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); 5994 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5888 break; 5995 break;
5889 case CMD_GEN_REQUEST64_CR: 5996 case CMD_GEN_REQUEST64_CR:
5890 /* word3 command length is described as byte offset to the 5997 /* word3 command length is described as byte offset to the
@@ -7247,6 +7354,32 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7247} 7354}
7248 7355
7249/** 7356/**
7357 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
7358 * @phba: Pointer to HBA context object..
7359 * @piocbq: Pointer to command iocb.
7360 * @flag: Flag to test.
7361 *
7362 * This routine grabs the hbalock and then test the iocb_flag to
7363 * see if the passed in flag is set.
7364 * Returns:
7365 * 1 if flag is set.
7366 * 0 if flag is not set.
7367 **/
7368static int
7369lpfc_chk_iocb_flg(struct lpfc_hba *phba,
7370 struct lpfc_iocbq *piocbq, uint32_t flag)
7371{
7372 unsigned long iflags;
7373 int ret;
7374
7375 spin_lock_irqsave(&phba->hbalock, iflags);
7376 ret = piocbq->iocb_flag & flag;
7377 spin_unlock_irqrestore(&phba->hbalock, iflags);
7378 return ret;
7379
7380}
7381
7382/**
7250 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 7383 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
7251 * @phba: Pointer to HBA context object.. 7384 * @phba: Pointer to HBA context object..
7252 * @pring: Pointer to sli ring. 7385 * @pring: Pointer to sli ring.
@@ -7313,7 +7446,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
7313 if (retval == IOCB_SUCCESS) { 7446 if (retval == IOCB_SUCCESS) {
7314 timeout_req = timeout * HZ; 7447 timeout_req = timeout * HZ;
7315 timeleft = wait_event_timeout(done_q, 7448 timeleft = wait_event_timeout(done_q,
7316 piocb->iocb_flag & LPFC_IO_WAKE, 7449 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
7317 timeout_req); 7450 timeout_req);
7318 7451
7319 if (piocb->iocb_flag & LPFC_IO_WAKE) { 7452 if (piocb->iocb_flag & LPFC_IO_WAKE) {
@@ -7498,20 +7631,16 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba)
7498 if ((HS_FFER1 & phba->work_hs) && 7631 if ((HS_FFER1 & phba->work_hs) &&
7499 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 7632 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7500 HS_FFER6 | HS_FFER7) & phba->work_hs)) { 7633 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7501 spin_lock_irq(&phba->hbalock);
7502 phba->hba_flag |= DEFER_ERATT; 7634 phba->hba_flag |= DEFER_ERATT;
7503 spin_unlock_irq(&phba->hbalock);
7504 /* Clear all interrupt enable conditions */ 7635 /* Clear all interrupt enable conditions */
7505 writel(0, phba->HCregaddr); 7636 writel(0, phba->HCregaddr);
7506 readl(phba->HCregaddr); 7637 readl(phba->HCregaddr);
7507 } 7638 }
7508 7639
7509 /* Set the driver HA work bitmap */ 7640 /* Set the driver HA work bitmap */
7510 spin_lock_irq(&phba->hbalock);
7511 phba->work_ha |= HA_ERATT; 7641 phba->work_ha |= HA_ERATT;
7512 /* Indicate polling handles this ERATT */ 7642 /* Indicate polling handles this ERATT */
7513 phba->hba_flag |= HBA_ERATT_HANDLED; 7643 phba->hba_flag |= HBA_ERATT_HANDLED;
7514 spin_unlock_irq(&phba->hbalock);
7515 return 1; 7644 return 1;
7516 } 7645 }
7517 return 0; 7646 return 0;
@@ -7557,12 +7686,10 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
7557 return 0; 7686 return 0;
7558 phba->work_status[0] = uerr_sta_lo; 7687 phba->work_status[0] = uerr_sta_lo;
7559 phba->work_status[1] = uerr_sta_hi; 7688 phba->work_status[1] = uerr_sta_hi;
7560 spin_lock_irq(&phba->hbalock);
7561 /* Set the driver HA work bitmap */ 7689 /* Set the driver HA work bitmap */
7562 phba->work_ha |= HA_ERATT; 7690 phba->work_ha |= HA_ERATT;
7563 /* Indicate polling handles this ERATT */ 7691 /* Indicate polling handles this ERATT */
7564 phba->hba_flag |= HBA_ERATT_HANDLED; 7692 phba->hba_flag |= HBA_ERATT_HANDLED;
7565 spin_unlock_irq(&phba->hbalock);
7566 return 1; 7693 return 1;
7567 } 7694 }
7568 } 7695 }
@@ -9245,6 +9372,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9245 kfree(dmabuf); 9372 kfree(dmabuf);
9246 goto out_fail; 9373 goto out_fail;
9247 } 9374 }
9375 memset(dmabuf->virt, 0, PAGE_SIZE);
9248 dmabuf->buffer_tag = x; 9376 dmabuf->buffer_tag = x;
9249 list_add_tail(&dmabuf->list, &queue->page_list); 9377 list_add_tail(&dmabuf->list, &queue->page_list);
9250 /* initialize queue's entry array */ 9378 /* initialize queue's entry array */
@@ -9667,7 +9795,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
9667 /* link the wq onto the parent cq child list */ 9795 /* link the wq onto the parent cq child list */
9668 list_add_tail(&wq->list, &cq->child_list); 9796 list_add_tail(&wq->list, &cq->child_list);
9669out: 9797out:
9670 if (rc == MBX_TIMEOUT) 9798 if (rc != MBX_TIMEOUT)
9671 mempool_free(mbox, phba->mbox_mem_pool); 9799 mempool_free(mbox, phba->mbox_mem_pool);
9672 return status; 9800 return status;
9673} 9801}
@@ -11020,10 +11148,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
11020 rpi_page->start_rpi); 11148 rpi_page->start_rpi);
11021 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 11149 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
11022 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 11150 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
11023 if (!phba->sli4_hba.intr_enable) 11151 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11024 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11025 else
11026 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11027 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 11152 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
11028 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11153 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11029 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11154 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
@@ -11363,6 +11488,7 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
11363 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 11488 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
11364 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 11489 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
11365 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 11490 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
11491 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
11366 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 11492 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
11367 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 11493 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
11368 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 11494 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 7d37eb7459bf..3c53316cf6d0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -56,6 +56,7 @@ struct lpfc_iocbq {
56#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 56#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
57#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ 57#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
58#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ 58#define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */
59#define LPFC_FIP_ELS 0x40
59 60
60 uint8_t abort_count; 61 uint8_t abort_count;
61 uint8_t rsvd2; 62 uint8_t rsvd2;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 5196b46608d7..3b276b47d18f 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -229,7 +229,7 @@ struct lpfc_bmbx {
229 229
230#define LPFC_EQE_DEF_COUNT 1024 230#define LPFC_EQE_DEF_COUNT 1024
231#define LPFC_CQE_DEF_COUNT 256 231#define LPFC_CQE_DEF_COUNT 256
232#define LPFC_WQE_DEF_COUNT 64 232#define LPFC_WQE_DEF_COUNT 256
233#define LPFC_MQE_DEF_COUNT 16 233#define LPFC_MQE_DEF_COUNT 16
234#define LPFC_RQE_DEF_COUNT 512 234#define LPFC_RQE_DEF_COUNT 512
235 235
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6b8a148f0a55..41094e02304b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.2" 21#define LPFC_DRIVER_VERSION "8.3.3"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 24#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a6313ee84ac5..e0b49922193e 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -695,8 +695,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
695 } 695 }
696 vport->unreg_vpi_cmpl = VPORT_INVAL; 696 vport->unreg_vpi_cmpl = VPORT_INVAL;
697 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 697 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
698 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
699 goto skip_logo;
700 if (!lpfc_issue_els_npiv_logo(vport, ndlp)) 698 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
701 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) 699 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
702 timeout = schedule_timeout(timeout); 700 timeout = schedule_timeout(timeout);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 3b7240e40819..e3c482aa87b5 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -5444,7 +5444,7 @@ static void ncr_getsync(struct ncb *np, u_char sfac, u_char *fakp, u_char *scntl
5444 ** input speed faster than the period. 5444 ** input speed faster than the period.
5445 */ 5445 */
5446 kpc = per * clk; 5446 kpc = per * clk;
5447 while (--div >= 0) 5447 while (--div > 0)
5448 if (kpc >= (div_10M[div] << 2)) break; 5448 if (kpc >= (div_10M[div] << 2)) break;
5449 5449
5450 /* 5450 /*
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 11a61ea8d5d9..70b60ade049e 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -530,7 +530,7 @@ static int nsp_negate_signal(struct scsi_cmnd *SCpnt, unsigned char mask,
530 if (reg == 0xff) { 530 if (reg == 0xff) {
531 break; 531 break;
532 } 532 }
533 } while ((time_out-- != 0) && (reg & mask) != 0); 533 } while ((--time_out != 0) && (reg & mask) != 0);
534 534
535 if (time_out == 0) { 535 if (time_out == 0) {
536 nsp_msg(KERN_DEBUG, " %s signal off timeut", str); 536 nsp_msg(KERN_DEBUG, " %s signal off timeut", str);
@@ -801,7 +801,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
801 801
802 data->FifoCount = ocount; 802 data->FifoCount = ocount;
803 803
804 if (time_out == 0) { 804 if (time_out < 0) {
805 nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d", 805 nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d",
806 scsi_get_resid(SCpnt), SCpnt->SCp.this_residual, 806 scsi_get_resid(SCpnt), SCpnt->SCp.this_residual,
807 SCpnt->SCp.buffers_residual); 807 SCpnt->SCp.buffers_residual);
@@ -897,7 +897,7 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
897 897
898 data->FifoCount = ocount; 898 data->FifoCount = ocount;
899 899
900 if (time_out == 0) { 900 if (time_out < 0) {
901 nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x", 901 nsp_msg(KERN_DEBUG, "pio write timeout resid=0x%x",
902 scsi_get_resid(SCpnt)); 902 scsi_get_resid(SCpnt));
903 } 903 }
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index c8d0a176fea4..245e7afb4c4d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -37,6 +37,7 @@ qla2100_intr_handler(int irq, void *dev_id)
37 uint16_t hccr; 37 uint16_t hccr;
38 uint16_t mb[4]; 38 uint16_t mb[4];
39 struct rsp_que *rsp; 39 struct rsp_que *rsp;
40 unsigned long flags;
40 41
41 rsp = (struct rsp_que *) dev_id; 42 rsp = (struct rsp_que *) dev_id;
42 if (!rsp) { 43 if (!rsp) {
@@ -49,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id)
49 reg = &ha->iobase->isp; 50 reg = &ha->iobase->isp;
50 status = 0; 51 status = 0;
51 52
52 spin_lock(&ha->hardware_lock); 53 spin_lock_irqsave(&ha->hardware_lock, flags);
53 vha = pci_get_drvdata(ha->pdev); 54 vha = pci_get_drvdata(ha->pdev);
54 for (iter = 50; iter--; ) { 55 for (iter = 50; iter--; ) {
55 hccr = RD_REG_WORD(&reg->hccr); 56 hccr = RD_REG_WORD(&reg->hccr);
@@ -101,7 +102,7 @@ qla2100_intr_handler(int irq, void *dev_id)
101 RD_REG_WORD(&reg->hccr); 102 RD_REG_WORD(&reg->hccr);
102 } 103 }
103 } 104 }
104 spin_unlock(&ha->hardware_lock); 105 spin_unlock_irqrestore(&ha->hardware_lock, flags);
105 106
106 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 107 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
107 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 108 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -133,6 +134,7 @@ qla2300_intr_handler(int irq, void *dev_id)
133 uint16_t mb[4]; 134 uint16_t mb[4];
134 struct rsp_que *rsp; 135 struct rsp_que *rsp;
135 struct qla_hw_data *ha; 136 struct qla_hw_data *ha;
137 unsigned long flags;
136 138
137 rsp = (struct rsp_que *) dev_id; 139 rsp = (struct rsp_que *) dev_id;
138 if (!rsp) { 140 if (!rsp) {
@@ -145,7 +147,7 @@ qla2300_intr_handler(int irq, void *dev_id)
145 reg = &ha->iobase->isp; 147 reg = &ha->iobase->isp;
146 status = 0; 148 status = 0;
147 149
148 spin_lock(&ha->hardware_lock); 150 spin_lock_irqsave(&ha->hardware_lock, flags);
149 vha = pci_get_drvdata(ha->pdev); 151 vha = pci_get_drvdata(ha->pdev);
150 for (iter = 50; iter--; ) { 152 for (iter = 50; iter--; ) {
151 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 153 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
@@ -216,7 +218,7 @@ qla2300_intr_handler(int irq, void *dev_id)
216 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 218 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
217 RD_REG_WORD_RELAXED(&reg->hccr); 219 RD_REG_WORD_RELAXED(&reg->hccr);
218 } 220 }
219 spin_unlock(&ha->hardware_lock); 221 spin_unlock_irqrestore(&ha->hardware_lock, flags);
220 222
221 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 223 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
222 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 224 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -1626,6 +1628,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1626 uint32_t hccr; 1628 uint32_t hccr;
1627 uint16_t mb[4]; 1629 uint16_t mb[4];
1628 struct rsp_que *rsp; 1630 struct rsp_que *rsp;
1631 unsigned long flags;
1629 1632
1630 rsp = (struct rsp_que *) dev_id; 1633 rsp = (struct rsp_que *) dev_id;
1631 if (!rsp) { 1634 if (!rsp) {
@@ -1638,7 +1641,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1638 reg = &ha->iobase->isp24; 1641 reg = &ha->iobase->isp24;
1639 status = 0; 1642 status = 0;
1640 1643
1641 spin_lock(&ha->hardware_lock); 1644 spin_lock_irqsave(&ha->hardware_lock, flags);
1642 vha = pci_get_drvdata(ha->pdev); 1645 vha = pci_get_drvdata(ha->pdev);
1643 for (iter = 50; iter--; ) { 1646 for (iter = 50; iter--; ) {
1644 stat = RD_REG_DWORD(&reg->host_status); 1647 stat = RD_REG_DWORD(&reg->host_status);
@@ -1688,7 +1691,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1688 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1691 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1689 RD_REG_DWORD_RELAXED(&reg->hccr); 1692 RD_REG_DWORD_RELAXED(&reg->hccr);
1690 } 1693 }
1691 spin_unlock(&ha->hardware_lock); 1694 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1692 1695
1693 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 1696 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1694 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 1697 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 6260505dceb5..010e69b29afe 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -945,7 +945,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
945 945
946 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx " 946 DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
947 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, 947 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
948 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), 948 (unsigned long long)vid.port_name,
949 (unsigned long long)vid.node_name,
950 le16_to_cpu(entry->vf_id),
949 entry->q_qos, entry->f_qos)); 951 entry->q_qos, entry->f_qos));
950 952
951 if (i < QLA_PRECONFIG_VPORTS) { 953 if (i < QLA_PRECONFIG_VPORTS) {
@@ -954,7 +956,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
954 qla_printk(KERN_INFO, ha, 956 qla_printk(KERN_INFO, ha,
955 "NPIV-Config: Failed to create vport [%02x]: " 957 "NPIV-Config: Failed to create vport [%02x]: "
956 "wwpn=%llx wwnn=%llx.\n", cnt, 958 "wwpn=%llx wwnn=%llx.\n", cnt,
957 vid.port_name, vid.node_name); 959 (unsigned long long)vid.port_name,
960 (unsigned long long)vid.node_name);
958 } 961 }
959 } 962 }
960done: 963done:
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index b13481369642..8821df9a277b 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -225,6 +225,7 @@ static struct {
225 {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 225 {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
226 {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 226 {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
227 {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 227 {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
228 {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
228 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, 229 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
229 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, 230 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
230 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ 231 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index a152f89ae51c..3f64d93b6c8b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -35,6 +35,7 @@
35#include <linux/netlink.h> 35#include <linux/netlink.h>
36#include <net/netlink.h> 36#include <net/netlink.h>
37#include <scsi/scsi_netlink_fc.h> 37#include <scsi/scsi_netlink_fc.h>
38#include <scsi/scsi_bsg_fc.h>
38#include "scsi_priv.h" 39#include "scsi_priv.h"
39#include "scsi_transport_fc_internal.h" 40#include "scsi_transport_fc_internal.h"
40 41
@@ -43,6 +44,10 @@ static void fc_vport_sched_delete(struct work_struct *work);
43static int fc_vport_setup(struct Scsi_Host *shost, int channel, 44static int fc_vport_setup(struct Scsi_Host *shost, int channel,
44 struct device *pdev, struct fc_vport_identifiers *ids, 45 struct device *pdev, struct fc_vport_identifiers *ids,
45 struct fc_vport **vport); 46 struct fc_vport **vport);
47static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
48static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
49static void fc_bsg_remove(struct request_queue *);
50static void fc_bsg_goose_queue(struct fc_rport *);
46 51
47/* 52/*
48 * Redefine so that we can have same named attributes in the 53 * Redefine so that we can have same named attributes in the
@@ -411,13 +416,26 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
411 return -ENOMEM; 416 return -ENOMEM;
412 } 417 }
413 418
419 fc_bsg_hostadd(shost, fc_host);
420 /* ignore any bsg add error - we just can't do sgio */
421
422 return 0;
423}
424
425static int fc_host_remove(struct transport_container *tc, struct device *dev,
426 struct device *cdev)
427{
428 struct Scsi_Host *shost = dev_to_shost(dev);
429 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
430
431 fc_bsg_remove(fc_host->rqst_q);
414 return 0; 432 return 0;
415} 433}
416 434
417static DECLARE_TRANSPORT_CLASS(fc_host_class, 435static DECLARE_TRANSPORT_CLASS(fc_host_class,
418 "fc_host", 436 "fc_host",
419 fc_host_setup, 437 fc_host_setup,
420 NULL, 438 fc_host_remove,
421 NULL); 439 NULL);
422 440
423/* 441/*
@@ -2375,6 +2393,7 @@ fc_rport_final_delete(struct work_struct *work)
2375 scsi_flush_work(shost); 2393 scsi_flush_work(shost);
2376 2394
2377 fc_terminate_rport_io(rport); 2395 fc_terminate_rport_io(rport);
2396
2378 /* 2397 /*
2379 * Cancel any outstanding timers. These should really exist 2398 * Cancel any outstanding timers. These should really exist
2380 * only when rmmod'ing the LLDD and we're asking for 2399 * only when rmmod'ing the LLDD and we're asking for
@@ -2407,6 +2426,8 @@ fc_rport_final_delete(struct work_struct *work)
2407 (i->f->dev_loss_tmo_callbk)) 2426 (i->f->dev_loss_tmo_callbk))
2408 i->f->dev_loss_tmo_callbk(rport); 2427 i->f->dev_loss_tmo_callbk(rport);
2409 2428
2429 fc_bsg_remove(rport->rqst_q);
2430
2410 transport_remove_device(dev); 2431 transport_remove_device(dev);
2411 device_del(dev); 2432 device_del(dev);
2412 transport_destroy_device(dev); 2433 transport_destroy_device(dev);
@@ -2494,6 +2515,9 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
2494 transport_add_device(dev); 2515 transport_add_device(dev);
2495 transport_configure_device(dev); 2516 transport_configure_device(dev);
2496 2517
2518 fc_bsg_rportadd(shost, rport);
2519 /* ignore any bsg add error - we just can't do sgio */
2520
2497 if (rport->roles & FC_PORT_ROLE_FCP_TARGET) { 2521 if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
2498 /* initiate a scan of the target */ 2522 /* initiate a scan of the target */
2499 rport->flags |= FC_RPORT_SCAN_PENDING; 2523 rport->flags |= FC_RPORT_SCAN_PENDING;
@@ -2658,6 +2682,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2658 spin_unlock_irqrestore(shost->host_lock, 2682 spin_unlock_irqrestore(shost->host_lock,
2659 flags); 2683 flags);
2660 2684
2685 fc_bsg_goose_queue(rport);
2686
2661 return rport; 2687 return rport;
2662 } 2688 }
2663 } 2689 }
@@ -3343,6 +3369,592 @@ fc_vport_sched_delete(struct work_struct *work)
3343} 3369}
3344 3370
3345 3371
3372/*
3373 * BSG support
3374 */
3375
3376
3377/**
3378 * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job
3379 * @job: fc_bsg_job that is to be torn down
3380 */
3381static void
3382fc_destroy_bsgjob(struct fc_bsg_job *job)
3383{
3384 unsigned long flags;
3385
3386 spin_lock_irqsave(&job->job_lock, flags);
3387 if (job->ref_cnt) {
3388 spin_unlock_irqrestore(&job->job_lock, flags);
3389 return;
3390 }
3391 spin_unlock_irqrestore(&job->job_lock, flags);
3392
3393 put_device(job->dev); /* release reference for the request */
3394
3395 kfree(job->request_payload.sg_list);
3396 kfree(job->reply_payload.sg_list);
3397 kfree(job);
3398}
3399
3400
3401/**
3402 * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
3403 * completed
3404 * @job: fc_bsg_job that is complete
3405 */
3406static void
3407fc_bsg_jobdone(struct fc_bsg_job *job)
3408{
3409 struct request *req = job->req;
3410 struct request *rsp = req->next_rq;
3411 unsigned long flags;
3412 int err;
3413
3414 spin_lock_irqsave(&job->job_lock, flags);
3415 job->state_flags |= FC_RQST_STATE_DONE;
3416 job->ref_cnt--;
3417 spin_unlock_irqrestore(&job->job_lock, flags);
3418
3419 err = job->req->errors = job->reply->result;
3420 if (err < 0)
3421 /* we're only returning the result field in the reply */
3422 job->req->sense_len = sizeof(uint32_t);
3423 else
3424 job->req->sense_len = job->reply_len;
3425
3426 /* we assume all request payload was transferred, residual == 0 */
3427 req->resid_len = 0;
3428
3429 if (rsp) {
3430 WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
3431
3432 /* set reply (bidi) residual */
3433 rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
3434 rsp->resid_len);
3435 }
3436
3437 blk_end_request_all(req, err);
3438
3439 fc_destroy_bsgjob(job);
3440}
3441
3442
3443/**
3444 * fc_bsg_job_timeout - handler for when a bsg request timesout
3445 * @req: request that timed out
3446 */
3447static enum blk_eh_timer_return
3448fc_bsg_job_timeout(struct request *req)
3449{
3450 struct fc_bsg_job *job = (void *) req->special;
3451 struct Scsi_Host *shost = job->shost;
3452 struct fc_internal *i = to_fc_internal(shost->transportt);
3453 unsigned long flags;
3454 int err = 0, done = 0;
3455
3456 if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
3457 return BLK_EH_RESET_TIMER;
3458
3459 spin_lock_irqsave(&job->job_lock, flags);
3460 if (job->state_flags & FC_RQST_STATE_DONE)
3461 done = 1;
3462 else
3463 job->ref_cnt++;
3464 spin_unlock_irqrestore(&job->job_lock, flags);
3465
3466 if (!done && i->f->bsg_timeout) {
3467 /* call LLDD to abort the i/o as it has timed out */
3468 err = i->f->bsg_timeout(job);
3469 if (err)
3470 printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
3471 "abort failed with status %d\n", err);
3472 }
3473
3474 if (!done) {
3475 spin_lock_irqsave(&job->job_lock, flags);
3476 job->ref_cnt--;
3477 spin_unlock_irqrestore(&job->job_lock, flags);
3478 fc_destroy_bsgjob(job);
3479 }
3480
3481 /* the blk_end_sync_io() doesn't check the error */
3482 return BLK_EH_HANDLED;
3483}
3484
3485
3486
3487static int
3488fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
3489{
3490 size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
3491
3492 BUG_ON(!req->nr_phys_segments);
3493
3494 buf->sg_list = kzalloc(sz, GFP_KERNEL);
3495 if (!buf->sg_list)
3496 return -ENOMEM;
3497 sg_init_table(buf->sg_list, req->nr_phys_segments);
3498 buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
3499 buf->payload_len = blk_rq_bytes(req);
3500 return 0;
3501}
3502
3503
3504/**
3505 * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
3506 * bsg request
3507 * @shost: SCSI Host corresponding to the bsg object
3508 * @rport: (optional) FC Remote Port corresponding to the bsg object
3509 * @req: BSG request that needs a job structure
3510 */
3511static int
3512fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
3513 struct request *req)
3514{
3515 struct fc_internal *i = to_fc_internal(shost->transportt);
3516 struct request *rsp = req->next_rq;
3517 struct fc_bsg_job *job;
3518 int ret;
3519
3520 BUG_ON(req->special);
3521
3522 job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
3523 GFP_KERNEL);
3524 if (!job)
3525 return -ENOMEM;
3526
3527 /*
3528 * Note: this is a bit silly.
3529 * The request gets formatted as a SGIO v4 ioctl request, which
3530 * then gets reformatted as a blk request, which then gets
3531 * reformatted as a fc bsg request. And on completion, we have
3532 * to wrap return results such that SGIO v4 thinks it was a scsi
3533 * status. I hope this was all worth it.
3534 */
3535
3536 req->special = job;
3537 job->shost = shost;
3538 job->rport = rport;
3539 job->req = req;
3540 if (i->f->dd_bsg_size)
3541 job->dd_data = (void *)&job[1];
3542 spin_lock_init(&job->job_lock);
3543 job->request = (struct fc_bsg_request *)req->cmd;
3544 job->request_len = req->cmd_len;
3545 job->reply = req->sense;
3546 job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
3547 * allocated */
3548 if (req->bio) {
3549 ret = fc_bsg_map_buffer(&job->request_payload, req);
3550 if (ret)
3551 goto failjob_rls_job;
3552 }
3553 if (rsp && rsp->bio) {
3554 ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
3555 if (ret)
3556 goto failjob_rls_rqst_payload;
3557 }
3558 job->job_done = fc_bsg_jobdone;
3559 if (rport)
3560 job->dev = &rport->dev;
3561 else
3562 job->dev = &shost->shost_gendev;
3563 get_device(job->dev); /* take a reference for the request */
3564
3565 job->ref_cnt = 1;
3566
3567 return 0;
3568
3569
3570failjob_rls_rqst_payload:
3571 kfree(job->request_payload.sg_list);
3572failjob_rls_job:
3573 kfree(job);
3574 return -ENOMEM;
3575}
3576
3577
3578enum fc_dispatch_result {
3579 FC_DISPATCH_BREAK, /* on return, q is locked, break from q loop */
3580 FC_DISPATCH_LOCKED, /* on return, q is locked, continue on */
3581 FC_DISPATCH_UNLOCKED, /* on return, q is unlocked, continue on */
3582};
3583
3584
3585/**
3586 * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
3587 * @shost: scsi host rport attached to
3588 * @job: bsg job to be processed
3589 */
3590static enum fc_dispatch_result
3591fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3592 struct fc_bsg_job *job)
3593{
3594 struct fc_internal *i = to_fc_internal(shost->transportt);
3595 int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
3596 int ret;
3597
3598 /* Validate the host command */
3599 switch (job->request->msgcode) {
3600 case FC_BSG_HST_ADD_RPORT:
3601 cmdlen += sizeof(struct fc_bsg_host_add_rport);
3602 break;
3603
3604 case FC_BSG_HST_DEL_RPORT:
3605 cmdlen += sizeof(struct fc_bsg_host_del_rport);
3606 break;
3607
3608 case FC_BSG_HST_ELS_NOLOGIN:
3609 cmdlen += sizeof(struct fc_bsg_host_els);
3610 /* there better be a xmt and rcv payloads */
3611 if ((!job->request_payload.payload_len) ||
3612 (!job->reply_payload.payload_len)) {
3613 ret = -EINVAL;
3614 goto fail_host_msg;
3615 }
3616 break;
3617
3618 case FC_BSG_HST_CT:
3619 cmdlen += sizeof(struct fc_bsg_host_ct);
3620 /* there better be xmt and rcv payloads */
3621 if ((!job->request_payload.payload_len) ||
3622 (!job->reply_payload.payload_len)) {
3623 ret = -EINVAL;
3624 goto fail_host_msg;
3625 }
3626 break;
3627
3628 case FC_BSG_HST_VENDOR:
3629 cmdlen += sizeof(struct fc_bsg_host_vendor);
3630 if ((shost->hostt->vendor_id == 0L) ||
3631 (job->request->rqst_data.h_vendor.vendor_id !=
3632 shost->hostt->vendor_id)) {
3633 ret = -ESRCH;
3634 goto fail_host_msg;
3635 }
3636 break;
3637
3638 default:
3639 ret = -EBADR;
3640 goto fail_host_msg;
3641 }
3642
3643 /* check if we really have all the request data needed */
3644 if (job->request_len < cmdlen) {
3645 ret = -ENOMSG;
3646 goto fail_host_msg;
3647 }
3648
3649 ret = i->f->bsg_request(job);
3650 if (!ret)
3651 return FC_DISPATCH_UNLOCKED;
3652
3653fail_host_msg:
3654 /* return the errno failure code as the only status */
3655 BUG_ON(job->reply_len < sizeof(uint32_t));
3656 job->reply->result = ret;
3657 job->reply_len = sizeof(uint32_t);
3658 fc_bsg_jobdone(job);
3659 return FC_DISPATCH_UNLOCKED;
3660}
3661
3662
3663/*
3664 * fc_bsg_goose_queue - restart rport queue in case it was stopped
3665 * @rport: rport to be restarted
3666 */
3667static void
3668fc_bsg_goose_queue(struct fc_rport *rport)
3669{
3670 int flagset;
3671
3672 if (!rport->rqst_q)
3673 return;
3674
3675 get_device(&rport->dev);
3676
3677 spin_lock(rport->rqst_q->queue_lock);
3678 flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
3679 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
3680 if (flagset)
3681 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
3682 __blk_run_queue(rport->rqst_q);
3683 if (flagset)
3684 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
3685 spin_unlock(rport->rqst_q->queue_lock);
3686
3687 put_device(&rport->dev);
3688}
3689
3690
3691/**
3692 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
3693 * @shost: scsi host rport attached to
3694 * @rport: rport request destined to
3695 * @job: bsg job to be processed
3696 */
3697static enum fc_dispatch_result
3698fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3699 struct fc_rport *rport, struct fc_bsg_job *job)
3700{
3701 struct fc_internal *i = to_fc_internal(shost->transportt);
3702 int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
3703 int ret;
3704
3705 /* Validate the rport command */
3706 switch (job->request->msgcode) {
3707 case FC_BSG_RPT_ELS:
3708 cmdlen += sizeof(struct fc_bsg_rport_els);
3709 goto check_bidi;
3710
3711 case FC_BSG_RPT_CT:
3712 cmdlen += sizeof(struct fc_bsg_rport_ct);
3713check_bidi:
3714 /* there better be xmt and rcv payloads */
3715 if ((!job->request_payload.payload_len) ||
3716 (!job->reply_payload.payload_len)) {
3717 ret = -EINVAL;
3718 goto fail_rport_msg;
3719 }
3720 break;
3721 default:
3722 ret = -EBADR;
3723 goto fail_rport_msg;
3724 }
3725
3726 /* check if we really have all the request data needed */
3727 if (job->request_len < cmdlen) {
3728 ret = -ENOMSG;
3729 goto fail_rport_msg;
3730 }
3731
3732 ret = i->f->bsg_request(job);
3733 if (!ret)
3734 return FC_DISPATCH_UNLOCKED;
3735
3736fail_rport_msg:
3737 /* return the errno failure code as the only status */
3738 BUG_ON(job->reply_len < sizeof(uint32_t));
3739 job->reply->result = ret;
3740 job->reply_len = sizeof(uint32_t);
3741 fc_bsg_jobdone(job);
3742 return FC_DISPATCH_UNLOCKED;
3743}
3744
3745
3746/**
3747 * fc_bsg_request_handler - generic handler for bsg requests
3748 * @q: request queue to manage
3749 * @shost: Scsi_Host related to the bsg object
3750 * @rport: FC remote port related to the bsg object (optional)
3751 * @dev: device structure for bsg object
3752 */
3753static void
3754fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3755 struct fc_rport *rport, struct device *dev)
3756{
3757 struct request *req;
3758 struct fc_bsg_job *job;
3759 enum fc_dispatch_result ret;
3760
3761 if (!get_device(dev))
3762 return;
3763
3764 while (!blk_queue_plugged(q)) {
3765 if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED))
3766 break;
3767
3768 req = blk_fetch_request(q);
3769 if (!req)
3770 break;
3771
3772 if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
3773 req->errors = -ENXIO;
3774 spin_unlock_irq(q->queue_lock);
3775 blk_end_request(req, -ENXIO, blk_rq_bytes(req));
3776 spin_lock_irq(q->queue_lock);
3777 continue;
3778 }
3779
3780 spin_unlock_irq(q->queue_lock);
3781
3782 ret = fc_req_to_bsgjob(shost, rport, req);
3783 if (ret) {
3784 req->errors = ret;
3785 blk_end_request(req, ret, blk_rq_bytes(req));
3786 spin_lock_irq(q->queue_lock);
3787 continue;
3788 }
3789
3790 job = req->special;
3791
3792 /* check if we have the msgcode value at least */
3793 if (job->request_len < sizeof(uint32_t)) {
3794 BUG_ON(job->reply_len < sizeof(uint32_t));
3795 job->reply->result = -ENOMSG;
3796 job->reply_len = sizeof(uint32_t);
3797 fc_bsg_jobdone(job);
3798 spin_lock_irq(q->queue_lock);
3799 continue;
3800 }
3801
3802 /* the dispatch routines will unlock the queue_lock */
3803 if (rport)
3804 ret = fc_bsg_rport_dispatch(q, shost, rport, job);
3805 else
3806 ret = fc_bsg_host_dispatch(q, shost, job);
3807
3808 /* did dispatcher hit state that can't process any more */
3809 if (ret == FC_DISPATCH_BREAK)
3810 break;
3811
3812 /* did dispatcher had released the lock */
3813 if (ret == FC_DISPATCH_UNLOCKED)
3814 spin_lock_irq(q->queue_lock);
3815 }
3816
3817 spin_unlock_irq(q->queue_lock);
3818 put_device(dev);
3819 spin_lock_irq(q->queue_lock);
3820}
3821
3822
3823/**
3824 * fc_bsg_host_handler - handler for bsg requests for a fc host
3825 * @q: fc host request queue
3826 */
3827static void
3828fc_bsg_host_handler(struct request_queue *q)
3829{
3830 struct Scsi_Host *shost = q->queuedata;
3831
3832 fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
3833}
3834
3835
3836/**
3837 * fc_bsg_rport_handler - handler for bsg requests for a fc rport
3838 * @q: rport request queue
3839 */
3840static void
3841fc_bsg_rport_handler(struct request_queue *q)
3842{
3843 struct fc_rport *rport = q->queuedata;
3844 struct Scsi_Host *shost = rport_to_shost(rport);
3845
3846 fc_bsg_request_handler(q, shost, rport, &rport->dev);
3847}
3848
3849
3850/**
3851 * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
3852 * @shost: shost for fc_host
3853 * @fc_host: fc_host adding the structures to
3854 */
3855static int
3856fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
3857{
3858 struct device *dev = &shost->shost_gendev;
3859 struct fc_internal *i = to_fc_internal(shost->transportt);
3860 struct request_queue *q;
3861 int err;
3862 char bsg_name[BUS_ID_SIZE]; /*20*/
3863
3864 fc_host->rqst_q = NULL;
3865
3866 if (!i->f->bsg_request)
3867 return -ENOTSUPP;
3868
3869 snprintf(bsg_name, sizeof(bsg_name),
3870 "fc_host%d", shost->host_no);
3871
3872 q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
3873 if (!q) {
3874 printk(KERN_ERR "fc_host%d: bsg interface failed to "
3875 "initialize - no request queue\n",
3876 shost->host_no);
3877 return -ENOMEM;
3878 }
3879
3880 q->queuedata = shost;
3881 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
3882 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3883 blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
3884
3885 err = bsg_register_queue(q, dev, bsg_name, NULL);
3886 if (err) {
3887 printk(KERN_ERR "fc_host%d: bsg interface failed to "
3888 "initialize - register queue\n",
3889 shost->host_no);
3890 blk_cleanup_queue(q);
3891 return err;
3892 }
3893
3894 fc_host->rqst_q = q;
3895 return 0;
3896}
3897
3898
3899/**
3900 * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
3901 * @shost: shost that rport is attached to
3902 * @rport: rport that the bsg hooks are being attached to
3903 */
3904static int
3905fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
3906{
3907 struct device *dev = &rport->dev;
3908 struct fc_internal *i = to_fc_internal(shost->transportt);
3909 struct request_queue *q;
3910 int err;
3911
3912 rport->rqst_q = NULL;
3913
3914 if (!i->f->bsg_request)
3915 return -ENOTSUPP;
3916
3917 q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
3918 if (!q) {
3919 printk(KERN_ERR "%s: bsg interface failed to "
3920 "initialize - no request queue\n",
3921 dev->kobj.name);
3922 return -ENOMEM;
3923 }
3924
3925 q->queuedata = rport;
3926 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
3927 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
3928 blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
3929
3930 err = bsg_register_queue(q, dev, NULL, NULL);
3931 if (err) {
3932 printk(KERN_ERR "%s: bsg interface failed to "
3933 "initialize - register queue\n",
3934 dev->kobj.name);
3935 blk_cleanup_queue(q);
3936 return err;
3937 }
3938
3939 rport->rqst_q = q;
3940 return 0;
3941}
3942
3943
3944/**
3945 * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
3946 * @q: the request_queue that is to be torn down.
3947 */
3948static void
3949fc_bsg_remove(struct request_queue *q)
3950{
3951 if (q) {
3952 bsg_unregister_queue(q);
3953 blk_cleanup_queue(q);
3954 }
3955}
3956
3957
3346/* Original Author: Martin Hicks */ 3958/* Original Author: Martin Hicks */
3347MODULE_AUTHOR("James Smart"); 3959MODULE_AUTHOR("James Smart");
3348MODULE_DESCRIPTION("FC Transport Attributes"); 3960MODULE_DESCRIPTION("FC Transport Attributes");
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f49f55c6bfc8..654a34fb04cb 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -234,8 +234,10 @@ static int spi_setup_transport_attrs(struct transport_container *tc,
234 spi_width(starget) = 0; /* narrow */ 234 spi_width(starget) = 0; /* narrow */
235 spi_max_width(starget) = 1; 235 spi_max_width(starget) = 1;
236 spi_iu(starget) = 0; /* no IU */ 236 spi_iu(starget) = 0; /* no IU */
237 spi_max_iu(starget) = 1;
237 spi_dt(starget) = 0; /* ST */ 238 spi_dt(starget) = 0; /* ST */
238 spi_qas(starget) = 0; 239 spi_qas(starget) = 0;
240 spi_max_qas(starget) = 1;
239 spi_wr_flow(starget) = 0; 241 spi_wr_flow(starget) = 0;
240 spi_rd_strm(starget) = 0; 242 spi_rd_strm(starget) = 0;
241 spi_rti(starget) = 0; 243 spi_rti(starget) = 0;
@@ -360,9 +362,9 @@ static DEVICE_ATTR(field, S_IRUGO, \
360/* The Parallel SCSI Tranport Attributes: */ 362/* The Parallel SCSI Tranport Attributes: */
361spi_transport_max_attr(offset, "%d\n"); 363spi_transport_max_attr(offset, "%d\n");
362spi_transport_max_attr(width, "%d\n"); 364spi_transport_max_attr(width, "%d\n");
363spi_transport_rd_attr(iu, "%d\n"); 365spi_transport_max_attr(iu, "%d\n");
364spi_transport_rd_attr(dt, "%d\n"); 366spi_transport_rd_attr(dt, "%d\n");
365spi_transport_rd_attr(qas, "%d\n"); 367spi_transport_max_attr(qas, "%d\n");
366spi_transport_rd_attr(wr_flow, "%d\n"); 368spi_transport_rd_attr(wr_flow, "%d\n");
367spi_transport_rd_attr(rd_strm, "%d\n"); 369spi_transport_rd_attr(rd_strm, "%d\n");
368spi_transport_rd_attr(rti, "%d\n"); 370spi_transport_rd_attr(rti, "%d\n");
@@ -874,13 +876,13 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
874 876
875 /* try QAS requests; this should be harmless to set if the 877 /* try QAS requests; this should be harmless to set if the
876 * target supports it */ 878 * target supports it */
877 if (scsi_device_qas(sdev)) { 879 if (scsi_device_qas(sdev) && spi_max_qas(starget)) {
878 DV_SET(qas, 1); 880 DV_SET(qas, 1);
879 } else { 881 } else {
880 DV_SET(qas, 0); 882 DV_SET(qas, 0);
881 } 883 }
882 884
883 if (scsi_device_ius(sdev) && min_period < 9) { 885 if (scsi_device_ius(sdev) && spi_max_iu(starget) && min_period < 9) {
884 /* This u320 (or u640). Set IU transfers */ 886 /* This u320 (or u640). Set IU transfers */
885 DV_SET(iu, 1); 887 DV_SET(iu, 1);
886 /* Then set the optional parameters */ 888 /* Then set the optional parameters */
@@ -1412,12 +1414,18 @@ static mode_t target_attribute_is_visible(struct kobject *kobj,
1412 else if (attr == &dev_attr_iu.attr && 1414 else if (attr == &dev_attr_iu.attr &&
1413 spi_support_ius(starget)) 1415 spi_support_ius(starget))
1414 return TARGET_ATTRIBUTE_HELPER(iu); 1416 return TARGET_ATTRIBUTE_HELPER(iu);
1417 else if (attr == &dev_attr_max_iu.attr &&
1418 spi_support_ius(starget))
1419 return TARGET_ATTRIBUTE_HELPER(iu);
1415 else if (attr == &dev_attr_dt.attr && 1420 else if (attr == &dev_attr_dt.attr &&
1416 spi_support_dt(starget)) 1421 spi_support_dt(starget))
1417 return TARGET_ATTRIBUTE_HELPER(dt); 1422 return TARGET_ATTRIBUTE_HELPER(dt);
1418 else if (attr == &dev_attr_qas.attr && 1423 else if (attr == &dev_attr_qas.attr &&
1419 spi_support_qas(starget)) 1424 spi_support_qas(starget))
1420 return TARGET_ATTRIBUTE_HELPER(qas); 1425 return TARGET_ATTRIBUTE_HELPER(qas);
1426 else if (attr == &dev_attr_max_qas.attr &&
1427 spi_support_qas(starget))
1428 return TARGET_ATTRIBUTE_HELPER(qas);
1421 else if (attr == &dev_attr_wr_flow.attr && 1429 else if (attr == &dev_attr_wr_flow.attr &&
1422 spi_support_ius(starget)) 1430 spi_support_ius(starget))
1423 return TARGET_ATTRIBUTE_HELPER(wr_flow); 1431 return TARGET_ATTRIBUTE_HELPER(wr_flow);
@@ -1447,8 +1455,10 @@ static struct attribute *target_attributes[] = {
1447 &dev_attr_width.attr, 1455 &dev_attr_width.attr,
1448 &dev_attr_max_width.attr, 1456 &dev_attr_max_width.attr,
1449 &dev_attr_iu.attr, 1457 &dev_attr_iu.attr,
1458 &dev_attr_max_iu.attr,
1450 &dev_attr_dt.attr, 1459 &dev_attr_dt.attr,
1451 &dev_attr_qas.attr, 1460 &dev_attr_qas.attr,
1461 &dev_attr_max_qas.attr,
1452 &dev_attr_wr_flow.attr, 1462 &dev_attr_wr_flow.attr,
1453 &dev_attr_rd_strm.attr, 1463 &dev_attr_rd_strm.attr,
1454 &dev_attr_rti.attr, 1464 &dev_attr_rti.attr,
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 3f2027c70e46..02406ba6da1c 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -122,7 +122,7 @@ static int __devinit of_platform_serial_probe(struct of_device *ofdev,
122 122
123 info->type = port_type; 123 info->type = port_type;
124 info->line = ret; 124 info->line = ret;
125 ofdev->dev.driver_data = info; 125 dev_set_drvdata(&ofdev->dev, info);
126 return 0; 126 return 0;
127out: 127out:
128 kfree(info); 128 kfree(info);
@@ -135,7 +135,7 @@ out:
135 */ 135 */
136static int of_platform_serial_remove(struct of_device *ofdev) 136static int of_platform_serial_remove(struct of_device *ofdev)
137{ 137{
138 struct of_serial_info *info = ofdev->dev.driver_data; 138 struct of_serial_info *info = dev_get_drvdata(&ofdev->dev);
139 switch (info->type) { 139 switch (info->type) {
140#ifdef CONFIG_SERIAL_8250 140#ifdef CONFIG_SERIAL_8250
141 case PORT_8250 ... PORT_MAX_8250: 141 case PORT_8250 ... PORT_MAX_8250:
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index f4573a96af24..a32ccb44065e 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -711,12 +711,12 @@ static int of_mpc83xx_spi_get_chipselects(struct device *dev)
711 return 0; 711 return 0;
712 } 712 }
713 713
714 pinfo->gpios = kmalloc(ngpios * sizeof(pinfo->gpios), GFP_KERNEL); 714 pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL);
715 if (!pinfo->gpios) 715 if (!pinfo->gpios)
716 return -ENOMEM; 716 return -ENOMEM;
717 memset(pinfo->gpios, -1, ngpios * sizeof(pinfo->gpios)); 717 memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));
718 718
719 pinfo->alow_flags = kzalloc(ngpios * sizeof(pinfo->alow_flags), 719 pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags),
720 GFP_KERNEL); 720 GFP_KERNEL);
721 if (!pinfo->alow_flags) { 721 if (!pinfo->alow_flags) {
722 ret = -ENOMEM; 722 ret = -ENOMEM;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 0dcf9ca0b0ac..925657889f0f 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -115,5 +115,9 @@ source "drivers/staging/line6/Kconfig"
115 115
116source "drivers/staging/serqt_usb/Kconfig" 116source "drivers/staging/serqt_usb/Kconfig"
117 117
118source "drivers/gpu/drm/radeon/Kconfig"
119
120source "drivers/staging/octeon/Kconfig"
121
118endif # !STAGING_EXCLUDE_BUILD 122endif # !STAGING_EXCLUDE_BUILD
119endif # STAGING 123endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 47dfd5b4288b..6da9c74c1840 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_PLAN9AUTH) += p9auth/
40obj-$(CONFIG_HECI) += heci/ 40obj-$(CONFIG_HECI) += heci/
41obj-$(CONFIG_LINE6_USB) += line6/ 41obj-$(CONFIG_LINE6_USB) += line6/
42obj-$(CONFIG_USB_SERIAL_QUATECH_ESU100) += serqt_usb/ 42obj-$(CONFIG_USB_SERIAL_QUATECH_ESU100) += serqt_usb/
43obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
new file mode 100644
index 000000000000..536e2382de54
--- /dev/null
+++ b/drivers/staging/octeon/Kconfig
@@ -0,0 +1,12 @@
1config OCTEON_ETHERNET
2 tristate "Cavium Networks Octeon Ethernet support"
3 depends on CPU_CAVIUM_OCTEON
4 select MII
5 help
6 This driver supports the builtin ethernet ports on Cavium
7 Networks' products in the Octeon family. This driver supports the
8 CN3XXX and CN5XXX Octeon processors.
9
10 To compile this driver as a module, choose M here. The module
11 will be called octeon-ethernet.
12
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
new file mode 100644
index 000000000000..3c839e37d37f
--- /dev/null
+++ b/drivers/staging/octeon/Makefile
@@ -0,0 +1,30 @@
1# This file is subject to the terms and conditions of the GNU General Public
2# License. See the file "COPYING" in the main directory of this archive
3# for more details.
4#
5# Copyright (C) 2005-2009 Cavium Networks
6#
7
8#
9# Makefile for Cavium OCTEON on-board ethernet driver
10#
11
12obj-${CONFIG_OCTEON_ETHERNET} := octeon-ethernet.o
13
14octeon-ethernet-objs := ethernet.o
15octeon-ethernet-objs += ethernet-common.o
16octeon-ethernet-objs += ethernet-mdio.o
17octeon-ethernet-objs += ethernet-mem.o
18octeon-ethernet-objs += ethernet-proc.o
19octeon-ethernet-objs += ethernet-rgmii.o
20octeon-ethernet-objs += ethernet-rx.o
21octeon-ethernet-objs += ethernet-sgmii.o
22octeon-ethernet-objs += ethernet-spi.o
23octeon-ethernet-objs += ethernet-tx.o
24octeon-ethernet-objs += ethernet-xaui.o
25octeon-ethernet-objs += cvmx-pko.o cvmx-spi.o cvmx-cmd-queue.o \
26 cvmx-helper-board.o cvmx-helper.o cvmx-helper-xaui.o \
27 cvmx-helper-rgmii.o cvmx-helper-sgmii.o cvmx-helper-npi.o \
28 cvmx-helper-loop.o cvmx-helper-spi.o cvmx-helper-util.o \
29 cvmx-interrupt-decodes.o cvmx-interrupt-rsl.o
30
diff --git a/drivers/staging/octeon/cvmx-address.h b/drivers/staging/octeon/cvmx-address.h
new file mode 100644
index 000000000000..3c74d826e2e6
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-address.h
@@ -0,0 +1,274 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2009 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 * Typedefs and defines for working with Octeon physical addresses.
30 *
31 */
32#ifndef __CVMX_ADDRESS_H__
33#define __CVMX_ADDRESS_H__
34
35#if 0
36typedef enum {
37 CVMX_MIPS_SPACE_XKSEG = 3LL,
38 CVMX_MIPS_SPACE_XKPHYS = 2LL,
39 CVMX_MIPS_SPACE_XSSEG = 1LL,
40 CVMX_MIPS_SPACE_XUSEG = 0LL
41} cvmx_mips_space_t;
42#endif
43
44typedef enum {
45 CVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL,
46 CVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL,
47 CVMX_MIPS_XKSEG_SPACE_SSEG = 2LL,
48 CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL
49} cvmx_mips_xkseg_space_t;
50
51/* decodes <14:13> of a kseg3 window address */
52typedef enum {
53 CVMX_ADD_WIN_SCR = 0L,
54 /* see cvmx_add_win_dma_dec_t for further decode */
55 CVMX_ADD_WIN_DMA = 1L,
56 CVMX_ADD_WIN_UNUSED = 2L,
57 CVMX_ADD_WIN_UNUSED2 = 3L
58} cvmx_add_win_dec_t;
59
60/* decode within DMA space */
61typedef enum {
62 /*
63 * Add store data to the write buffer entry, allocating it if
64 * necessary.
65 */
66 CVMX_ADD_WIN_DMA_ADD = 0L,
67 /* send out the write buffer entry to DRAM */
68 CVMX_ADD_WIN_DMA_SENDMEM = 1L,
69 /* store data must be normal DRAM memory space address in this case */
70 /* send out the write buffer entry as an IOBDMA command */
71 CVMX_ADD_WIN_DMA_SENDDMA = 2L,
72 /* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */
73 /* send out the write buffer entry as an IO write */
74 CVMX_ADD_WIN_DMA_SENDIO = 3L,
75 /* store data must be normal IO space address in this case */
76 /* send out a single-tick command on the NCB bus */
77 CVMX_ADD_WIN_DMA_SENDSINGLE = 4L,
78 /* no write buffer data needed/used */
79} cvmx_add_win_dma_dec_t;
80
81/*
82 * Physical Address Decode
83 *
84 * Octeon-I HW never interprets this X (<39:36> reserved
85 * for future expansion), software should set to 0.
86 *
87 * - 0x0 XXX0 0000 0000 to DRAM Cached
88 * - 0x0 XXX0 0FFF FFFF
89 *
90 * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
91 * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
92 *
93 * - 0x0 XXX0 2000 0000 to DRAM Cached
94 * - 0x0 XXXF FFFF FFFF
95 *
96 * - 0x1 00X0 0000 0000 to Boot Bus Uncached
97 * - 0x1 00XF FFFF FFFF
98 *
99 * - 0x1 01X0 0000 0000 to Other NCB Uncached
100 * - 0x1 FFXF FFFF FFFF devices
101 *
102 * Decode of all Octeon addresses
103 */
104typedef union {
105
106 uint64_t u64;
107 /* mapped or unmapped virtual address */
108 struct {
109 uint64_t R:2;
110 uint64_t offset:62;
111 } sva;
112
113 /* mapped USEG virtual addresses (typically) */
114 struct {
115 uint64_t zeroes:33;
116 uint64_t offset:31;
117 } suseg;
118
119 /* mapped or unmapped virtual address */
120 struct {
121 uint64_t ones:33;
122 uint64_t sp:2;
123 uint64_t offset:29;
124 } sxkseg;
125
126 /*
127 * physical address accessed through xkphys unmapped virtual
128 * address.
129 */
130 struct {
131 uint64_t R:2; /* CVMX_MIPS_SPACE_XKPHYS in this case */
132 uint64_t cca:3; /* ignored by octeon */
133 uint64_t mbz:10;
134 uint64_t pa:49; /* physical address */
135 } sxkphys;
136
137 /* physical address */
138 struct {
139 uint64_t mbz:15;
140 /* if set, the address is uncached and resides on MCB bus */
141 uint64_t is_io:1;
142 /*
143 * the hardware ignores this field when is_io==0, else
144 * device ID.
145 */
146 uint64_t did:8;
147 /* the hardware ignores <39:36> in Octeon I */
148 uint64_t unaddr:4;
149 uint64_t offset:36;
150 } sphys;
151
152 /* physical mem address */
153 struct {
154 /* techically, <47:40> are dont-cares */
155 uint64_t zeroes:24;
156 /* the hardware ignores <39:36> in Octeon I */
157 uint64_t unaddr:4;
158 uint64_t offset:36;
159 } smem;
160
161 /* physical IO address */
162 struct {
163 uint64_t mem_region:2;
164 uint64_t mbz:13;
165 /* 1 in this case */
166 uint64_t is_io:1;
167 /*
168 * The hardware ignores this field when is_io==0, else
169 * device ID.
170 */
171 uint64_t did:8;
172 /* the hardware ignores <39:36> in Octeon I */
173 uint64_t unaddr:4;
174 uint64_t offset:36;
175 } sio;
176
177 /*
178 * Scratchpad virtual address - accessed through a window at
179 * the end of kseg3
180 */
181 struct {
182 uint64_t ones:49;
183 /* CVMX_ADD_WIN_SCR (0) in this case */
184 cvmx_add_win_dec_t csrdec:2;
185 uint64_t addr:13;
186 } sscr;
187
188 /* there should only be stores to IOBDMA space, no loads */
189 /*
190 * IOBDMA virtual address - accessed through a window at the
191 * end of kseg3
192 */
193 struct {
194 uint64_t ones:49;
195 uint64_t csrdec:2; /* CVMX_ADD_WIN_DMA (1) in this case */
196 uint64_t unused2:3;
197 uint64_t type:3;
198 uint64_t addr:7;
199 } sdma;
200
201 struct {
202 uint64_t didspace:24;
203 uint64_t unused:40;
204 } sfilldidspace;
205
206} cvmx_addr_t;
207
208/* These macros for used by 32 bit applications */
209
210#define CVMX_MIPS32_SPACE_KSEG0 1l
211#define CVMX_ADD_SEG32(segment, add) \
212 (((int32_t)segment << 31) | (int32_t)(add))
213
214/*
215 * Currently all IOs are performed using XKPHYS addressing. Linux uses
216 * the CvmMemCtl register to enable XKPHYS addressing to IO space from
217 * user mode. Future OSes may need to change the upper bits of IO
218 * addresses. The following define controls the upper two bits for all
219 * IO addresses generated by the simple executive library.
220 */
221#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
222
223/* These macros simplify the process of creating common IO addresses */
224#define CVMX_ADD_SEG(segment, add) ((((uint64_t)segment) << 62) | (add))
225#ifndef CVMX_ADD_IO_SEG
226#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
227#endif
228#define CVMX_ADDR_DIDSPACE(did) (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did))
229#define CVMX_ADDR_DID(did) (CVMX_ADDR_DIDSPACE(did) << 40)
230#define CVMX_FULL_DID(did, subdid) (((did) << 3) | (subdid))
231
232 /* from include/ncb_rsl_id.v */
233#define CVMX_OCT_DID_MIS 0ULL /* misc stuff */
234#define CVMX_OCT_DID_GMX0 1ULL
235#define CVMX_OCT_DID_GMX1 2ULL
236#define CVMX_OCT_DID_PCI 3ULL
237#define CVMX_OCT_DID_KEY 4ULL
238#define CVMX_OCT_DID_FPA 5ULL
239#define CVMX_OCT_DID_DFA 6ULL
240#define CVMX_OCT_DID_ZIP 7ULL
241#define CVMX_OCT_DID_RNG 8ULL
242#define CVMX_OCT_DID_IPD 9ULL
243#define CVMX_OCT_DID_PKT 10ULL
244#define CVMX_OCT_DID_TIM 11ULL
245#define CVMX_OCT_DID_TAG 12ULL
246 /* the rest are not on the IO bus */
247#define CVMX_OCT_DID_L2C 16ULL
248#define CVMX_OCT_DID_LMC 17ULL
249#define CVMX_OCT_DID_SPX0 18ULL
250#define CVMX_OCT_DID_SPX1 19ULL
251#define CVMX_OCT_DID_PIP 20ULL
252#define CVMX_OCT_DID_ASX0 22ULL
253#define CVMX_OCT_DID_ASX1 23ULL
254#define CVMX_OCT_DID_IOB 30ULL
255
256#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
257#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
258#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
259#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
260#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
261#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)
262#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
263#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
264#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
265#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
266#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
267#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
268#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
269#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
270#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
271#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
272#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
273
274#endif /* __CVMX_ADDRESS_H__ */
diff --git a/drivers/staging/octeon/cvmx-asxx-defs.h b/drivers/staging/octeon/cvmx-asxx-defs.h
new file mode 100644
index 000000000000..91415a85e8d2
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-asxx-defs.h
@@ -0,0 +1,475 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_ASXX_DEFS_H__
29#define __CVMX_ASXX_DEFS_H__
30
31#define CVMX_ASXX_GMII_RX_CLK_SET(block_id) \
32 CVMX_ADD_IO_SEG(0x00011800B0000180ull + (((block_id) & 0) * 0x8000000ull))
33#define CVMX_ASXX_GMII_RX_DAT_SET(block_id) \
34 CVMX_ADD_IO_SEG(0x00011800B0000188ull + (((block_id) & 0) * 0x8000000ull))
35#define CVMX_ASXX_INT_EN(block_id) \
36 CVMX_ADD_IO_SEG(0x00011800B0000018ull + (((block_id) & 1) * 0x8000000ull))
37#define CVMX_ASXX_INT_REG(block_id) \
38 CVMX_ADD_IO_SEG(0x00011800B0000010ull + (((block_id) & 1) * 0x8000000ull))
39#define CVMX_ASXX_MII_RX_DAT_SET(block_id) \
40 CVMX_ADD_IO_SEG(0x00011800B0000190ull + (((block_id) & 0) * 0x8000000ull))
41#define CVMX_ASXX_PRT_LOOP(block_id) \
42 CVMX_ADD_IO_SEG(0x00011800B0000040ull + (((block_id) & 1) * 0x8000000ull))
43#define CVMX_ASXX_RLD_BYPASS(block_id) \
44 CVMX_ADD_IO_SEG(0x00011800B0000248ull + (((block_id) & 1) * 0x8000000ull))
45#define CVMX_ASXX_RLD_BYPASS_SETTING(block_id) \
46 CVMX_ADD_IO_SEG(0x00011800B0000250ull + (((block_id) & 1) * 0x8000000ull))
47#define CVMX_ASXX_RLD_COMP(block_id) \
48 CVMX_ADD_IO_SEG(0x00011800B0000220ull + (((block_id) & 1) * 0x8000000ull))
49#define CVMX_ASXX_RLD_DATA_DRV(block_id) \
50 CVMX_ADD_IO_SEG(0x00011800B0000218ull + (((block_id) & 1) * 0x8000000ull))
51#define CVMX_ASXX_RLD_FCRAM_MODE(block_id) \
52 CVMX_ADD_IO_SEG(0x00011800B0000210ull + (((block_id) & 1) * 0x8000000ull))
53#define CVMX_ASXX_RLD_NCTL_STRONG(block_id) \
54 CVMX_ADD_IO_SEG(0x00011800B0000230ull + (((block_id) & 1) * 0x8000000ull))
55#define CVMX_ASXX_RLD_NCTL_WEAK(block_id) \
56 CVMX_ADD_IO_SEG(0x00011800B0000240ull + (((block_id) & 1) * 0x8000000ull))
57#define CVMX_ASXX_RLD_PCTL_STRONG(block_id) \
58 CVMX_ADD_IO_SEG(0x00011800B0000228ull + (((block_id) & 1) * 0x8000000ull))
59#define CVMX_ASXX_RLD_PCTL_WEAK(block_id) \
60 CVMX_ADD_IO_SEG(0x00011800B0000238ull + (((block_id) & 1) * 0x8000000ull))
61#define CVMX_ASXX_RLD_SETTING(block_id) \
62 CVMX_ADD_IO_SEG(0x00011800B0000258ull + (((block_id) & 1) * 0x8000000ull))
63#define CVMX_ASXX_RX_CLK_SETX(offset, block_id) \
64 CVMX_ADD_IO_SEG(0x00011800B0000020ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
65#define CVMX_ASXX_RX_PRT_EN(block_id) \
66 CVMX_ADD_IO_SEG(0x00011800B0000000ull + (((block_id) & 1) * 0x8000000ull))
67#define CVMX_ASXX_RX_WOL(block_id) \
68 CVMX_ADD_IO_SEG(0x00011800B0000100ull + (((block_id) & 1) * 0x8000000ull))
69#define CVMX_ASXX_RX_WOL_MSK(block_id) \
70 CVMX_ADD_IO_SEG(0x00011800B0000108ull + (((block_id) & 1) * 0x8000000ull))
71#define CVMX_ASXX_RX_WOL_POWOK(block_id) \
72 CVMX_ADD_IO_SEG(0x00011800B0000118ull + (((block_id) & 1) * 0x8000000ull))
73#define CVMX_ASXX_RX_WOL_SIG(block_id) \
74 CVMX_ADD_IO_SEG(0x00011800B0000110ull + (((block_id) & 1) * 0x8000000ull))
75#define CVMX_ASXX_TX_CLK_SETX(offset, block_id) \
76 CVMX_ADD_IO_SEG(0x00011800B0000048ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
77#define CVMX_ASXX_TX_COMP_BYP(block_id) \
78 CVMX_ADD_IO_SEG(0x00011800B0000068ull + (((block_id) & 1) * 0x8000000ull))
79#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) \
80 CVMX_ADD_IO_SEG(0x00011800B0000080ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
81#define CVMX_ASXX_TX_PRT_EN(block_id) \
82 CVMX_ADD_IO_SEG(0x00011800B0000008ull + (((block_id) & 1) * 0x8000000ull))
83
84union cvmx_asxx_gmii_rx_clk_set {
85 uint64_t u64;
86 struct cvmx_asxx_gmii_rx_clk_set_s {
87 uint64_t reserved_5_63:59;
88 uint64_t setting:5;
89 } s;
90 struct cvmx_asxx_gmii_rx_clk_set_s cn30xx;
91 struct cvmx_asxx_gmii_rx_clk_set_s cn31xx;
92 struct cvmx_asxx_gmii_rx_clk_set_s cn50xx;
93};
94
95union cvmx_asxx_gmii_rx_dat_set {
96 uint64_t u64;
97 struct cvmx_asxx_gmii_rx_dat_set_s {
98 uint64_t reserved_5_63:59;
99 uint64_t setting:5;
100 } s;
101 struct cvmx_asxx_gmii_rx_dat_set_s cn30xx;
102 struct cvmx_asxx_gmii_rx_dat_set_s cn31xx;
103 struct cvmx_asxx_gmii_rx_dat_set_s cn50xx;
104};
105
106union cvmx_asxx_int_en {
107 uint64_t u64;
108 struct cvmx_asxx_int_en_s {
109 uint64_t reserved_12_63:52;
110 uint64_t txpsh:4;
111 uint64_t txpop:4;
112 uint64_t ovrflw:4;
113 } s;
114 struct cvmx_asxx_int_en_cn30xx {
115 uint64_t reserved_11_63:53;
116 uint64_t txpsh:3;
117 uint64_t reserved_7_7:1;
118 uint64_t txpop:3;
119 uint64_t reserved_3_3:1;
120 uint64_t ovrflw:3;
121 } cn30xx;
122 struct cvmx_asxx_int_en_cn30xx cn31xx;
123 struct cvmx_asxx_int_en_s cn38xx;
124 struct cvmx_asxx_int_en_s cn38xxp2;
125 struct cvmx_asxx_int_en_cn30xx cn50xx;
126 struct cvmx_asxx_int_en_s cn58xx;
127 struct cvmx_asxx_int_en_s cn58xxp1;
128};
129
130union cvmx_asxx_int_reg {
131 uint64_t u64;
132 struct cvmx_asxx_int_reg_s {
133 uint64_t reserved_12_63:52;
134 uint64_t txpsh:4;
135 uint64_t txpop:4;
136 uint64_t ovrflw:4;
137 } s;
138 struct cvmx_asxx_int_reg_cn30xx {
139 uint64_t reserved_11_63:53;
140 uint64_t txpsh:3;
141 uint64_t reserved_7_7:1;
142 uint64_t txpop:3;
143 uint64_t reserved_3_3:1;
144 uint64_t ovrflw:3;
145 } cn30xx;
146 struct cvmx_asxx_int_reg_cn30xx cn31xx;
147 struct cvmx_asxx_int_reg_s cn38xx;
148 struct cvmx_asxx_int_reg_s cn38xxp2;
149 struct cvmx_asxx_int_reg_cn30xx cn50xx;
150 struct cvmx_asxx_int_reg_s cn58xx;
151 struct cvmx_asxx_int_reg_s cn58xxp1;
152};
153
154union cvmx_asxx_mii_rx_dat_set {
155 uint64_t u64;
156 struct cvmx_asxx_mii_rx_dat_set_s {
157 uint64_t reserved_5_63:59;
158 uint64_t setting:5;
159 } s;
160 struct cvmx_asxx_mii_rx_dat_set_s cn30xx;
161 struct cvmx_asxx_mii_rx_dat_set_s cn50xx;
162};
163
164union cvmx_asxx_prt_loop {
165 uint64_t u64;
166 struct cvmx_asxx_prt_loop_s {
167 uint64_t reserved_8_63:56;
168 uint64_t ext_loop:4;
169 uint64_t int_loop:4;
170 } s;
171 struct cvmx_asxx_prt_loop_cn30xx {
172 uint64_t reserved_7_63:57;
173 uint64_t ext_loop:3;
174 uint64_t reserved_3_3:1;
175 uint64_t int_loop:3;
176 } cn30xx;
177 struct cvmx_asxx_prt_loop_cn30xx cn31xx;
178 struct cvmx_asxx_prt_loop_s cn38xx;
179 struct cvmx_asxx_prt_loop_s cn38xxp2;
180 struct cvmx_asxx_prt_loop_cn30xx cn50xx;
181 struct cvmx_asxx_prt_loop_s cn58xx;
182 struct cvmx_asxx_prt_loop_s cn58xxp1;
183};
184
185union cvmx_asxx_rld_bypass {
186 uint64_t u64;
187 struct cvmx_asxx_rld_bypass_s {
188 uint64_t reserved_1_63:63;
189 uint64_t bypass:1;
190 } s;
191 struct cvmx_asxx_rld_bypass_s cn38xx;
192 struct cvmx_asxx_rld_bypass_s cn38xxp2;
193 struct cvmx_asxx_rld_bypass_s cn58xx;
194 struct cvmx_asxx_rld_bypass_s cn58xxp1;
195};
196
197union cvmx_asxx_rld_bypass_setting {
198 uint64_t u64;
199 struct cvmx_asxx_rld_bypass_setting_s {
200 uint64_t reserved_5_63:59;
201 uint64_t setting:5;
202 } s;
203 struct cvmx_asxx_rld_bypass_setting_s cn38xx;
204 struct cvmx_asxx_rld_bypass_setting_s cn38xxp2;
205 struct cvmx_asxx_rld_bypass_setting_s cn58xx;
206 struct cvmx_asxx_rld_bypass_setting_s cn58xxp1;
207};
208
209union cvmx_asxx_rld_comp {
210 uint64_t u64;
211 struct cvmx_asxx_rld_comp_s {
212 uint64_t reserved_9_63:55;
213 uint64_t pctl:5;
214 uint64_t nctl:4;
215 } s;
216 struct cvmx_asxx_rld_comp_cn38xx {
217 uint64_t reserved_8_63:56;
218 uint64_t pctl:4;
219 uint64_t nctl:4;
220 } cn38xx;
221 struct cvmx_asxx_rld_comp_cn38xx cn38xxp2;
222 struct cvmx_asxx_rld_comp_s cn58xx;
223 struct cvmx_asxx_rld_comp_s cn58xxp1;
224};
225
226union cvmx_asxx_rld_data_drv {
227 uint64_t u64;
228 struct cvmx_asxx_rld_data_drv_s {
229 uint64_t reserved_8_63:56;
230 uint64_t pctl:4;
231 uint64_t nctl:4;
232 } s;
233 struct cvmx_asxx_rld_data_drv_s cn38xx;
234 struct cvmx_asxx_rld_data_drv_s cn38xxp2;
235 struct cvmx_asxx_rld_data_drv_s cn58xx;
236 struct cvmx_asxx_rld_data_drv_s cn58xxp1;
237};
238
239union cvmx_asxx_rld_fcram_mode {
240 uint64_t u64;
241 struct cvmx_asxx_rld_fcram_mode_s {
242 uint64_t reserved_1_63:63;
243 uint64_t mode:1;
244 } s;
245 struct cvmx_asxx_rld_fcram_mode_s cn38xx;
246 struct cvmx_asxx_rld_fcram_mode_s cn38xxp2;
247};
248
249union cvmx_asxx_rld_nctl_strong {
250 uint64_t u64;
251 struct cvmx_asxx_rld_nctl_strong_s {
252 uint64_t reserved_5_63:59;
253 uint64_t nctl:5;
254 } s;
255 struct cvmx_asxx_rld_nctl_strong_s cn38xx;
256 struct cvmx_asxx_rld_nctl_strong_s cn38xxp2;
257 struct cvmx_asxx_rld_nctl_strong_s cn58xx;
258 struct cvmx_asxx_rld_nctl_strong_s cn58xxp1;
259};
260
261union cvmx_asxx_rld_nctl_weak {
262 uint64_t u64;
263 struct cvmx_asxx_rld_nctl_weak_s {
264 uint64_t reserved_5_63:59;
265 uint64_t nctl:5;
266 } s;
267 struct cvmx_asxx_rld_nctl_weak_s cn38xx;
268 struct cvmx_asxx_rld_nctl_weak_s cn38xxp2;
269 struct cvmx_asxx_rld_nctl_weak_s cn58xx;
270 struct cvmx_asxx_rld_nctl_weak_s cn58xxp1;
271};
272
273union cvmx_asxx_rld_pctl_strong {
274 uint64_t u64;
275 struct cvmx_asxx_rld_pctl_strong_s {
276 uint64_t reserved_5_63:59;
277 uint64_t pctl:5;
278 } s;
279 struct cvmx_asxx_rld_pctl_strong_s cn38xx;
280 struct cvmx_asxx_rld_pctl_strong_s cn38xxp2;
281 struct cvmx_asxx_rld_pctl_strong_s cn58xx;
282 struct cvmx_asxx_rld_pctl_strong_s cn58xxp1;
283};
284
285union cvmx_asxx_rld_pctl_weak {
286 uint64_t u64;
287 struct cvmx_asxx_rld_pctl_weak_s {
288 uint64_t reserved_5_63:59;
289 uint64_t pctl:5;
290 } s;
291 struct cvmx_asxx_rld_pctl_weak_s cn38xx;
292 struct cvmx_asxx_rld_pctl_weak_s cn38xxp2;
293 struct cvmx_asxx_rld_pctl_weak_s cn58xx;
294 struct cvmx_asxx_rld_pctl_weak_s cn58xxp1;
295};
296
297union cvmx_asxx_rld_setting {
298 uint64_t u64;
299 struct cvmx_asxx_rld_setting_s {
300 uint64_t reserved_13_63:51;
301 uint64_t dfaset:5;
302 uint64_t dfalag:1;
303 uint64_t dfalead:1;
304 uint64_t dfalock:1;
305 uint64_t setting:5;
306 } s;
307 struct cvmx_asxx_rld_setting_cn38xx {
308 uint64_t reserved_5_63:59;
309 uint64_t setting:5;
310 } cn38xx;
311 struct cvmx_asxx_rld_setting_cn38xx cn38xxp2;
312 struct cvmx_asxx_rld_setting_s cn58xx;
313 struct cvmx_asxx_rld_setting_s cn58xxp1;
314};
315
316union cvmx_asxx_rx_clk_setx {
317 uint64_t u64;
318 struct cvmx_asxx_rx_clk_setx_s {
319 uint64_t reserved_5_63:59;
320 uint64_t setting:5;
321 } s;
322 struct cvmx_asxx_rx_clk_setx_s cn30xx;
323 struct cvmx_asxx_rx_clk_setx_s cn31xx;
324 struct cvmx_asxx_rx_clk_setx_s cn38xx;
325 struct cvmx_asxx_rx_clk_setx_s cn38xxp2;
326 struct cvmx_asxx_rx_clk_setx_s cn50xx;
327 struct cvmx_asxx_rx_clk_setx_s cn58xx;
328 struct cvmx_asxx_rx_clk_setx_s cn58xxp1;
329};
330
331union cvmx_asxx_rx_prt_en {
332 uint64_t u64;
333 struct cvmx_asxx_rx_prt_en_s {
334 uint64_t reserved_4_63:60;
335 uint64_t prt_en:4;
336 } s;
337 struct cvmx_asxx_rx_prt_en_cn30xx {
338 uint64_t reserved_3_63:61;
339 uint64_t prt_en:3;
340 } cn30xx;
341 struct cvmx_asxx_rx_prt_en_cn30xx cn31xx;
342 struct cvmx_asxx_rx_prt_en_s cn38xx;
343 struct cvmx_asxx_rx_prt_en_s cn38xxp2;
344 struct cvmx_asxx_rx_prt_en_cn30xx cn50xx;
345 struct cvmx_asxx_rx_prt_en_s cn58xx;
346 struct cvmx_asxx_rx_prt_en_s cn58xxp1;
347};
348
349union cvmx_asxx_rx_wol {
350 uint64_t u64;
351 struct cvmx_asxx_rx_wol_s {
352 uint64_t reserved_2_63:62;
353 uint64_t status:1;
354 uint64_t enable:1;
355 } s;
356 struct cvmx_asxx_rx_wol_s cn38xx;
357 struct cvmx_asxx_rx_wol_s cn38xxp2;
358};
359
360union cvmx_asxx_rx_wol_msk {
361 uint64_t u64;
362 struct cvmx_asxx_rx_wol_msk_s {
363 uint64_t msk:64;
364 } s;
365 struct cvmx_asxx_rx_wol_msk_s cn38xx;
366 struct cvmx_asxx_rx_wol_msk_s cn38xxp2;
367};
368
369union cvmx_asxx_rx_wol_powok {
370 uint64_t u64;
371 struct cvmx_asxx_rx_wol_powok_s {
372 uint64_t reserved_1_63:63;
373 uint64_t powerok:1;
374 } s;
375 struct cvmx_asxx_rx_wol_powok_s cn38xx;
376 struct cvmx_asxx_rx_wol_powok_s cn38xxp2;
377};
378
379union cvmx_asxx_rx_wol_sig {
380 uint64_t u64;
381 struct cvmx_asxx_rx_wol_sig_s {
382 uint64_t reserved_32_63:32;
383 uint64_t sig:32;
384 } s;
385 struct cvmx_asxx_rx_wol_sig_s cn38xx;
386 struct cvmx_asxx_rx_wol_sig_s cn38xxp2;
387};
388
389union cvmx_asxx_tx_clk_setx {
390 uint64_t u64;
391 struct cvmx_asxx_tx_clk_setx_s {
392 uint64_t reserved_5_63:59;
393 uint64_t setting:5;
394 } s;
395 struct cvmx_asxx_tx_clk_setx_s cn30xx;
396 struct cvmx_asxx_tx_clk_setx_s cn31xx;
397 struct cvmx_asxx_tx_clk_setx_s cn38xx;
398 struct cvmx_asxx_tx_clk_setx_s cn38xxp2;
399 struct cvmx_asxx_tx_clk_setx_s cn50xx;
400 struct cvmx_asxx_tx_clk_setx_s cn58xx;
401 struct cvmx_asxx_tx_clk_setx_s cn58xxp1;
402};
403
404union cvmx_asxx_tx_comp_byp {
405 uint64_t u64;
406 struct cvmx_asxx_tx_comp_byp_s {
407 uint64_t reserved_0_63:64;
408 } s;
409 struct cvmx_asxx_tx_comp_byp_cn30xx {
410 uint64_t reserved_9_63:55;
411 uint64_t bypass:1;
412 uint64_t pctl:4;
413 uint64_t nctl:4;
414 } cn30xx;
415 struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx;
416 struct cvmx_asxx_tx_comp_byp_cn38xx {
417 uint64_t reserved_8_63:56;
418 uint64_t pctl:4;
419 uint64_t nctl:4;
420 } cn38xx;
421 struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2;
422 struct cvmx_asxx_tx_comp_byp_cn50xx {
423 uint64_t reserved_17_63:47;
424 uint64_t bypass:1;
425 uint64_t reserved_13_15:3;
426 uint64_t pctl:5;
427 uint64_t reserved_5_7:3;
428 uint64_t nctl:5;
429 } cn50xx;
430 struct cvmx_asxx_tx_comp_byp_cn58xx {
431 uint64_t reserved_13_63:51;
432 uint64_t pctl:5;
433 uint64_t reserved_5_7:3;
434 uint64_t nctl:5;
435 } cn58xx;
436 struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1;
437};
438
439union cvmx_asxx_tx_hi_waterx {
440 uint64_t u64;
441 struct cvmx_asxx_tx_hi_waterx_s {
442 uint64_t reserved_4_63:60;
443 uint64_t mark:4;
444 } s;
445 struct cvmx_asxx_tx_hi_waterx_cn30xx {
446 uint64_t reserved_3_63:61;
447 uint64_t mark:3;
448 } cn30xx;
449 struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx;
450 struct cvmx_asxx_tx_hi_waterx_s cn38xx;
451 struct cvmx_asxx_tx_hi_waterx_s cn38xxp2;
452 struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx;
453 struct cvmx_asxx_tx_hi_waterx_s cn58xx;
454 struct cvmx_asxx_tx_hi_waterx_s cn58xxp1;
455};
456
457union cvmx_asxx_tx_prt_en {
458 uint64_t u64;
459 struct cvmx_asxx_tx_prt_en_s {
460 uint64_t reserved_4_63:60;
461 uint64_t prt_en:4;
462 } s;
463 struct cvmx_asxx_tx_prt_en_cn30xx {
464 uint64_t reserved_3_63:61;
465 uint64_t prt_en:3;
466 } cn30xx;
467 struct cvmx_asxx_tx_prt_en_cn30xx cn31xx;
468 struct cvmx_asxx_tx_prt_en_s cn38xx;
469 struct cvmx_asxx_tx_prt_en_s cn38xxp2;
470 struct cvmx_asxx_tx_prt_en_cn30xx cn50xx;
471 struct cvmx_asxx_tx_prt_en_s cn58xx;
472 struct cvmx_asxx_tx_prt_en_s cn58xxp1;
473};
474
475#endif
diff --git a/drivers/staging/octeon/cvmx-cmd-queue.c b/drivers/staging/octeon/cvmx-cmd-queue.c
new file mode 100644
index 000000000000..976227b01273
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-cmd-queue.c
@@ -0,0 +1,306 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Support functions for managing command queues used for
30 * various hardware blocks.
31 */
32
33#include <linux/kernel.h>
34
35#include <asm/octeon/octeon.h>
36
37#include "cvmx-config.h"
38#include "cvmx-fpa.h"
39#include "cvmx-cmd-queue.h"
40
41#include <asm/octeon/cvmx-npei-defs.h>
42#include <asm/octeon/cvmx-pexp-defs.h>
43#include "cvmx-pko-defs.h"
44
45/**
46 * This application uses this pointer to access the global queue
47 * state. It points to a bootmem named block.
48 */
49__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
50
/**
 * Initialize the global queue state pointer.
 *
 * The state lives in a bootmem named block ("cvmx_cmd_queues") so it
 * can be shared by every application running on the Octeon.  The
 * first caller allocates the block; if allocation fails because the
 * block already exists, we attach to the existing block by name.
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
	char *alloc_name = "cvmx_cmd_queues";
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	extern uint64_t octeon_reserve32_memory;
#endif

	/* Fast path: the pointer was already set up. */
	if (likely(__cvmx_cmd_queue_state_ptr))
		return CVMX_CMD_QUEUE_SUCCESS;

#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	/*
	 * When a reserved region is configured, carve the block out
	 * of that region (octeon_reserve32_memory is its base, the
	 * region is CONFIG_CAVIUM_RESERVE32 megabytes long).
	 */
	if (octeon_reserve32_memory)
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
						   octeon_reserve32_memory,
						   octeon_reserve32_memory +
						   (CONFIG_CAVIUM_RESERVE32 <<
						    20) - 1, 128, alloc_name);
	else
#endif
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
					     128,
					     alloc_name);
	if (__cvmx_cmd_queue_state_ptr)
		memset(__cvmx_cmd_queue_state_ptr, 0,
		       sizeof(*__cvmx_cmd_queue_state_ptr));
	else {
		/*
		 * Allocation failed -- presumably another application
		 * already created the named block.  Look it up and
		 * attach to it instead.
		 */
		struct cvmx_bootmem_named_block_desc *block_desc =
		    cvmx_bootmem_find_named_block(alloc_name);
		if (block_desc)
			__cvmx_cmd_queue_state_ptr =
			    cvmx_phys_to_ptr(block_desc->base_addr);
		else {
			cvmx_dprintf
			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
			     alloc_name);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
	}
	return CVMX_CMD_QUEUE_SUCCESS;
}
98
99/**
100 * Initialize a command queue for use. The initial FPA buffer is
101 * allocated and the hardware unit is configured to point to the
102 * new command queue.
103 *
104 * @queue_id: Hardware command queue to initialize.
105 * @max_depth: Maximum outstanding commands that can be queued.
106 * @fpa_pool: FPA pool the command queues should come from.
107 * @pool_size: Size of each buffer in the FPA pool (bytes)
108 *
109 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
110 */
111cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
112 int max_depth, int fpa_pool,
113 int pool_size)
114{
115 __cvmx_cmd_queue_state_t *qstate;
116 cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
117 if (result != CVMX_CMD_QUEUE_SUCCESS)
118 return result;
119
120 qstate = __cvmx_cmd_queue_get_state(queue_id);
121 if (qstate == NULL)
122 return CVMX_CMD_QUEUE_INVALID_PARAM;
123
124 /*
125 * We artificially limit max_depth to 1<<20 words. It is an
126 * arbitrary limit.
127 */
128 if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
129 if ((max_depth < 0) || (max_depth > 1 << 20))
130 return CVMX_CMD_QUEUE_INVALID_PARAM;
131 } else if (max_depth != 0)
132 return CVMX_CMD_QUEUE_INVALID_PARAM;
133
134 if ((fpa_pool < 0) || (fpa_pool > 7))
135 return CVMX_CMD_QUEUE_INVALID_PARAM;
136 if ((pool_size < 128) || (pool_size > 65536))
137 return CVMX_CMD_QUEUE_INVALID_PARAM;
138
139 /* See if someone else has already initialized the queue */
140 if (qstate->base_ptr_div128) {
141 if (max_depth != (int)qstate->max_depth) {
142 cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
143 "Queue already initalized with different "
144 "max_depth (%d).\n",
145 (int)qstate->max_depth);
146 return CVMX_CMD_QUEUE_INVALID_PARAM;
147 }
148 if (fpa_pool != qstate->fpa_pool) {
149 cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
150 "Queue already initalized with different "
151 "FPA pool (%u).\n",
152 qstate->fpa_pool);
153 return CVMX_CMD_QUEUE_INVALID_PARAM;
154 }
155 if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
156 cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
157 "Queue already initalized with different "
158 "FPA pool size (%u).\n",
159 (qstate->pool_size_m1 + 1) << 3);
160 return CVMX_CMD_QUEUE_INVALID_PARAM;
161 }
162 CVMX_SYNCWS;
163 return CVMX_CMD_QUEUE_ALREADY_SETUP;
164 } else {
165 union cvmx_fpa_ctl_status status;
166 void *buffer;
167
168 status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
169 if (!status.s.enb) {
170 cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
171 "FPA is not enabled.\n");
172 return CVMX_CMD_QUEUE_NO_MEMORY;
173 }
174 buffer = cvmx_fpa_alloc(fpa_pool);
175 if (buffer == NULL) {
176 cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
177 "Unable to allocate initial buffer.\n");
178 return CVMX_CMD_QUEUE_NO_MEMORY;
179 }
180
181 memset(qstate, 0, sizeof(*qstate));
182 qstate->max_depth = max_depth;
183 qstate->fpa_pool = fpa_pool;
184 qstate->pool_size_m1 = (pool_size >> 3) - 1;
185 qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
186 /*
187 * We zeroed the now serving field so we need to also
188 * zero the ticket.
189 */
190 __cvmx_cmd_queue_state_ptr->
191 ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
192 CVMX_SYNCWS;
193 return CVMX_CMD_QUEUE_SUCCESS;
194 }
195}
196
/**
 * Shut down a queue and free its command buffer back to the FPA.
 * The hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @queue_id: Queue to shutdown
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	if (qptr == NULL) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
			     "get queue information.\n");
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	/* Refuse to tear down a queue that still has pending commands. */
	if (cvmx_cmd_queue_length(queue_id) > 0) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
			     "has data in it.\n");
		return CVMX_CMD_QUEUE_FULL;
	}

	__cvmx_cmd_queue_lock(queue_id, qptr);
	if (qptr->base_ptr_div128) {
		/* base_ptr_div128 holds the physical address >> 7. */
		cvmx_fpa_free(cvmx_phys_to_ptr
			      ((uint64_t) qptr->base_ptr_div128 << 7),
			      qptr->fpa_pool, 0);
		qptr->base_ptr_div128 = 0;
	}
	__cvmx_cmd_queue_unlock(qptr);

	return CVMX_CMD_QUEUE_SUCCESS;
}
232
/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @queue_id: Hardware command queue to query
 *
 * Returns Number of outstanding commands, or the negative
 * CVMX_CMD_QUEUE_INVALID_PARAM on a bad queue id
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
	if (CVMX_ENABLE_PARAMETER_CHECKING) {
		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	/*
	 * The cast is here so gcc will check that all values in the
	 * cvmx_cmd_queue_id_t enumeration are handled.
	 */
	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
	case CVMX_CMD_QUEUE_PKO_BASE:
		/*
		 * FIXME: Need atomic lock on
		 * CVMX_PKO_REG_READ_IDX. Right now we are normally
		 * called with the queue lock, so that is a SLIGHT
		 * amount of protection.
		 */
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
		/* The doorbell count lives in a different debug CSR
		   depending on the chip model. */
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			union cvmx_pko_mem_debug9 debug9;
			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
			return debug9.cn38xx.doorbell;
		} else {
			union cvmx_pko_mem_debug8 debug8;
			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
			return debug8.cn58xx.doorbell;
		}
	case CVMX_CMD_QUEUE_ZIP:
	case CVMX_CMD_QUEUE_DFA:
	case CVMX_CMD_QUEUE_RAID:
		/* FIXME: Implement other lengths */
		return 0;
	case CVMX_CMD_QUEUE_DMA_BASE:
		{
			union cvmx_npei_dmax_counts dmax_counts;
			dmax_counts.u64 =
			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
					  (queue_id & 0x7));
			return dmax_counts.s.dbell;
		}
	case CVMX_CMD_QUEUE_END:
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}
	return CVMX_CMD_QUEUE_INVALID_PARAM;
}
288
289/**
290 * Return the command buffer to be written to. The purpose of this
291 * function is to allow CVMX routine access t othe low level buffer
292 * for initial hardware setup. User applications should not call this
293 * function directly.
294 *
295 * @queue_id: Command queue to query
296 *
297 * Returns Command buffer or NULL on failure
298 */
299void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
300{
301 __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
302 if (qptr && qptr->base_ptr_div128)
303 return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
304 else
305 return NULL;
306}
diff --git a/drivers/staging/octeon/cvmx-cmd-queue.h b/drivers/staging/octeon/cvmx-cmd-queue.h
new file mode 100644
index 000000000000..f0cb20ffa39a
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-cmd-queue.h
@@ -0,0 +1,617 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 *
30 * Support functions for managing command queues used for
31 * various hardware blocks.
32 *
33 * The common command queue infrastructure abstracts out the
34 * software necessary for adding to Octeon's chained queue
35 * structures. These structures are used for commands to the
36 * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
37 * hardware unit takes commands and CSRs of different types,
38 * they all use basic linked command buffers to store the
39 * pending request. In general, users of the CVMX API don't
40 * call cvmx-cmd-queue functions directly. Instead the hardware
41 * unit specific wrapper should be used. The wrappers perform
42 * unit specific validation and CSR writes to submit the
43 * commands.
44 *
45 * Even though most software will never directly interact with
46 * cvmx-cmd-queue, knowledge of its internal working can help
47 * in diagnosing performance problems and help with debugging.
48 *
49 * Command queue pointers are stored in a global named block
50 * called "cvmx_cmd_queues". Except for the PKO queues, each
51 * hardware queue is stored in its own cache line to reduce SMP
52 * contention on spin locks. The PKO queues are stored such that
53 * every 16th queue is next to each other in memory. This scheme
54 * allows for queues being in separate cache lines when there
55 * are low number of queues per port. With 16 queues per port,
56 * the first queue for each port is in the same cache area. The
57 * second queues for each port are in another area, etc. This
58 * allows software to implement very efficient lockless PKO with
59 * 16 queues per port using a minimum of cache lines per core.
60 * All queues for a given core will be isolated in the same
61 * cache area.
62 *
63 * In addition to the memory pointer layout, cvmx-cmd-queue
64 * provides an optimized fair ll/sc locking mechanism for the
65 * queues. The lock uses a "ticket / now serving" model to
66 * maintain fair order on contended locks. In addition, it uses
67 * predicted locking time to limit cache contention. When a core
68 * knows it must wait in line for a lock, it spins on the
69 * internal cycle counter to completely eliminate any causes of
70 * bus traffic.
71 *
72 */
73
74#ifndef __CVMX_CMD_QUEUE_H__
75#define __CVMX_CMD_QUEUE_H__
76
77#include <linux/prefetch.h>
78
79#include "cvmx-fpa.h"
80/**
81 * By default we disable the max depth support. Most programs
82 * don't use it and it slows down the command queue processing
83 * significantly.
84 */
85#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
86#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
87#endif
88
/**
 * Enumeration representing all hardware blocks that use command
 * queues. Each hardware block has up to 65536 sub identifiers for
 * multiple command queues. Not all chips support all hardware
 * units.
 */
typedef enum {
	CVMX_CMD_QUEUE_PKO_BASE = 0x00000,

/* Build the queue id for a specific PKO queue (sub id in low 16 bits). */
#define CVMX_CMD_QUEUE_PKO(queue) \
	((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))

	CVMX_CMD_QUEUE_ZIP = 0x10000,
	CVMX_CMD_QUEUE_DFA = 0x20000,
	CVMX_CMD_QUEUE_RAID = 0x30000,
	CVMX_CMD_QUEUE_DMA_BASE = 0x40000,

/* Build the queue id for a specific DMA engine queue (sub id in low 16 bits). */
#define CVMX_CMD_QUEUE_DMA(queue) \
	((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))

	/* Sentinel: one past the last valid hardware unit id. */
	CVMX_CMD_QUEUE_END = 0x50000,
} cvmx_cmd_queue_id_t;
111
/**
 * Command write operations can fail if the command queue needs
 * a new buffer and the associated FPA pool is empty. It can also
 * fail if the number of queued command words reaches the maximum
 * set at initialization.
 */
typedef enum {
	CVMX_CMD_QUEUE_SUCCESS = 0,
	CVMX_CMD_QUEUE_NO_MEMORY = -1,
	CVMX_CMD_QUEUE_FULL = -2,
	CVMX_CMD_QUEUE_INVALID_PARAM = -3,
	CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
} cvmx_cmd_queue_result_t;
125
/*
 * Per-queue state record, packed into bitfields so it stays small
 * inside the shared named block.
 */
typedef struct {
	/* You have lock when this is your ticket */
	uint8_t now_serving;
	uint64_t unused1:24;
	/* Maximum outstanding command words */
	uint32_t max_depth;
	/* FPA pool buffers come from */
	uint64_t fpa_pool:3;
	/* Top of command buffer pointer shifted 7 */
	uint64_t base_ptr_div128:29;
	uint64_t unused2:6;
	/* FPA buffer size in 64bit words minus 1 */
	uint64_t pool_size_m1:13;
	/* Number of commands already used in buffer */
	uint64_t index:13;
} __cvmx_cmd_queue_state_t;
142
/**
 * This structure contains the global state of all command queues.
 * It is stored in a bootmem named block and shared by all
 * applications running on Octeon. Tickets are stored in a different
 * cache line than queue information to reduce the contention on the
 * ll/sc used to get a ticket. If this is not the case, the update
 * of queue state causes the ll/sc to fail quite often.
 */
typedef struct {
	/* One ticket counter per possible queue; indexed by
	   __cvmx_cmd_queue_get_index(). */
	uint64_t ticket[(CVMX_CMD_QUEUE_END >> 16) * 256];
	/* Per-queue state records, indexed the same way. */
	__cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END >> 16) * 256];
} __cvmx_cmd_queue_all_state_t;
155
156/**
157 * Initialize a command queue for use. The initial FPA buffer is
158 * allocated and the hardware unit is configured to point to the
159 * new command queue.
160 *
161 * @queue_id: Hardware command queue to initialize.
162 * @max_depth: Maximum outstanding commands that can be queued.
163 * @fpa_pool: FPA pool the command queues should come from.
164 * @pool_size: Size of each buffer in the FPA pool (bytes)
165 *
166 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
167 */
168cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
169 int max_depth, int fpa_pool,
170 int pool_size);
171
172/**
173 * Shutdown a queue a free it's command buffers to the FPA. The
174 * hardware connected to the queue must be stopped before this
175 * function is called.
176 *
177 * @queue_id: Queue to shutdown
178 *
179 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
180 */
181cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);
182
183/**
184 * Return the number of command words pending in the queue. This
185 * function may be relatively slow for some hardware units.
186 *
187 * @queue_id: Hardware command queue to query
188 *
189 * Returns Number of outstanding commands
190 */
191int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
192
193/**
194 * Return the command buffer to be written to. The purpose of this
195 * function is to allow CVMX routine access to the low level buffer
196 * for initial hardware setup. User applications should not call this
197 * function directly.
198 *
199 * @queue_id: Command queue to query
200 *
201 * Returns Command buffer or NULL on failure
202 */
203void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
204
205/**
206 * Get the index into the state arrays for the supplied queue id.
207 *
208 * @queue_id: Queue ID to get an index for
209 *
210 * Returns Index into the state arrays
211 */
212static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
213{
214 /*
215 * Warning: This code currently only works with devices that
216 * have 256 queues or less. Devices with more than 16 queues
217 * are layed out in memory to allow cores quick access to
218 * every 16th queue. This reduces cache thrashing when you are
219 * running 16 queues per port to support lockless operation.
220 */
221 int unit = queue_id >> 16;
222 int q = (queue_id >> 4) & 0xf;
223 int core = queue_id & 0xf;
224 return unit * 256 + core * 16 + q;
225}
226
/**
 * Lock the supplied queue so nobody else is updating it at the same
 * time as us.
 *
 * This is a fair "ticket / now serving" lock built from ll/sc: each
 * caller atomically takes a ticket, then owns the lock once
 * now_serving reaches its ticket.  While waiting, a core delays in
 * proportion to the number of tickets ahead of it, which limits bus
 * traffic from spinning on the shared cache line.  Release is
 * __cvmx_cmd_queue_unlock(), which increments now_serving.
 *
 * @queue_id: Queue ID to lock
 * @qptr: Pointer to the queue's global state
 */
static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
					 __cvmx_cmd_queue_state_t *qptr)
{
	extern __cvmx_cmd_queue_all_state_t
	    *__cvmx_cmd_queue_state_ptr;
	int tmp;
	int my_ticket;
	prefetch(qptr);
	asm volatile (
	    ".set push\n"
	    ".set noreorder\n"
	    "1:\n"
	    /* Atomic add one to ticket_ptr */
	    "ll %[my_ticket], %[ticket_ptr]\n"
	    /* and store the original value */
	    "li %[ticket], 1\n"
	    /* in my_ticket */
	    "baddu %[ticket], %[my_ticket]\n"
	    "sc %[ticket], %[ticket_ptr]\n"
	    "beqz %[ticket], 1b\n"
	    " nop\n"
	    /* Load the current now_serving ticket */
	    "lbu %[ticket], %[now_serving]\n"
	    "2:\n"
	    /* Jump out if now_serving == my_ticket */
	    "beq %[ticket], %[my_ticket], 4f\n"
	    /* Find out how many tickets are in front of me */
	    " subu %[ticket], %[my_ticket], %[ticket]\n"
	    /* Use tickets in front of me minus one to delay */
	    "subu %[ticket], 1\n"
	    /* Delay will be ((tickets in front)-1)*32 loops */
	    "cins %[ticket], %[ticket], 5, 7\n"
	    "3:\n"
	    /* Loop here until our ticket might be up */
	    "bnez %[ticket], 3b\n"
	    " subu %[ticket], 1\n"
	    /* Jump back up to check out ticket again */
	    "b 2b\n"
	    /* Load the current now_serving ticket */
	    " lbu %[ticket], %[now_serving]\n"
	    "4:\n"
	    ".set pop\n" :
	    [ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
	    [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
	    [my_ticket] "=r"(my_ticket)
	    );
}
281
/**
 * Unlock the queue, flushing all writes.
 *
 * @qptr: Queue to unlock
 */
static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
{
	/* Hand the lock to the next ticket holder. */
	qptr->now_serving++;
	/* Flush queue updates before the release becomes visible. */
	CVMX_SYNCWS;
}
292
293/**
294 * Get the queue state structure for the given queue id
295 *
296 * @queue_id: Queue id to get
297 *
298 * Returns Queue structure or NULL on failure
299 */
300static inline __cvmx_cmd_queue_state_t
301 *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
302{
303 extern __cvmx_cmd_queue_all_state_t
304 *__cvmx_cmd_queue_state_ptr;
305 return &__cvmx_cmd_queue_state_ptr->
306 state[__cvmx_cmd_queue_get_index(queue_id)];
307}
308
/**
 * Write an arbitrary number of command words to a command queue.
 * This is a generic function; the fixed number of command word
 * functions yield higher performance.
 *
 * @queue_id: Hardware command queue to write to
 * @use_locking:
 *      Use internal locking to ensure exclusive access for queue
 *      updates. If you don't use this locking you must ensure
 *      exclusivity some other way. Locking is strongly recommended.
 * @cmd_count: Number of command words to write
 * @cmds: Array of commands to write
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
							   queue_id,
							   int use_locking,
							   int cmd_count,
							   uint64_t *cmds)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

	/* Make sure nobody else is updating the same queue */
	if (likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id, qptr);

	/*
	 * If a max queue length was specified then make sure we don't
	 * exceed it. If any part of the command would be below the
	 * limit we allow it.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
		if (unlikely
		    (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_FULL;
		}
	}

	/*
	 * Normally there is plenty of room in the current buffer for
	 * the command.
	 */
	if (likely(qptr->index + cmd_count < qptr->pool_size_m1)) {
		uint64_t *ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		qptr->index += cmd_count;
		while (cmd_count--)
			*ptr++ = *cmds++;
	} else {
		uint64_t *ptr;
		int count;
		/*
		 * We need a new command buffer. Fail if there isn't
		 * one available.
		 */
		uint64_t *new_buffer =
		    (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
		if (unlikely(new_buffer == NULL)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		/*
		 * Figure out how many command words will fit in this
		 * buffer. One location will be needed for the next
		 * buffer pointer.
		 */
		count = qptr->pool_size_m1 - qptr->index;
		ptr += qptr->index;
		cmd_count -= count;
		while (count--)
			*ptr++ = *cmds++;
		/* Chain the old buffer to the new one via its last word. */
		*ptr = cvmx_ptr_to_phys(new_buffer);
		/*
		 * The current buffer is full and has a link to the
		 * next buffer. Time to write the rest of the commands
		 * into the new buffer.
		 */
		qptr->base_ptr_div128 = *ptr >> 7;
		qptr->index = cmd_count;
		ptr = new_buffer;
		while (cmd_count--)
			*ptr++ = *cmds++;
	}

	/* All updates are complete. Release the lock and return */
	if (likely(use_locking))
		__cvmx_cmd_queue_unlock(qptr);
	return CVMX_CMD_QUEUE_SUCCESS;
}
407
/**
 * Simple function to write two command words to a command
 * queue.
 *
 * @queue_id: Hardware command queue to write to
 * @use_locking:
 *      Use internal locking to ensure exclusive access for queue
 *      updates. If you don't use this locking you must ensure
 *      exclusivity some other way. Locking is strongly recommended.
 * @cmd1: Command
 * @cmd2: Command
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
							    queue_id,
							    int use_locking,
							    uint64_t cmd1,
							    uint64_t cmd2)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

	/* Make sure nobody else is updating the same queue */
	if (likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id, qptr);

	/*
	 * If a max queue length was specified then make sure we don't
	 * exceed it. If any part of the command would be below the
	 * limit we allow it.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
		if (unlikely
		    (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_FULL;
		}
	}

	/*
	 * Normally there is plenty of room in the current buffer for
	 * the command.
	 */
	if (likely(qptr->index + 2 < qptr->pool_size_m1)) {
		uint64_t *ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		qptr->index += 2;
		ptr[0] = cmd1;
		ptr[1] = cmd2;
	} else {
		uint64_t *ptr;
		/*
		 * Figure out how many command words will fit in this
		 * buffer. One location will be needed for the next
		 * buffer pointer.
		 */
		int count = qptr->pool_size_m1 - qptr->index;
		/*
		 * We need a new command buffer. Fail if there isn't
		 * one available.
		 */
		uint64_t *new_buffer =
		    (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
		if (unlikely(new_buffer == NULL)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		/* Reserve the old buffer's last slot for the link word. */
		count--;
		ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		*ptr++ = cmd1;
		if (likely(count))
			*ptr++ = cmd2;
		/* Chain the old buffer to the new one. */
		*ptr = cvmx_ptr_to_phys(new_buffer);
		/*
		 * The current buffer is full and has a link to the
		 * next buffer. Time to write the rest of the commands
		 * into the new buffer.
		 */
		qptr->base_ptr_div128 = *ptr >> 7;
		qptr->index = 0;
		/* If cmd2 didn't fit above, it starts the new buffer. */
		if (unlikely(count == 0)) {
			qptr->index = 1;
			new_buffer[0] = cmd2;
		}
	}

	/* All updates are complete. Release the lock and return */
	if (likely(use_locking))
		__cvmx_cmd_queue_unlock(qptr);
	return CVMX_CMD_QUEUE_SUCCESS;
}
506
/**
 * Simple function to write three command words to a command
 * queue.
 *
 * @queue_id: Hardware command queue to write to
 * @use_locking:
 *      Use internal locking to ensure exclusive access for queue
 *      updates. If you don't use this locking you must ensure
 *      exclusivity some other way. Locking is strongly recommended.
 * @cmd1: Command
 * @cmd2: Command
 * @cmd3: Command
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t
							    queue_id,
							    int use_locking,
							    uint64_t cmd1,
							    uint64_t cmd2,
							    uint64_t cmd3)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

	/* Make sure nobody else is updating the same queue */
	if (likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id, qptr);

	/*
	 * If a max queue length was specified then make sure we don't
	 * exceed it. If any part of the command would be below the
	 * limit we allow it.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
		if (unlikely
		    (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_FULL;
		}
	}

	/*
	 * Normally there is plenty of room in the current buffer for
	 * the command.
	 */
	if (likely(qptr->index + 3 < qptr->pool_size_m1)) {
		uint64_t *ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		qptr->index += 3;
		ptr[0] = cmd1;
		ptr[1] = cmd2;
		ptr[2] = cmd3;
	} else {
		uint64_t *ptr;
		/*
		 * Figure out how many command words will fit in this
		 * buffer. One location will be needed for the next
		 * buffer pointer
		 */
		int count = qptr->pool_size_m1 - qptr->index;
		/*
		 * We need a new command buffer. Fail if there isn't
		 * one available
		 */
		uint64_t *new_buffer =
		    (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
		if (unlikely(new_buffer == NULL)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		/* Reserve the old buffer's last slot for the link word. */
		count--;
		ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		*ptr++ = cmd1;
		if (count) {
			*ptr++ = cmd2;
			if (count > 1)
				*ptr++ = cmd3;
		}
		/* Chain the old buffer to the new one. */
		*ptr = cvmx_ptr_to_phys(new_buffer);
		/*
		 * The current buffer is full and has a link to the
		 * next buffer. Time to write the rest of the commands
		 * into the new buffer.
		 */
		qptr->base_ptr_div128 = *ptr >> 7;
		qptr->index = 0;
		ptr = new_buffer;
		/* Spill whatever didn't fit into the new buffer. */
		if (count == 0) {
			*ptr++ = cmd2;
			qptr->index++;
		}
		if (count < 2) {
			*ptr++ = cmd3;
			qptr->index++;
		}
	}

	/* All updates are complete. Release the lock and return */
	if (likely(use_locking))
		__cvmx_cmd_queue_unlock(qptr);
	return CVMX_CMD_QUEUE_SUCCESS;
}
616
617#endif /* __CVMX_CMD_QUEUE_H__ */
diff --git a/drivers/staging/octeon/cvmx-config.h b/drivers/staging/octeon/cvmx-config.h
new file mode 100644
index 000000000000..078a520481cf
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-config.h
@@ -0,0 +1,169 @@
1#ifndef __CVMX_CONFIG_H__
2#define __CVMX_CONFIG_H__
3
4/************************* Config Specific Defines ************************/
5#define CVMX_LLM_NUM_PORTS 1
6#define CVMX_NULL_POINTER_PROTECT 1
7#define CVMX_ENABLE_DEBUG_PRINTS 1
8/* PKO queues per port for interface 0 (ports 0-15) */
9#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 1
10/* PKO queues per port for interface 1 (ports 16-31) */
11#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 1
12/* Limit on the number of PKO ports enabled for interface 0 */
13#define CVMX_PKO_MAX_PORTS_INTERFACE0 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
14/* Limit on the number of PKO ports enabled for interface 1 */
15#define CVMX_PKO_MAX_PORTS_INTERFACE1 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
16/* PKO queues per port for PCI (ports 32-35) */
17#define CVMX_PKO_QUEUES_PER_PORT_PCI 1
18/* PKO queues per port for Loop devices (ports 36-39) */
19#define CVMX_PKO_QUEUES_PER_PORT_LOOP 1
20
21/************************* FPA allocation *********************************/
22/* Pool sizes in bytes, must be multiple of a cache line */
23#define CVMX_FPA_POOL_0_SIZE (16 * CVMX_CACHE_LINE_SIZE)
24#define CVMX_FPA_POOL_1_SIZE (1 * CVMX_CACHE_LINE_SIZE)
25#define CVMX_FPA_POOL_2_SIZE (8 * CVMX_CACHE_LINE_SIZE)
26#define CVMX_FPA_POOL_3_SIZE (0 * CVMX_CACHE_LINE_SIZE)
27#define CVMX_FPA_POOL_4_SIZE (0 * CVMX_CACHE_LINE_SIZE)
28#define CVMX_FPA_POOL_5_SIZE (0 * CVMX_CACHE_LINE_SIZE)
29#define CVMX_FPA_POOL_6_SIZE (0 * CVMX_CACHE_LINE_SIZE)
30#define CVMX_FPA_POOL_7_SIZE (0 * CVMX_CACHE_LINE_SIZE)
31
32/* Pools in use */
33/* Packet buffers */
34#define CVMX_FPA_PACKET_POOL (0)
35#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
36/* Work queue entries */
37#define CVMX_FPA_WQE_POOL (1)
38#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
39/* PKO queue command buffers */
40#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
41#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
42
43/************************* FAU allocation ********************************/
44/* The fetch and add registers are allocated here. They are arranged
45 * in order of descending size so that all alignment constraints are
46 * automatically met. The enums are linked so that the following enum
47 * continues allocating where the previous one left off, so the
48 * numbering within each enum always starts with zero. The macros
49 * take care of the address increment size, so the values entered
50 * always increase by 1. FAU registers are accessed with byte
51 * addresses.
52 */
53
54#define CVMX_FAU_REG_64_ADDR(x) ((x << 3) + CVMX_FAU_REG_64_START)
55typedef enum {
56 CVMX_FAU_REG_64_START = 0,
57 CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(0),
58} cvmx_fau_reg_64_t;
59
60#define CVMX_FAU_REG_32_ADDR(x) ((x << 2) + CVMX_FAU_REG_32_START)
61typedef enum {
62 CVMX_FAU_REG_32_START = CVMX_FAU_REG_64_END,
63 CVMX_FAU_REG_32_END = CVMX_FAU_REG_32_ADDR(0),
64} cvmx_fau_reg_32_t;
65
66#define CVMX_FAU_REG_16_ADDR(x) ((x << 1) + CVMX_FAU_REG_16_START)
67typedef enum {
68 CVMX_FAU_REG_16_START = CVMX_FAU_REG_32_END,
69 CVMX_FAU_REG_16_END = CVMX_FAU_REG_16_ADDR(0),
70} cvmx_fau_reg_16_t;
71
72#define CVMX_FAU_REG_8_ADDR(x) ((x) + CVMX_FAU_REG_8_START)
73typedef enum {
74 CVMX_FAU_REG_8_START = CVMX_FAU_REG_16_END,
75 CVMX_FAU_REG_8_END = CVMX_FAU_REG_8_ADDR(0),
76} cvmx_fau_reg_8_t;
77
78/*
79 * The name CVMX_FAU_REG_AVAIL_BASE is provided to indicate the first
80 * available FAU address that is not allocated in cvmx-config.h. This
81 * is 64 bit aligned.
82 */
83#define CVMX_FAU_REG_AVAIL_BASE ((CVMX_FAU_REG_8_END + 0x7) & (~0x7ULL))
84#define CVMX_FAU_REG_END (2048)
85
86/********************** scratch memory allocation *************************/
87/* Scratchpad memory allocation. Note that these are byte memory
88 * addresses. Some uses of scratchpad (IOBDMA for example) require
89 * the use of 8-byte aligned addresses, so proper alignment needs to
90 * be taken into account.
91 */
92/* Generic scratch iobdma area */
93#define CVMX_SCR_SCRATCH (0)
94/* First location available after cvmx-config.h allocated region. */
95#define CVMX_SCR_REG_AVAIL_BASE (8)
96
97/*
98 * CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve
99 * before the beginning of the packet. If necessary, override the
100 * default here. See the IPD section of the hardware manual for MBUFF
101 * SKIP details.
102 */
103#define CVMX_HELPER_FIRST_MBUFF_SKIP 184
104
105/*
106 * CVMX_HELPER_NOT_FIRST_MBUFF_SKIP is the number of bytes to reserve
107 * in each chained packet element. If necessary, override the default
108 * here.
109 */
110#define CVMX_HELPER_NOT_FIRST_MBUFF_SKIP 0
111
112/*
113 * CVMX_HELPER_ENABLE_BACK_PRESSURE controls whether back pressure is
114 * enabled for all input ports. This controls if IPD sends
115 * backpressure to all ports if Octeon's FPA pools don't have enough
116 * packet or work queue entries. Even when this is off, it is still
117 * possible to get backpressure from individual hardware ports. When
118 * configuring backpressure, also check
119 * CVMX_HELPER_DISABLE_*_BACKPRESSURE below. If necessary, override
120 * the default here.
121 */
122#define CVMX_HELPER_ENABLE_BACK_PRESSURE 1
123
124/*
125 * CVMX_HELPER_ENABLE_IPD controls if the IPD is enabled in the helper
126 * function. Once it is enabled the hardware starts accepting
127 * packets. You might want to skip the IPD enable if configuration
 128 * changes are needed from the default helper setup. If necessary,
129 * override the default here.
130 */
131#define CVMX_HELPER_ENABLE_IPD 0
132
133/*
134 * CVMX_HELPER_INPUT_TAG_TYPE selects the type of tag that the IPD assigns
135 * to incoming packets.
136 */
137#define CVMX_HELPER_INPUT_TAG_TYPE CVMX_POW_TAG_TYPE_ORDERED
138
139#define CVMX_ENABLE_PARAMETER_CHECKING 0
140
141/*
142 * The following select which fields are used by the PIP to generate
143 * the tag on INPUT
144 * 0: don't include
145 * 1: include
146 */
147#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP 0
148#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0
149#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0
150#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0
151#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
152#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP 0
153#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0
154#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0
155#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0
156#define CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL 0
157#define CVMX_HELPER_INPUT_TAG_INPUT_PORT 1
158
159/* Select skip mode for input ports */
160#define CVMX_HELPER_INPUT_PORT_SKIP_MODE CVMX_PIP_PORT_CFG_MODE_SKIPL2
161
162/*
163 * Force backpressure to be disabled. This overrides all other
164 * backpressure configuration.
165 */
166#define CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE 0
167
168#endif /* __CVMX_CONFIG_H__ */
169
diff --git a/drivers/staging/octeon/cvmx-dbg-defs.h b/drivers/staging/octeon/cvmx-dbg-defs.h
new file mode 100644
index 000000000000..abbf42d05e5a
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-dbg-defs.h
@@ -0,0 +1,72 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_DBG_DEFS_H__
29#define __CVMX_DBG_DEFS_H__
30
31#define CVMX_DBG_DATA \
32 CVMX_ADD_IO_SEG(0x00011F00000001E8ull)
33
34union cvmx_dbg_data {
35 uint64_t u64;
36 struct cvmx_dbg_data_s {
37 uint64_t reserved_23_63:41;
38 uint64_t c_mul:5;
39 uint64_t dsel_ext:1;
40 uint64_t data:17;
41 } s;
42 struct cvmx_dbg_data_cn30xx {
43 uint64_t reserved_31_63:33;
44 uint64_t pll_mul:3;
45 uint64_t reserved_23_27:5;
46 uint64_t c_mul:5;
47 uint64_t dsel_ext:1;
48 uint64_t data:17;
49 } cn30xx;
50 struct cvmx_dbg_data_cn30xx cn31xx;
51 struct cvmx_dbg_data_cn38xx {
52 uint64_t reserved_29_63:35;
53 uint64_t d_mul:4;
54 uint64_t dclk_mul2:1;
55 uint64_t cclk_div2:1;
56 uint64_t c_mul:5;
57 uint64_t dsel_ext:1;
58 uint64_t data:17;
59 } cn38xx;
60 struct cvmx_dbg_data_cn38xx cn38xxp2;
61 struct cvmx_dbg_data_cn30xx cn50xx;
62 struct cvmx_dbg_data_cn58xx {
63 uint64_t reserved_29_63:35;
64 uint64_t rem:6;
65 uint64_t c_mul:5;
66 uint64_t dsel_ext:1;
67 uint64_t data:17;
68 } cn58xx;
69 struct cvmx_dbg_data_cn58xx cn58xxp1;
70};
71
72#endif
diff --git a/drivers/staging/octeon/cvmx-fau.h b/drivers/staging/octeon/cvmx-fau.h
new file mode 100644
index 000000000000..29bdce66cdf8
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-fau.h
@@ -0,0 +1,597 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Interface to the hardware Fetch and Add Unit.
30 */
31
32#ifndef __CVMX_FAU_H__
33#define __CVMX_FAU_H__
34
35/*
36 * Octeon Fetch and Add Unit (FAU)
37 */
38
39#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
40#define CVMX_FAU_BITS_SCRADDR 63, 56
41#define CVMX_FAU_BITS_LEN 55, 48
42#define CVMX_FAU_BITS_INEVAL 35, 14
43#define CVMX_FAU_BITS_TAGWAIT 13, 13
44#define CVMX_FAU_BITS_NOADD 13, 13
45#define CVMX_FAU_BITS_SIZE 12, 11
46#define CVMX_FAU_BITS_REGISTER 10, 0
47
48typedef enum {
49 CVMX_FAU_OP_SIZE_8 = 0,
50 CVMX_FAU_OP_SIZE_16 = 1,
51 CVMX_FAU_OP_SIZE_32 = 2,
52 CVMX_FAU_OP_SIZE_64 = 3
53} cvmx_fau_op_size_t;
54
55/**
56 * Tagwait return definition. If a timeout occurs, the error
57 * bit will be set. Otherwise the value of the register before
58 * the update will be returned.
59 */
60typedef struct {
61 uint64_t error:1;
62 int64_t value:63;
63} cvmx_fau_tagwait64_t;
64
65/**
66 * Tagwait return definition. If a timeout occurs, the error
67 * bit will be set. Otherwise the value of the register before
68 * the update will be returned.
69 */
70typedef struct {
71 uint64_t error:1;
72 int32_t value:31;
73} cvmx_fau_tagwait32_t;
74
75/**
76 * Tagwait return definition. If a timeout occurs, the error
77 * bit will be set. Otherwise the value of the register before
78 * the update will be returned.
79 */
80typedef struct {
81 uint64_t error:1;
82 int16_t value:15;
83} cvmx_fau_tagwait16_t;
84
85/**
86 * Tagwait return definition. If a timeout occurs, the error
87 * bit will be set. Otherwise the value of the register before
88 * the update will be returned.
89 */
90typedef struct {
91 uint64_t error:1;
92 int8_t value:7;
93} cvmx_fau_tagwait8_t;
94
95/**
96 * Asynchronous tagwait return definition. If a timeout occurs,
97 * the error bit will be set. Otherwise the value of the
98 * register before the update will be returned.
99 */
100typedef union {
101 uint64_t u64;
102 struct {
103 uint64_t invalid:1;
104 uint64_t data:63; /* unpredictable if invalid is set */
105 } s;
106} cvmx_fau_async_tagwait_result_t;
107
108/**
109 * Builds a store I/O address for writing to the FAU
110 *
111 * @noadd: 0 = Store value is atomically added to the current value
112 * 1 = Store value is atomically written over the current value
113 * @reg: FAU atomic register to access. 0 <= reg < 2048.
114 * - Step by 2 for 16 bit access.
115 * - Step by 4 for 32 bit access.
116 * - Step by 8 for 64 bit access.
117 * Returns Address to store for atomic update
118 */
119static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
120{
121 return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
122 cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
123 cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
124}
125
126/**
127 * Builds a I/O address for accessing the FAU
128 *
129 * @tagwait: Should the atomic add wait for the current tag switch
130 * operation to complete.
131 * - 0 = Don't wait
132 * - 1 = Wait for tag switch to complete
133 * @reg: FAU atomic register to access. 0 <= reg < 2048.
134 * - Step by 2 for 16 bit access.
135 * - Step by 4 for 32 bit access.
136 * - Step by 8 for 64 bit access.
137 * @value: Signed value to add.
138 * Note: When performing 32 and 64 bit access, only the low
139 * 22 bits are available.
140 * Returns Address to read from for atomic update
141 */
142static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
143 int64_t value)
144{
145 return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
146 cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
147 cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
148 cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
149}
150
151/**
152 * Perform an atomic 64 bit add
153 *
154 * @reg: FAU atomic register to access. 0 <= reg < 2048.
155 * - Step by 8 for 64 bit access.
156 * @value: Signed value to add.
157 * Note: Only the low 22 bits are available.
158 * Returns Value of the register before the update
159 */
160static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
161 int64_t value)
162{
163 return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
164}
165
166/**
167 * Perform an atomic 32 bit add
168 *
169 * @reg: FAU atomic register to access. 0 <= reg < 2048.
170 * - Step by 4 for 32 bit access.
171 * @value: Signed value to add.
172 * Note: Only the low 22 bits are available.
173 * Returns Value of the register before the update
174 */
175static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
176 int32_t value)
177{
178 return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
179}
180
181/**
182 * Perform an atomic 16 bit add
183 *
184 * @reg: FAU atomic register to access. 0 <= reg < 2048.
185 * - Step by 2 for 16 bit access.
186 * @value: Signed value to add.
187 * Returns Value of the register before the update
188 */
189static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg,
190 int16_t value)
191{
192 return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
193}
194
195/**
196 * Perform an atomic 8 bit add
197 *
198 * @reg: FAU atomic register to access. 0 <= reg < 2048.
199 * @value: Signed value to add.
200 * Returns Value of the register before the update
201 */
202static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
203{
204 return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
205}
206
207/**
208 * Perform an atomic 64 bit add after the current tag switch
209 * completes
210 *
211 * @reg: FAU atomic register to access. 0 <= reg < 2048.
212 * - Step by 8 for 64 bit access.
213 * @value: Signed value to add.
214 * Note: Only the low 22 bits are available.
215 * Returns If a timeout occurs, the error bit will be set. Otherwise
216 * the value of the register before the update will be
217 * returned
218 */
219static inline cvmx_fau_tagwait64_t
220cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
221{
222 union {
223 uint64_t i64;
224 cvmx_fau_tagwait64_t t;
225 } result;
226 result.i64 =
227 cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
228 return result.t;
229}
230
231/**
232 * Perform an atomic 32 bit add after the current tag switch
233 * completes
234 *
235 * @reg: FAU atomic register to access. 0 <= reg < 2048.
236 * - Step by 4 for 32 bit access.
237 * @value: Signed value to add.
238 * Note: Only the low 22 bits are available.
239 * Returns If a timeout occurs, the error bit will be set. Otherwise
240 * the value of the register before the update will be
241 * returned
242 */
243static inline cvmx_fau_tagwait32_t
244cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
245{
246 union {
247 uint64_t i32;
248 cvmx_fau_tagwait32_t t;
249 } result;
250 result.i32 =
251 cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
252 return result.t;
253}
254
255/**
256 * Perform an atomic 16 bit add after the current tag switch
257 * completes
258 *
259 * @reg: FAU atomic register to access. 0 <= reg < 2048.
260 * - Step by 2 for 16 bit access.
261 * @value: Signed value to add.
262 * Returns If a timeout occurs, the error bit will be set. Otherwise
263 * the value of the register before the update will be
264 * returned
265 */
266static inline cvmx_fau_tagwait16_t
267cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
268{
269 union {
270 uint64_t i16;
271 cvmx_fau_tagwait16_t t;
272 } result;
273 result.i16 =
274 cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
275 return result.t;
276}
277
278/**
279 * Perform an atomic 8 bit add after the current tag switch
280 * completes
281 *
282 * @reg: FAU atomic register to access. 0 <= reg < 2048.
283 * @value: Signed value to add.
284 * Returns If a timeout occurs, the error bit will be set. Otherwise
285 * the value of the register before the update will be
286 * returned
287 */
288static inline cvmx_fau_tagwait8_t
289cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
290{
291 union {
292 uint64_t i8;
293 cvmx_fau_tagwait8_t t;
294 } result;
295 result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
296 return result.t;
297}
298
299/**
300 * Builds I/O data for async operations
301 *
 302 * @scraddr: Scratch pad byte address to write to. Must be 8 byte aligned
303 * @value: Signed value to add.
304 * Note: When performing 32 and 64 bit access, only the low
305 * 22 bits are available.
306 * @tagwait: Should the atomic add wait for the current tag switch
307 * operation to complete.
308 * - 0 = Don't wait
309 * - 1 = Wait for tag switch to complete
310 * @size: The size of the operation:
311 * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
312 * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
313 * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
314 * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
315 * @reg: FAU atomic register to access. 0 <= reg < 2048.
316 * - Step by 2 for 16 bit access.
317 * - Step by 4 for 32 bit access.
318 * - Step by 8 for 64 bit access.
319 * Returns Data to write using cvmx_send_single
320 */
321static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
322 uint64_t tagwait,
323 cvmx_fau_op_size_t size,
324 uint64_t reg)
325{
326 return CVMX_FAU_LOAD_IO_ADDRESS |
327 cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3) |
328 cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
329 cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
330 cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
331 cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
332 cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
333}
334
335/**
336 * Perform an async atomic 64 bit add. The old value is
337 * placed in the scratch memory at byte address scraddr.
338 *
339 * @scraddr: Scratch memory byte address to put response in.
340 * Must be 8 byte aligned.
341 * @reg: FAU atomic register to access. 0 <= reg < 2048.
342 * - Step by 8 for 64 bit access.
343 * @value: Signed value to add.
344 * Note: Only the low 22 bits are available.
345 * Returns Placed in the scratch pad register
346 */
347static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
348 cvmx_fau_reg_64_t reg,
349 int64_t value)
350{
351 cvmx_send_single(__cvmx_fau_iobdma_data
352 (scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
353}
354
355/**
356 * Perform an async atomic 32 bit add. The old value is
357 * placed in the scratch memory at byte address scraddr.
358 *
359 * @scraddr: Scratch memory byte address to put response in.
360 * Must be 8 byte aligned.
361 * @reg: FAU atomic register to access. 0 <= reg < 2048.
362 * - Step by 4 for 32 bit access.
363 * @value: Signed value to add.
364 * Note: Only the low 22 bits are available.
365 * Returns Placed in the scratch pad register
366 */
367static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
368 cvmx_fau_reg_32_t reg,
369 int32_t value)
370{
371 cvmx_send_single(__cvmx_fau_iobdma_data
372 (scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
373}
374
375/**
376 * Perform an async atomic 16 bit add. The old value is
377 * placed in the scratch memory at byte address scraddr.
378 *
379 * @scraddr: Scratch memory byte address to put response in.
380 * Must be 8 byte aligned.
381 * @reg: FAU atomic register to access. 0 <= reg < 2048.
382 * - Step by 2 for 16 bit access.
383 * @value: Signed value to add.
384 * Returns Placed in the scratch pad register
385 */
386static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
387 cvmx_fau_reg_16_t reg,
388 int16_t value)
389{
390 cvmx_send_single(__cvmx_fau_iobdma_data
391 (scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
392}
393
394/**
395 * Perform an async atomic 8 bit add. The old value is
396 * placed in the scratch memory at byte address scraddr.
397 *
398 * @scraddr: Scratch memory byte address to put response in.
399 * Must be 8 byte aligned.
400 * @reg: FAU atomic register to access. 0 <= reg < 2048.
401 * @value: Signed value to add.
402 * Returns Placed in the scratch pad register
403 */
404static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
405 cvmx_fau_reg_8_t reg,
406 int8_t value)
407{
408 cvmx_send_single(__cvmx_fau_iobdma_data
409 (scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
410}
411
412/**
413 * Perform an async atomic 64 bit add after the current tag
414 * switch completes.
415 *
416 * @scraddr: Scratch memory byte address to put response in. Must be
417 * 8 byte aligned. If a timeout occurs, the error bit (63)
418 * will be set. Otherwise the value of the register before
419 * the update will be returned
420 *
421 * @reg: FAU atomic register to access. 0 <= reg < 2048.
422 * - Step by 8 for 64 bit access.
423 * @value: Signed value to add.
424 * Note: Only the low 22 bits are available.
425 * Returns Placed in the scratch pad register
426 */
427static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
428 cvmx_fau_reg_64_t reg,
429 int64_t value)
430{
431 cvmx_send_single(__cvmx_fau_iobdma_data
432 (scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
433}
434
435/**
436 * Perform an async atomic 32 bit add after the current tag
437 * switch completes.
438 *
439 * @scraddr: Scratch memory byte address to put response in. Must be
440 * 8 byte aligned. If a timeout occurs, the error bit (63)
441 * will be set. Otherwise the value of the register before
442 * the update will be returned
443 *
444 * @reg: FAU atomic register to access. 0 <= reg < 2048.
445 * - Step by 4 for 32 bit access.
446 * @value: Signed value to add.
447 * Note: Only the low 22 bits are available.
448 * Returns Placed in the scratch pad register
449 */
450static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
451 cvmx_fau_reg_32_t reg,
452 int32_t value)
453{
454 cvmx_send_single(__cvmx_fau_iobdma_data
455 (scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
456}
457
458/**
459 * Perform an async atomic 16 bit add after the current tag
460 * switch completes.
461 *
462 * @scraddr: Scratch memory byte address to put response in. Must be
463 * 8 byte aligned. If a timeout occurs, the error bit (63)
464 * will be set. Otherwise the value of the register before
465 * the update will be returned
466 *
467 * @reg: FAU atomic register to access. 0 <= reg < 2048.
468 * - Step by 2 for 16 bit access.
469 * @value: Signed value to add.
470 *
471 * Returns Placed in the scratch pad register
472 */
473static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
474 cvmx_fau_reg_16_t reg,
475 int16_t value)
476{
477 cvmx_send_single(__cvmx_fau_iobdma_data
478 (scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
479}
480
481/**
482 * Perform an async atomic 8 bit add after the current tag
483 * switch completes.
484 *
485 * @scraddr: Scratch memory byte address to put response in. Must be
486 * 8 byte aligned. If a timeout occurs, the error bit (63)
487 * will be set. Otherwise the value of the register before
488 * the update will be returned
489 *
490 * @reg: FAU atomic register to access. 0 <= reg < 2048.
491 * @value: Signed value to add.
492 *
493 * Returns Placed in the scratch pad register
494 */
495static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
496 cvmx_fau_reg_8_t reg,
497 int8_t value)
498{
499 cvmx_send_single(__cvmx_fau_iobdma_data
500 (scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
501}
502
503/**
504 * Perform an atomic 64 bit add
505 *
506 * @reg: FAU atomic register to access. 0 <= reg < 2048.
507 * - Step by 8 for 64 bit access.
508 * @value: Signed value to add.
509 */
510static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
511{
512 cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
513}
514
515/**
516 * Perform an atomic 32 bit add
517 *
518 * @reg: FAU atomic register to access. 0 <= reg < 2048.
519 * - Step by 4 for 32 bit access.
520 * @value: Signed value to add.
521 */
522static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
523{
524 cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
525}
526
527/**
528 * Perform an atomic 16 bit add
529 *
530 * @reg: FAU atomic register to access. 0 <= reg < 2048.
531 * - Step by 2 for 16 bit access.
532 * @value: Signed value to add.
533 */
534static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
535{
536 cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
537}
538
539/**
540 * Perform an atomic 8 bit add
541 *
542 * @reg: FAU atomic register to access. 0 <= reg < 2048.
543 * @value: Signed value to add.
544 */
545static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
546{
547 cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
548}
549
550/**
551 * Perform an atomic 64 bit write
552 *
553 * @reg: FAU atomic register to access. 0 <= reg < 2048.
554 * - Step by 8 for 64 bit access.
555 * @value: Signed value to write.
556 */
557static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
558{
559 cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
560}
561
562/**
563 * Perform an atomic 32 bit write
564 *
565 * @reg: FAU atomic register to access. 0 <= reg < 2048.
566 * - Step by 4 for 32 bit access.
567 * @value: Signed value to write.
568 */
569static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
570{
571 cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
572}
573
574/**
575 * Perform an atomic 16 bit write
576 *
577 * @reg: FAU atomic register to access. 0 <= reg < 2048.
578 * - Step by 2 for 16 bit access.
579 * @value: Signed value to write.
580 */
581static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
582{
583 cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
584}
585
586/**
587 * Perform an atomic 8 bit write
588 *
589 * @reg: FAU atomic register to access. 0 <= reg < 2048.
590 * @value: Signed value to write.
591 */
592static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
593{
594 cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
595}
596
597#endif /* __CVMX_FAU_H__ */
diff --git a/drivers/staging/octeon/cvmx-fpa-defs.h b/drivers/staging/octeon/cvmx-fpa-defs.h
new file mode 100644
index 000000000000..bf5546b90110
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-fpa-defs.h
@@ -0,0 +1,403 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_FPA_DEFS_H__
29#define __CVMX_FPA_DEFS_H__
30
31#define CVMX_FPA_BIST_STATUS \
32 CVMX_ADD_IO_SEG(0x00011800280000E8ull)
33#define CVMX_FPA_CTL_STATUS \
34 CVMX_ADD_IO_SEG(0x0001180028000050ull)
35#define CVMX_FPA_FPF0_MARKS \
36 CVMX_ADD_IO_SEG(0x0001180028000000ull)
37#define CVMX_FPA_FPF0_SIZE \
38 CVMX_ADD_IO_SEG(0x0001180028000058ull)
39#define CVMX_FPA_FPF1_MARKS \
40 CVMX_ADD_IO_SEG(0x0001180028000008ull)
41#define CVMX_FPA_FPF2_MARKS \
42 CVMX_ADD_IO_SEG(0x0001180028000010ull)
43#define CVMX_FPA_FPF3_MARKS \
44 CVMX_ADD_IO_SEG(0x0001180028000018ull)
45#define CVMX_FPA_FPF4_MARKS \
46 CVMX_ADD_IO_SEG(0x0001180028000020ull)
47#define CVMX_FPA_FPF5_MARKS \
48 CVMX_ADD_IO_SEG(0x0001180028000028ull)
49#define CVMX_FPA_FPF6_MARKS \
50 CVMX_ADD_IO_SEG(0x0001180028000030ull)
51#define CVMX_FPA_FPF7_MARKS \
52 CVMX_ADD_IO_SEG(0x0001180028000038ull)
53#define CVMX_FPA_FPFX_MARKS(offset) \
54 CVMX_ADD_IO_SEG(0x0001180028000008ull + (((offset) & 7) * 8) - 8 * 1)
55#define CVMX_FPA_FPFX_SIZE(offset) \
56 CVMX_ADD_IO_SEG(0x0001180028000060ull + (((offset) & 7) * 8) - 8 * 1)
57#define CVMX_FPA_INT_ENB \
58 CVMX_ADD_IO_SEG(0x0001180028000048ull)
59#define CVMX_FPA_INT_SUM \
60 CVMX_ADD_IO_SEG(0x0001180028000040ull)
61#define CVMX_FPA_QUE0_PAGE_INDEX \
62 CVMX_ADD_IO_SEG(0x00011800280000F0ull)
63#define CVMX_FPA_QUE1_PAGE_INDEX \
64 CVMX_ADD_IO_SEG(0x00011800280000F8ull)
65#define CVMX_FPA_QUE2_PAGE_INDEX \
66 CVMX_ADD_IO_SEG(0x0001180028000100ull)
67#define CVMX_FPA_QUE3_PAGE_INDEX \
68 CVMX_ADD_IO_SEG(0x0001180028000108ull)
69#define CVMX_FPA_QUE4_PAGE_INDEX \
70 CVMX_ADD_IO_SEG(0x0001180028000110ull)
71#define CVMX_FPA_QUE5_PAGE_INDEX \
72 CVMX_ADD_IO_SEG(0x0001180028000118ull)
73#define CVMX_FPA_QUE6_PAGE_INDEX \
74 CVMX_ADD_IO_SEG(0x0001180028000120ull)
75#define CVMX_FPA_QUE7_PAGE_INDEX \
76 CVMX_ADD_IO_SEG(0x0001180028000128ull)
77#define CVMX_FPA_QUEX_AVAILABLE(offset) \
78 CVMX_ADD_IO_SEG(0x0001180028000098ull + (((offset) & 7) * 8))
79#define CVMX_FPA_QUEX_PAGE_INDEX(offset) \
80 CVMX_ADD_IO_SEG(0x00011800280000F0ull + (((offset) & 7) * 8))
81#define CVMX_FPA_QUE_ACT \
82 CVMX_ADD_IO_SEG(0x0001180028000138ull)
83#define CVMX_FPA_QUE_EXP \
84 CVMX_ADD_IO_SEG(0x0001180028000130ull)
85#define CVMX_FPA_WART_CTL \
86 CVMX_ADD_IO_SEG(0x00011800280000D8ull)
87#define CVMX_FPA_WART_STATUS \
88 CVMX_ADD_IO_SEG(0x00011800280000E0ull)
89
/*
 * FPA_BIST_STATUS: built-in self-test result bits for the FPA block.
 * Presumably each bit flags a BIST failure in the named sub-unit
 * (frd/fpf0/fpf1/ffr/fdr) -- confirm against the OCTEON HRM.
 */
union cvmx_fpa_bist_status {
	uint64_t u64;
	struct cvmx_fpa_bist_status_s {
		uint64_t reserved_5_63:59;
		uint64_t frd:1;
		uint64_t fpf0:1;
		uint64_t fpf1:1;
		uint64_t ffr:1;
		uint64_t fdr:1;
	} s;
	/* Per-chip layouts; identical to the generic layout on all models. */
	struct cvmx_fpa_bist_status_s cn30xx;
	struct cvmx_fpa_bist_status_s cn31xx;
	struct cvmx_fpa_bist_status_s cn38xx;
	struct cvmx_fpa_bist_status_s cn38xxp2;
	struct cvmx_fpa_bist_status_s cn50xx;
	struct cvmx_fpa_bist_status_s cn52xx;
	struct cvmx_fpa_bist_status_s cn52xxp1;
	struct cvmx_fpa_bist_status_s cn56xx;
	struct cvmx_fpa_bist_status_s cn56xxp1;
	struct cvmx_fpa_bist_status_s cn58xx;
	struct cvmx_fpa_bist_status_s cn58xxp1;
};
112
/*
 * FPA_CTL_STATUS: FPA control/status register.  The 'enb' bit enables
 * the unit (set by cvmx_fpa_enable()).  Other fields (reset, use_ldt,
 * use_stt, mem*_err) are presumably reset/load-store-type controls and
 * memory error indicators -- confirm against the OCTEON HRM.
 */
union cvmx_fpa_ctl_status {
	uint64_t u64;
	struct cvmx_fpa_ctl_status_s {
		uint64_t reserved_18_63:46;
		uint64_t reset:1;
		uint64_t use_ldt:1;
		uint64_t use_stt:1;
		uint64_t enb:1;
		uint64_t mem1_err:7;
		uint64_t mem0_err:7;
	} s;
	/* Per-chip layouts; identical to the generic layout on all models. */
	struct cvmx_fpa_ctl_status_s cn30xx;
	struct cvmx_fpa_ctl_status_s cn31xx;
	struct cvmx_fpa_ctl_status_s cn38xx;
	struct cvmx_fpa_ctl_status_s cn38xxp2;
	struct cvmx_fpa_ctl_status_s cn50xx;
	struct cvmx_fpa_ctl_status_s cn52xx;
	struct cvmx_fpa_ctl_status_s cn52xxp1;
	struct cvmx_fpa_ctl_status_s cn56xx;
	struct cvmx_fpa_ctl_status_s cn56xxp1;
	struct cvmx_fpa_ctl_status_s cn58xx;
	struct cvmx_fpa_ctl_status_s cn58xxp1;
};
136
137union cvmx_fpa_fpfx_marks {
138 uint64_t u64;
139 struct cvmx_fpa_fpfx_marks_s {
140 uint64_t reserved_22_63:42;
141 uint64_t fpf_wr:11;
142 uint64_t fpf_rd:11;
143 } s;
144 struct cvmx_fpa_fpfx_marks_s cn38xx;
145 struct cvmx_fpa_fpfx_marks_s cn38xxp2;
146 struct cvmx_fpa_fpfx_marks_s cn56xx;
147 struct cvmx_fpa_fpfx_marks_s cn56xxp1;
148 struct cvmx_fpa_fpfx_marks_s cn58xx;
149 struct cvmx_fpa_fpfx_marks_s cn58xxp1;
150};
151
152union cvmx_fpa_fpfx_size {
153 uint64_t u64;
154 struct cvmx_fpa_fpfx_size_s {
155 uint64_t reserved_11_63:53;
156 uint64_t fpf_siz:11;
157 } s;
158 struct cvmx_fpa_fpfx_size_s cn38xx;
159 struct cvmx_fpa_fpfx_size_s cn38xxp2;
160 struct cvmx_fpa_fpfx_size_s cn56xx;
161 struct cvmx_fpa_fpfx_size_s cn56xxp1;
162 struct cvmx_fpa_fpfx_size_s cn58xx;
163 struct cvmx_fpa_fpfx_size_s cn58xxp1;
164};
165
166union cvmx_fpa_fpf0_marks {
167 uint64_t u64;
168 struct cvmx_fpa_fpf0_marks_s {
169 uint64_t reserved_24_63:40;
170 uint64_t fpf_wr:12;
171 uint64_t fpf_rd:12;
172 } s;
173 struct cvmx_fpa_fpf0_marks_s cn38xx;
174 struct cvmx_fpa_fpf0_marks_s cn38xxp2;
175 struct cvmx_fpa_fpf0_marks_s cn56xx;
176 struct cvmx_fpa_fpf0_marks_s cn56xxp1;
177 struct cvmx_fpa_fpf0_marks_s cn58xx;
178 struct cvmx_fpa_fpf0_marks_s cn58xxp1;
179};
180
181union cvmx_fpa_fpf0_size {
182 uint64_t u64;
183 struct cvmx_fpa_fpf0_size_s {
184 uint64_t reserved_12_63:52;
185 uint64_t fpf_siz:12;
186 } s;
187 struct cvmx_fpa_fpf0_size_s cn38xx;
188 struct cvmx_fpa_fpf0_size_s cn38xxp2;
189 struct cvmx_fpa_fpf0_size_s cn56xx;
190 struct cvmx_fpa_fpf0_size_s cn56xxp1;
191 struct cvmx_fpa_fpf0_size_s cn58xx;
192 struct cvmx_fpa_fpf0_size_s cn58xxp1;
193};
194
195union cvmx_fpa_int_enb {
196 uint64_t u64;
197 struct cvmx_fpa_int_enb_s {
198 uint64_t reserved_28_63:36;
199 uint64_t q7_perr:1;
200 uint64_t q7_coff:1;
201 uint64_t q7_und:1;
202 uint64_t q6_perr:1;
203 uint64_t q6_coff:1;
204 uint64_t q6_und:1;
205 uint64_t q5_perr:1;
206 uint64_t q5_coff:1;
207 uint64_t q5_und:1;
208 uint64_t q4_perr:1;
209 uint64_t q4_coff:1;
210 uint64_t q4_und:1;
211 uint64_t q3_perr:1;
212 uint64_t q3_coff:1;
213 uint64_t q3_und:1;
214 uint64_t q2_perr:1;
215 uint64_t q2_coff:1;
216 uint64_t q2_und:1;
217 uint64_t q1_perr:1;
218 uint64_t q1_coff:1;
219 uint64_t q1_und:1;
220 uint64_t q0_perr:1;
221 uint64_t q0_coff:1;
222 uint64_t q0_und:1;
223 uint64_t fed1_dbe:1;
224 uint64_t fed1_sbe:1;
225 uint64_t fed0_dbe:1;
226 uint64_t fed0_sbe:1;
227 } s;
228 struct cvmx_fpa_int_enb_s cn30xx;
229 struct cvmx_fpa_int_enb_s cn31xx;
230 struct cvmx_fpa_int_enb_s cn38xx;
231 struct cvmx_fpa_int_enb_s cn38xxp2;
232 struct cvmx_fpa_int_enb_s cn50xx;
233 struct cvmx_fpa_int_enb_s cn52xx;
234 struct cvmx_fpa_int_enb_s cn52xxp1;
235 struct cvmx_fpa_int_enb_s cn56xx;
236 struct cvmx_fpa_int_enb_s cn56xxp1;
237 struct cvmx_fpa_int_enb_s cn58xx;
238 struct cvmx_fpa_int_enb_s cn58xxp1;
239};
240
241union cvmx_fpa_int_sum {
242 uint64_t u64;
243 struct cvmx_fpa_int_sum_s {
244 uint64_t reserved_28_63:36;
245 uint64_t q7_perr:1;
246 uint64_t q7_coff:1;
247 uint64_t q7_und:1;
248 uint64_t q6_perr:1;
249 uint64_t q6_coff:1;
250 uint64_t q6_und:1;
251 uint64_t q5_perr:1;
252 uint64_t q5_coff:1;
253 uint64_t q5_und:1;
254 uint64_t q4_perr:1;
255 uint64_t q4_coff:1;
256 uint64_t q4_und:1;
257 uint64_t q3_perr:1;
258 uint64_t q3_coff:1;
259 uint64_t q3_und:1;
260 uint64_t q2_perr:1;
261 uint64_t q2_coff:1;
262 uint64_t q2_und:1;
263 uint64_t q1_perr:1;
264 uint64_t q1_coff:1;
265 uint64_t q1_und:1;
266 uint64_t q0_perr:1;
267 uint64_t q0_coff:1;
268 uint64_t q0_und:1;
269 uint64_t fed1_dbe:1;
270 uint64_t fed1_sbe:1;
271 uint64_t fed0_dbe:1;
272 uint64_t fed0_sbe:1;
273 } s;
274 struct cvmx_fpa_int_sum_s cn30xx;
275 struct cvmx_fpa_int_sum_s cn31xx;
276 struct cvmx_fpa_int_sum_s cn38xx;
277 struct cvmx_fpa_int_sum_s cn38xxp2;
278 struct cvmx_fpa_int_sum_s cn50xx;
279 struct cvmx_fpa_int_sum_s cn52xx;
280 struct cvmx_fpa_int_sum_s cn52xxp1;
281 struct cvmx_fpa_int_sum_s cn56xx;
282 struct cvmx_fpa_int_sum_s cn56xxp1;
283 struct cvmx_fpa_int_sum_s cn58xx;
284 struct cvmx_fpa_int_sum_s cn58xxp1;
285};
286
287union cvmx_fpa_quex_available {
288 uint64_t u64;
289 struct cvmx_fpa_quex_available_s {
290 uint64_t reserved_29_63:35;
291 uint64_t que_siz:29;
292 } s;
293 struct cvmx_fpa_quex_available_s cn30xx;
294 struct cvmx_fpa_quex_available_s cn31xx;
295 struct cvmx_fpa_quex_available_s cn38xx;
296 struct cvmx_fpa_quex_available_s cn38xxp2;
297 struct cvmx_fpa_quex_available_s cn50xx;
298 struct cvmx_fpa_quex_available_s cn52xx;
299 struct cvmx_fpa_quex_available_s cn52xxp1;
300 struct cvmx_fpa_quex_available_s cn56xx;
301 struct cvmx_fpa_quex_available_s cn56xxp1;
302 struct cvmx_fpa_quex_available_s cn58xx;
303 struct cvmx_fpa_quex_available_s cn58xxp1;
304};
305
306union cvmx_fpa_quex_page_index {
307 uint64_t u64;
308 struct cvmx_fpa_quex_page_index_s {
309 uint64_t reserved_25_63:39;
310 uint64_t pg_num:25;
311 } s;
312 struct cvmx_fpa_quex_page_index_s cn30xx;
313 struct cvmx_fpa_quex_page_index_s cn31xx;
314 struct cvmx_fpa_quex_page_index_s cn38xx;
315 struct cvmx_fpa_quex_page_index_s cn38xxp2;
316 struct cvmx_fpa_quex_page_index_s cn50xx;
317 struct cvmx_fpa_quex_page_index_s cn52xx;
318 struct cvmx_fpa_quex_page_index_s cn52xxp1;
319 struct cvmx_fpa_quex_page_index_s cn56xx;
320 struct cvmx_fpa_quex_page_index_s cn56xxp1;
321 struct cvmx_fpa_quex_page_index_s cn58xx;
322 struct cvmx_fpa_quex_page_index_s cn58xxp1;
323};
324
325union cvmx_fpa_que_act {
326 uint64_t u64;
327 struct cvmx_fpa_que_act_s {
328 uint64_t reserved_29_63:35;
329 uint64_t act_que:3;
330 uint64_t act_indx:26;
331 } s;
332 struct cvmx_fpa_que_act_s cn30xx;
333 struct cvmx_fpa_que_act_s cn31xx;
334 struct cvmx_fpa_que_act_s cn38xx;
335 struct cvmx_fpa_que_act_s cn38xxp2;
336 struct cvmx_fpa_que_act_s cn50xx;
337 struct cvmx_fpa_que_act_s cn52xx;
338 struct cvmx_fpa_que_act_s cn52xxp1;
339 struct cvmx_fpa_que_act_s cn56xx;
340 struct cvmx_fpa_que_act_s cn56xxp1;
341 struct cvmx_fpa_que_act_s cn58xx;
342 struct cvmx_fpa_que_act_s cn58xxp1;
343};
344
345union cvmx_fpa_que_exp {
346 uint64_t u64;
347 struct cvmx_fpa_que_exp_s {
348 uint64_t reserved_29_63:35;
349 uint64_t exp_que:3;
350 uint64_t exp_indx:26;
351 } s;
352 struct cvmx_fpa_que_exp_s cn30xx;
353 struct cvmx_fpa_que_exp_s cn31xx;
354 struct cvmx_fpa_que_exp_s cn38xx;
355 struct cvmx_fpa_que_exp_s cn38xxp2;
356 struct cvmx_fpa_que_exp_s cn50xx;
357 struct cvmx_fpa_que_exp_s cn52xx;
358 struct cvmx_fpa_que_exp_s cn52xxp1;
359 struct cvmx_fpa_que_exp_s cn56xx;
360 struct cvmx_fpa_que_exp_s cn56xxp1;
361 struct cvmx_fpa_que_exp_s cn58xx;
362 struct cvmx_fpa_que_exp_s cn58xxp1;
363};
364
365union cvmx_fpa_wart_ctl {
366 uint64_t u64;
367 struct cvmx_fpa_wart_ctl_s {
368 uint64_t reserved_16_63:48;
369 uint64_t ctl:16;
370 } s;
371 struct cvmx_fpa_wart_ctl_s cn30xx;
372 struct cvmx_fpa_wart_ctl_s cn31xx;
373 struct cvmx_fpa_wart_ctl_s cn38xx;
374 struct cvmx_fpa_wart_ctl_s cn38xxp2;
375 struct cvmx_fpa_wart_ctl_s cn50xx;
376 struct cvmx_fpa_wart_ctl_s cn52xx;
377 struct cvmx_fpa_wart_ctl_s cn52xxp1;
378 struct cvmx_fpa_wart_ctl_s cn56xx;
379 struct cvmx_fpa_wart_ctl_s cn56xxp1;
380 struct cvmx_fpa_wart_ctl_s cn58xx;
381 struct cvmx_fpa_wart_ctl_s cn58xxp1;
382};
383
384union cvmx_fpa_wart_status {
385 uint64_t u64;
386 struct cvmx_fpa_wart_status_s {
387 uint64_t reserved_32_63:32;
388 uint64_t status:32;
389 } s;
390 struct cvmx_fpa_wart_status_s cn30xx;
391 struct cvmx_fpa_wart_status_s cn31xx;
392 struct cvmx_fpa_wart_status_s cn38xx;
393 struct cvmx_fpa_wart_status_s cn38xxp2;
394 struct cvmx_fpa_wart_status_s cn50xx;
395 struct cvmx_fpa_wart_status_s cn52xx;
396 struct cvmx_fpa_wart_status_s cn52xxp1;
397 struct cvmx_fpa_wart_status_s cn56xx;
398 struct cvmx_fpa_wart_status_s cn56xxp1;
399 struct cvmx_fpa_wart_status_s cn58xx;
400 struct cvmx_fpa_wart_status_s cn58xxp1;
401};
402
403#endif
diff --git a/drivers/staging/octeon/cvmx-fpa.c b/drivers/staging/octeon/cvmx-fpa.c
new file mode 100644
index 000000000000..55d9147acc85
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-fpa.c
@@ -0,0 +1,183 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 * @file
30 *
31 * Support library for the hardware Free Pool Allocator.
32 *
33 *
34 */
35
36#include "cvmx-config.h"
37#include "cvmx.h"
38#include "cvmx-fpa.h"
39#include "cvmx-ipd.h"
40
41/**
42 * Current state of all the pools. Use access functions
43 * instead of using it directly.
44 */
45CVMX_SHARED cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
46
47/**
48 * Setup a FPA pool to control a new block of memory. The
49 * buffer pointer must be a physical address.
50 *
51 * @pool: Pool to initialize
52 * 0 <= pool < 8
53 * @name: Constant character string to name this pool.
54 * String is not copied.
55 * @buffer: Pointer to the block of memory to use. This must be
56 * accessable by all processors and external hardware.
57 * @block_size: Size for each block controlled by the FPA
58 * @num_blocks: Number of blocks
59 *
60 * Returns 0 on Success,
61 * -1 on failure
62 */
63int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
64 uint64_t block_size, uint64_t num_blocks)
65{
66 char *ptr;
67 if (!buffer) {
68 cvmx_dprintf
69 ("ERROR: cvmx_fpa_setup_pool: NULL buffer pointer!\n");
70 return -1;
71 }
72 if (pool >= CVMX_FPA_NUM_POOLS) {
73 cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Illegal pool!\n");
74 return -1;
75 }
76
77 if (block_size < CVMX_FPA_MIN_BLOCK_SIZE) {
78 cvmx_dprintf
79 ("ERROR: cvmx_fpa_setup_pool: Block size too small.\n");
80 return -1;
81 }
82
83 if (((unsigned long)buffer & (CVMX_FPA_ALIGNMENT - 1)) != 0) {
84 cvmx_dprintf
85 ("ERROR: cvmx_fpa_setup_pool: Buffer not aligned properly.\n");
86 return -1;
87 }
88
89 cvmx_fpa_pool_info[pool].name = name;
90 cvmx_fpa_pool_info[pool].size = block_size;
91 cvmx_fpa_pool_info[pool].starting_element_count = num_blocks;
92 cvmx_fpa_pool_info[pool].base = buffer;
93
94 ptr = (char *)buffer;
95 while (num_blocks--) {
96 cvmx_fpa_free(ptr, pool, 0);
97 ptr += block_size;
98 }
99 return 0;
100}
101
/**
 * Shutdown a memory pool and validate that it still holds all of
 * the buffers originally placed in it.
 *
 * @pool: Pool to shutdown
 *
 * Returns zero on success.  NOTE: the return type is uint64_t but the
 * function returns -errors (two's complement) when corrupted or
 * out-of-range pointers are found; callers interpret a "negative"
 * (very large unsigned) value as too many / corrupt buffers and a
 * positive value as a count of missing buffers.
 */
uint64_t cvmx_fpa_shutdown_pool(uint64_t pool)
{
	uint64_t errors = 0;
	uint64_t count = 0;
	/* Physical bounds of the pool's backing memory, recorded at setup. */
	uint64_t base = cvmx_ptr_to_phys(cvmx_fpa_pool_info[pool].base);
	uint64_t finish =
	    base +
	    cvmx_fpa_pool_info[pool].size *
	    cvmx_fpa_pool_info[pool].starting_element_count;
	void *ptr;
	uint64_t address;

	count = 0;
	/*
	 * Drain the pool one buffer at a time.  Every returned pointer
	 * must lie inside [base, finish) and on a block_size boundary;
	 * anything else is counted as an error.  The loop ends when the
	 * hardware returns NULL (pool empty).
	 */
	do {
		ptr = cvmx_fpa_alloc(pool);
		if (ptr)
			address = cvmx_ptr_to_phys(ptr);
		else
			address = 0;
		if (address) {
			if ((address >= base) && (address < finish) &&
			    (((address -
			       base) % cvmx_fpa_pool_info[pool].size) == 0)) {
				count++;
			} else {
				cvmx_dprintf
				    ("ERROR: cvmx_fpa_shutdown_pool: Illegal address 0x%llx in pool %s(%d)\n",
				     (unsigned long long)address,
				     cvmx_fpa_pool_info[pool].name, (int)pool);
				errors++;
			}
		}
	} while (address);

#ifdef CVMX_ENABLE_PKO_FUNCTIONS
	/* Pool 0 buffers may still be held by the IPD; release them too. */
	if (pool == 0)
		cvmx_ipd_free_ptr();
#endif

	if (errors) {
		cvmx_dprintf
		    ("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) started at 0x%llx, ended at 0x%llx, with a step of 0x%llx\n",
		     cvmx_fpa_pool_info[pool].name, (int)pool,
		     (unsigned long long)base, (unsigned long long)finish,
		     (unsigned long long)cvmx_fpa_pool_info[pool].size);
		return -errors;
	} else
		return 0;
}
160
161uint64_t cvmx_fpa_get_block_size(uint64_t pool)
162{
163 switch (pool) {
164 case 0:
165 return CVMX_FPA_POOL_0_SIZE;
166 case 1:
167 return CVMX_FPA_POOL_1_SIZE;
168 case 2:
169 return CVMX_FPA_POOL_2_SIZE;
170 case 3:
171 return CVMX_FPA_POOL_3_SIZE;
172 case 4:
173 return CVMX_FPA_POOL_4_SIZE;
174 case 5:
175 return CVMX_FPA_POOL_5_SIZE;
176 case 6:
177 return CVMX_FPA_POOL_6_SIZE;
178 case 7:
179 return CVMX_FPA_POOL_7_SIZE;
180 default:
181 return 0;
182 }
183}
diff --git a/drivers/staging/octeon/cvmx-fpa.h b/drivers/staging/octeon/cvmx-fpa.h
new file mode 100644
index 000000000000..1d7788fe09f2
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-fpa.h
@@ -0,0 +1,299 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 * @file
30 *
31 * Interface to the hardware Free Pool Allocator.
32 *
33 *
34 */
35
36#ifndef __CVMX_FPA_H__
37#define __CVMX_FPA_H__
38
39#include "cvmx-address.h"
40#include "cvmx-fpa-defs.h"
41
42#define CVMX_FPA_NUM_POOLS 8
43#define CVMX_FPA_MIN_BLOCK_SIZE 128
44#define CVMX_FPA_ALIGNMENT 128
45
46/**
47 * Structure describing the data format used for stores to the FPA.
48 */
49typedef union {
50 uint64_t u64;
51 struct {
52 /*
53 * the (64-bit word) location in scratchpad to write
54 * to (if len != 0)
55 */
56 uint64_t scraddr:8;
57 /* the number of words in the response (0 => no response) */
58 uint64_t len:8;
59 /* the ID of the device on the non-coherent bus */
60 uint64_t did:8;
61 /*
62 * the address that will appear in the first tick on
63 * the NCB bus.
64 */
65 uint64_t addr:40;
66 } s;
67} cvmx_fpa_iobdma_data_t;
68
69/**
70 * Structure describing the current state of a FPA pool.
71 */
72typedef struct {
73 /* Name it was created under */
74 const char *name;
75 /* Size of each block */
76 uint64_t size;
77 /* The base memory address of whole block */
78 void *base;
79 /* The number of elements in the pool at creation */
80 uint64_t starting_element_count;
81} cvmx_fpa_pool_info_t;
82
83/**
84 * Current state of all the pools. Use access functions
85 * instead of using it directly.
86 */
87extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
88
89/* CSR typedefs have been moved to cvmx-csr-*.h */
90
91/**
92 * Return the name of the pool
93 *
94 * @pool: Pool to get the name of
95 * Returns The name
96 */
97static inline const char *cvmx_fpa_get_name(uint64_t pool)
98{
99 return cvmx_fpa_pool_info[pool].name;
100}
101
102/**
103 * Return the base of the pool
104 *
105 * @pool: Pool to get the base of
106 * Returns The base
107 */
108static inline void *cvmx_fpa_get_base(uint64_t pool)
109{
110 return cvmx_fpa_pool_info[pool].base;
111}
112
113/**
114 * Check if a pointer belongs to an FPA pool. Return non-zero
115 * if the supplied pointer is inside the memory controlled by
116 * an FPA pool.
117 *
118 * @pool: Pool to check
119 * @ptr: Pointer to check
120 * Returns Non-zero if pointer is in the pool. Zero if not
121 */
122static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
123{
124 return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
125 ((char *)ptr <
126 ((char *)(cvmx_fpa_pool_info[pool].base)) +
127 cvmx_fpa_pool_info[pool].size *
128 cvmx_fpa_pool_info[pool].starting_element_count));
129}
130
/**
 * Enable the FPA for use.  Must be performed after any CSR
 * configuration but before any other FPA functions.
 */
static inline void cvmx_fpa_enable(void)
{
	union cvmx_fpa_ctl_status status;

	/* Warn (but continue) if the unit is already enabled. */
	status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
	if (status.s.enb) {
		cvmx_dprintf
		    ("Warning: Enabling FPA when FPA already enabled.\n");
	}

	/*
	 * Do runtime check as we allow pass1 compiled code to run on
	 * pass2 chips.
	 */
	if (cvmx_octeon_is_pass1()) {
		union cvmx_fpa_fpfx_marks marks;
		int i;
		/*
		 * Pass-1 workaround: rewrite the write high-water mark
		 * (fpf_wr = 0xe0) for FPF queues 1..7.  The register for
		 * queue i is at FPF1_MARKS + (i-1)*8.
		 */
		for (i = 1; i < 8; i++) {
			marks.u64 =
			    cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
			marks.s.fpf_wr = 0xe0;
			cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
				       marks.u64);
		}

		/* Enforce a 10 cycle delay between config and enable */
		cvmx_wait(10);
	}

	/* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
	/* Write a clean value with only the enable bit set. */
	status.u64 = 0;
	status.s.enb = 1;
	cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
}
169
170/**
171 * Get a new block from the FPA
172 *
173 * @pool: Pool to get the block from
174 * Returns Pointer to the block or NULL on failure
175 */
176static inline void *cvmx_fpa_alloc(uint64_t pool)
177{
178 uint64_t address =
179 cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
180 if (address)
181 return cvmx_phys_to_ptr(address);
182 else
183 return NULL;
184}
185
/**
 * Asynchronously get a new block from the FPA.  The result is
 * delivered to the local scratchpad via IOBDMA; the caller must
 * later read the scratch location to obtain the buffer address.
 *
 * @scr_addr: Local scratch address to put the response in.  This is a
 *            byte address, but must be 8 byte aligned.
 * @pool: Pool to get the block from
 */
static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
{
	cvmx_fpa_iobdma_data_t data;

	/*
	 * Hardware only uses 64 bit aligned locations, so convert
	 * from byte address to 64-bit index
	 */
	data.s.scraddr = scr_addr >> 3;
	data.s.len = 1;	/* one response word */
	data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
	data.s.addr = 0;
	/* The four bitfields cover all 64 bits, so u64 is fully defined. */
	cvmx_send_single(data.u64);
}
207
/**
 * Free a block allocated with a FPA pool.  Does NOT provide memory
 * ordering in cases where the memory block was modified by the core:
 * use cvmx_fpa_free() if the buffer contents must be visible before
 * the buffer is reused.
 *
 * @ptr: Block to free
 * @pool: Pool to put it in
 * @num_cache_lines:
 *      Cache lines to invalidate
 */
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
					uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;
	newptr.u64 = cvmx_ptr_to_phys(ptr);
	/* Route the store to the FPA's device ID space for this pool. */
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/* Prevent GCC from reordering around free */
	barrier();
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}
229
/**
 * Free a block allocated with a FPA pool.  Provides required memory
 * ordering in cases where memory block was modified by core.
 *
 * @ptr: Block to free
 * @pool: Pool to put it in
 * @num_cache_lines:
 *      Cache lines to invalidate
 */
static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
				 uint64_t num_cache_lines)
{
	cvmx_addr_t newptr;
	newptr.u64 = cvmx_ptr_to_phys(ptr);
	/* Route the store to the FPA's device ID space for this pool. */
	newptr.sfilldidspace.didspace =
	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
	/*
	 * Make sure that any previous writes to memory go out before
	 * we free this buffer. This also serves as a barrier to
	 * prevent GCC from reordering operations to after the
	 * free.
	 */
	CVMX_SYNCWS;
	/* value written is number of cache lines not written back */
	cvmx_write_io(newptr.u64, num_cache_lines);
}
256
257/**
258 * Setup a FPA pool to control a new block of memory.
259 * This can only be called once per pool. Make sure proper
260 * locking enforces this.
261 *
262 * @pool: Pool to initialize
263 * 0 <= pool < 8
264 * @name: Constant character string to name this pool.
265 * String is not copied.
266 * @buffer: Pointer to the block of memory to use. This must be
267 * accessable by all processors and external hardware.
268 * @block_size: Size for each block controlled by the FPA
269 * @num_blocks: Number of blocks
270 *
271 * Returns 0 on Success,
272 * -1 on failure
273 */
274extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
275 uint64_t block_size, uint64_t num_blocks);
276
277/**
278 * Shutdown a Memory pool and validate that it had all of
279 * the buffers originally placed in it. This should only be
280 * called by one processor after all hardware has finished
281 * using the pool.
282 *
283 * @pool: Pool to shutdown
284 * Returns Zero on success
285 * - Positive is count of missing buffers
286 * - Negative is too many buffers or corrupted pointers
287 */
288extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
289
290/**
291 * Get the size of blocks controlled by the pool
292 * This is resolved to a constant at compile time.
293 *
294 * @pool: Pool to access
295 * Returns Size of the block in bytes
296 */
297uint64_t cvmx_fpa_get_block_size(uint64_t pool);
298
299#endif /* __CVM_FPA_H__ */
diff --git a/drivers/staging/octeon/cvmx-gmxx-defs.h b/drivers/staging/octeon/cvmx-gmxx-defs.h
new file mode 100644
index 000000000000..946a43a73fd7
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-gmxx-defs.h
@@ -0,0 +1,2529 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_GMXX_DEFS_H__
29#define __CVMX_GMXX_DEFS_H__
30
/*
 * GMX register address helpers (auto-generated from the hardware
 * description — do not hand-edit individual constants).
 *
 * Each macro yields the CSR physical address of one GMX register:
 * a fixed base address, plus (offset & mask) * stride for per-port
 * registers, plus (block_id & 1) * 0x8000000 to select the GMX
 * interface block.  Terms of the form "((x) & 0) * n" evaluate to
 * zero and appear to be generator output for single-instance
 * registers; they are kept as generated.
 */
31#define CVMX_GMXX_BAD_REG(block_id) \
32 CVMX_ADD_IO_SEG(0x0001180008000518ull + (((block_id) & 1) * 0x8000000ull))
33#define CVMX_GMXX_BIST(block_id) \
34 CVMX_ADD_IO_SEG(0x0001180008000400ull + (((block_id) & 1) * 0x8000000ull))
35#define CVMX_GMXX_CLK_EN(block_id) \
36 CVMX_ADD_IO_SEG(0x00011800080007F0ull + (((block_id) & 1) * 0x8000000ull))
37#define CVMX_GMXX_HG2_CONTROL(block_id) \
38 CVMX_ADD_IO_SEG(0x0001180008000550ull + (((block_id) & 1) * 0x8000000ull))
39#define CVMX_GMXX_INF_MODE(block_id) \
40 CVMX_ADD_IO_SEG(0x00011800080007F8ull + (((block_id) & 1) * 0x8000000ull))
41#define CVMX_GMXX_NXA_ADR(block_id) \
42 CVMX_ADD_IO_SEG(0x0001180008000510ull + (((block_id) & 1) * 0x8000000ull))
43#define CVMX_GMXX_PRTX_CBFC_CTL(offset, block_id) \
44 CVMX_ADD_IO_SEG(0x0001180008000580ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull))
45#define CVMX_GMXX_PRTX_CFG(offset, block_id) \
46 CVMX_ADD_IO_SEG(0x0001180008000010ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
47#define CVMX_GMXX_RXX_ADR_CAM0(offset, block_id) \
48 CVMX_ADD_IO_SEG(0x0001180008000180ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
49#define CVMX_GMXX_RXX_ADR_CAM1(offset, block_id) \
50 CVMX_ADD_IO_SEG(0x0001180008000188ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
51#define CVMX_GMXX_RXX_ADR_CAM2(offset, block_id) \
52 CVMX_ADD_IO_SEG(0x0001180008000190ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
53#define CVMX_GMXX_RXX_ADR_CAM3(offset, block_id) \
54 CVMX_ADD_IO_SEG(0x0001180008000198ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
55#define CVMX_GMXX_RXX_ADR_CAM4(offset, block_id) \
56 CVMX_ADD_IO_SEG(0x00011800080001A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
57#define CVMX_GMXX_RXX_ADR_CAM5(offset, block_id) \
58 CVMX_ADD_IO_SEG(0x00011800080001A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
59#define CVMX_GMXX_RXX_ADR_CAM_EN(offset, block_id) \
60 CVMX_ADD_IO_SEG(0x0001180008000108ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
61#define CVMX_GMXX_RXX_ADR_CTL(offset, block_id) \
62 CVMX_ADD_IO_SEG(0x0001180008000100ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
63#define CVMX_GMXX_RXX_DECISION(offset, block_id) \
64 CVMX_ADD_IO_SEG(0x0001180008000040ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
65#define CVMX_GMXX_RXX_FRM_CHK(offset, block_id) \
66 CVMX_ADD_IO_SEG(0x0001180008000020ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
67#define CVMX_GMXX_RXX_FRM_CTL(offset, block_id) \
68 CVMX_ADD_IO_SEG(0x0001180008000018ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
69#define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) \
70 CVMX_ADD_IO_SEG(0x0001180008000030ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
71#define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) \
72 CVMX_ADD_IO_SEG(0x0001180008000028ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
73#define CVMX_GMXX_RXX_IFG(offset, block_id) \
74 CVMX_ADD_IO_SEG(0x0001180008000058ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
75#define CVMX_GMXX_RXX_INT_EN(offset, block_id) \
76 CVMX_ADD_IO_SEG(0x0001180008000008ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
77#define CVMX_GMXX_RXX_INT_REG(offset, block_id) \
78 CVMX_ADD_IO_SEG(0x0001180008000000ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
79#define CVMX_GMXX_RXX_JABBER(offset, block_id) \
80 CVMX_ADD_IO_SEG(0x0001180008000038ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
81#define CVMX_GMXX_RXX_PAUSE_DROP_TIME(offset, block_id) \
82 CVMX_ADD_IO_SEG(0x0001180008000068ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
83#define CVMX_GMXX_RXX_RX_INBND(offset, block_id) \
84 CVMX_ADD_IO_SEG(0x0001180008000060ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
85#define CVMX_GMXX_RXX_STATS_CTL(offset, block_id) \
86 CVMX_ADD_IO_SEG(0x0001180008000050ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
87#define CVMX_GMXX_RXX_STATS_OCTS(offset, block_id) \
88 CVMX_ADD_IO_SEG(0x0001180008000088ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
89#define CVMX_GMXX_RXX_STATS_OCTS_CTL(offset, block_id) \
90 CVMX_ADD_IO_SEG(0x0001180008000098ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
91#define CVMX_GMXX_RXX_STATS_OCTS_DMAC(offset, block_id) \
92 CVMX_ADD_IO_SEG(0x00011800080000A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
93#define CVMX_GMXX_RXX_STATS_OCTS_DRP(offset, block_id) \
94 CVMX_ADD_IO_SEG(0x00011800080000B8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
95#define CVMX_GMXX_RXX_STATS_PKTS(offset, block_id) \
96 CVMX_ADD_IO_SEG(0x0001180008000080ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
97#define CVMX_GMXX_RXX_STATS_PKTS_BAD(offset, block_id) \
98 CVMX_ADD_IO_SEG(0x00011800080000C0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
99#define CVMX_GMXX_RXX_STATS_PKTS_CTL(offset, block_id) \
100 CVMX_ADD_IO_SEG(0x0001180008000090ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
101#define CVMX_GMXX_RXX_STATS_PKTS_DMAC(offset, block_id) \
102 CVMX_ADD_IO_SEG(0x00011800080000A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
103#define CVMX_GMXX_RXX_STATS_PKTS_DRP(offset, block_id) \
104 CVMX_ADD_IO_SEG(0x00011800080000B0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
105#define CVMX_GMXX_RXX_UDD_SKP(offset, block_id) \
106 CVMX_ADD_IO_SEG(0x0001180008000048ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
107#define CVMX_GMXX_RX_BP_DROPX(offset, block_id) \
108 CVMX_ADD_IO_SEG(0x0001180008000420ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
109#define CVMX_GMXX_RX_BP_OFFX(offset, block_id) \
110 CVMX_ADD_IO_SEG(0x0001180008000460ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
111#define CVMX_GMXX_RX_BP_ONX(offset, block_id) \
112 CVMX_ADD_IO_SEG(0x0001180008000440ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
113#define CVMX_GMXX_RX_HG2_STATUS(block_id) \
114 CVMX_ADD_IO_SEG(0x0001180008000548ull + (((block_id) & 1) * 0x8000000ull))
115#define CVMX_GMXX_RX_PASS_EN(block_id) \
116 CVMX_ADD_IO_SEG(0x00011800080005F8ull + (((block_id) & 1) * 0x8000000ull))
117#define CVMX_GMXX_RX_PASS_MAPX(offset, block_id) \
118 CVMX_ADD_IO_SEG(0x0001180008000600ull + (((offset) & 15) * 8) + (((block_id) & 1) * 0x8000000ull))
119#define CVMX_GMXX_RX_PRTS(block_id) \
120 CVMX_ADD_IO_SEG(0x0001180008000410ull + (((block_id) & 1) * 0x8000000ull))
121#define CVMX_GMXX_RX_PRT_INFO(block_id) \
122 CVMX_ADD_IO_SEG(0x00011800080004E8ull + (((block_id) & 1) * 0x8000000ull))
123#define CVMX_GMXX_RX_TX_STATUS(block_id) \
124 CVMX_ADD_IO_SEG(0x00011800080007E8ull + (((block_id) & 0) * 0x8000000ull))
125#define CVMX_GMXX_RX_XAUI_BAD_COL(block_id) \
126 CVMX_ADD_IO_SEG(0x0001180008000538ull + (((block_id) & 1) * 0x8000000ull))
127#define CVMX_GMXX_RX_XAUI_CTL(block_id) \
128 CVMX_ADD_IO_SEG(0x0001180008000530ull + (((block_id) & 1) * 0x8000000ull))
129#define CVMX_GMXX_SMACX(offset, block_id) \
130 CVMX_ADD_IO_SEG(0x0001180008000230ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
131#define CVMX_GMXX_STAT_BP(block_id) \
132 CVMX_ADD_IO_SEG(0x0001180008000520ull + (((block_id) & 1) * 0x8000000ull))
133#define CVMX_GMXX_TXX_APPEND(offset, block_id) \
134 CVMX_ADD_IO_SEG(0x0001180008000218ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
135#define CVMX_GMXX_TXX_BURST(offset, block_id) \
136 CVMX_ADD_IO_SEG(0x0001180008000228ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
137#define CVMX_GMXX_TXX_CBFC_XOFF(offset, block_id) \
138 CVMX_ADD_IO_SEG(0x00011800080005A0ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull))
139#define CVMX_GMXX_TXX_CBFC_XON(offset, block_id) \
140 CVMX_ADD_IO_SEG(0x00011800080005C0ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull))
141#define CVMX_GMXX_TXX_CLK(offset, block_id) \
142 CVMX_ADD_IO_SEG(0x0001180008000208ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
143#define CVMX_GMXX_TXX_CTL(offset, block_id) \
144 CVMX_ADD_IO_SEG(0x0001180008000270ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
145#define CVMX_GMXX_TXX_MIN_PKT(offset, block_id) \
146 CVMX_ADD_IO_SEG(0x0001180008000240ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
147#define CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(offset, block_id) \
148 CVMX_ADD_IO_SEG(0x0001180008000248ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
149#define CVMX_GMXX_TXX_PAUSE_PKT_TIME(offset, block_id) \
150 CVMX_ADD_IO_SEG(0x0001180008000238ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
151#define CVMX_GMXX_TXX_PAUSE_TOGO(offset, block_id) \
152 CVMX_ADD_IO_SEG(0x0001180008000258ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
153#define CVMX_GMXX_TXX_PAUSE_ZERO(offset, block_id) \
154 CVMX_ADD_IO_SEG(0x0001180008000260ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
155#define CVMX_GMXX_TXX_SGMII_CTL(offset, block_id) \
156 CVMX_ADD_IO_SEG(0x0001180008000300ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
157#define CVMX_GMXX_TXX_SLOT(offset, block_id) \
158 CVMX_ADD_IO_SEG(0x0001180008000220ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
159#define CVMX_GMXX_TXX_SOFT_PAUSE(offset, block_id) \
160 CVMX_ADD_IO_SEG(0x0001180008000250ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
161#define CVMX_GMXX_TXX_STAT0(offset, block_id) \
162 CVMX_ADD_IO_SEG(0x0001180008000280ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
163#define CVMX_GMXX_TXX_STAT1(offset, block_id) \
164 CVMX_ADD_IO_SEG(0x0001180008000288ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
165#define CVMX_GMXX_TXX_STAT2(offset, block_id) \
166 CVMX_ADD_IO_SEG(0x0001180008000290ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
167#define CVMX_GMXX_TXX_STAT3(offset, block_id) \
168 CVMX_ADD_IO_SEG(0x0001180008000298ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
169#define CVMX_GMXX_TXX_STAT4(offset, block_id) \
170 CVMX_ADD_IO_SEG(0x00011800080002A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
171#define CVMX_GMXX_TXX_STAT5(offset, block_id) \
172 CVMX_ADD_IO_SEG(0x00011800080002A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
173#define CVMX_GMXX_TXX_STAT6(offset, block_id) \
174 CVMX_ADD_IO_SEG(0x00011800080002B0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
175#define CVMX_GMXX_TXX_STAT7(offset, block_id) \
176 CVMX_ADD_IO_SEG(0x00011800080002B8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
177#define CVMX_GMXX_TXX_STAT8(offset, block_id) \
178 CVMX_ADD_IO_SEG(0x00011800080002C0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
179#define CVMX_GMXX_TXX_STAT9(offset, block_id) \
180 CVMX_ADD_IO_SEG(0x00011800080002C8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
181#define CVMX_GMXX_TXX_STATS_CTL(offset, block_id) \
182 CVMX_ADD_IO_SEG(0x0001180008000268ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
183#define CVMX_GMXX_TXX_THRESH(offset, block_id) \
184 CVMX_ADD_IO_SEG(0x0001180008000210ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
185#define CVMX_GMXX_TX_BP(block_id) \
186 CVMX_ADD_IO_SEG(0x00011800080004D0ull + (((block_id) & 1) * 0x8000000ull))
187#define CVMX_GMXX_TX_CLK_MSKX(offset, block_id) \
188 CVMX_ADD_IO_SEG(0x0001180008000780ull + (((offset) & 1) * 8) + (((block_id) & 0) * 0x0ull))
189#define CVMX_GMXX_TX_COL_ATTEMPT(block_id) \
190 CVMX_ADD_IO_SEG(0x0001180008000498ull + (((block_id) & 1) * 0x8000000ull))
191#define CVMX_GMXX_TX_CORRUPT(block_id) \
192 CVMX_ADD_IO_SEG(0x00011800080004D8ull + (((block_id) & 1) * 0x8000000ull))
193#define CVMX_GMXX_TX_HG2_REG1(block_id) \
194 CVMX_ADD_IO_SEG(0x0001180008000558ull + (((block_id) & 1) * 0x8000000ull))
195#define CVMX_GMXX_TX_HG2_REG2(block_id) \
196 CVMX_ADD_IO_SEG(0x0001180008000560ull + (((block_id) & 1) * 0x8000000ull))
197#define CVMX_GMXX_TX_IFG(block_id) \
198 CVMX_ADD_IO_SEG(0x0001180008000488ull + (((block_id) & 1) * 0x8000000ull))
199#define CVMX_GMXX_TX_INT_EN(block_id) \
200 CVMX_ADD_IO_SEG(0x0001180008000508ull + (((block_id) & 1) * 0x8000000ull))
201#define CVMX_GMXX_TX_INT_REG(block_id) \
202 CVMX_ADD_IO_SEG(0x0001180008000500ull + (((block_id) & 1) * 0x8000000ull))
203#define CVMX_GMXX_TX_JAM(block_id) \
204 CVMX_ADD_IO_SEG(0x0001180008000490ull + (((block_id) & 1) * 0x8000000ull))
205#define CVMX_GMXX_TX_LFSR(block_id) \
206 CVMX_ADD_IO_SEG(0x00011800080004F8ull + (((block_id) & 1) * 0x8000000ull))
207#define CVMX_GMXX_TX_OVR_BP(block_id) \
208 CVMX_ADD_IO_SEG(0x00011800080004C8ull + (((block_id) & 1) * 0x8000000ull))
209#define CVMX_GMXX_TX_PAUSE_PKT_DMAC(block_id) \
210 CVMX_ADD_IO_SEG(0x00011800080004A0ull + (((block_id) & 1) * 0x8000000ull))
211#define CVMX_GMXX_TX_PAUSE_PKT_TYPE(block_id) \
212 CVMX_ADD_IO_SEG(0x00011800080004A8ull + (((block_id) & 1) * 0x8000000ull))
213#define CVMX_GMXX_TX_PRTS(block_id) \
214 CVMX_ADD_IO_SEG(0x0001180008000480ull + (((block_id) & 1) * 0x8000000ull))
215#define CVMX_GMXX_TX_SPI_CTL(block_id) \
216 CVMX_ADD_IO_SEG(0x00011800080004C0ull + (((block_id) & 1) * 0x8000000ull))
217#define CVMX_GMXX_TX_SPI_DRAIN(block_id) \
218 CVMX_ADD_IO_SEG(0x00011800080004E0ull + (((block_id) & 1) * 0x8000000ull))
219#define CVMX_GMXX_TX_SPI_MAX(block_id) \
220 CVMX_ADD_IO_SEG(0x00011800080004B0ull + (((block_id) & 1) * 0x8000000ull))
221#define CVMX_GMXX_TX_SPI_ROUNDX(offset, block_id) \
222 CVMX_ADD_IO_SEG(0x0001180008000680ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull))
223#define CVMX_GMXX_TX_SPI_THRESH(block_id) \
224 CVMX_ADD_IO_SEG(0x00011800080004B8ull + (((block_id) & 1) * 0x8000000ull))
225#define CVMX_GMXX_TX_XAUI_CTL(block_id) \
226 CVMX_ADD_IO_SEG(0x0001180008000528ull + (((block_id) & 1) * 0x8000000ull))
227#define CVMX_GMXX_XAUI_EXT_LOOPBACK(block_id) \
228 CVMX_ADD_IO_SEG(0x0001180008000540ull + (((block_id) & 1) * 0x8000000ull))
229
/*
 * GMX register bit layouts (auto-generated from the hardware
 * description — do not hand-edit or reorder fields).
 *
 * Each union overlays the raw 64-bit register value (u64) with
 * per-chip bit-field views: the "_s" struct is the superset layout,
 * and the cnXXxx members give model-specific layouts (shared structs
 * are reused where chips are identical).  Fields are declared
 * high-bit-first (reserved_31_63 etc. come first); the declaration
 * order encodes the register layout and must not be changed.
 */
230union cvmx_gmxx_bad_reg {
231 uint64_t u64;
232 struct cvmx_gmxx_bad_reg_s {
233 uint64_t reserved_31_63:33;
234 uint64_t inb_nxa:4;
235 uint64_t statovr:1;
236 uint64_t loststat:4;
237 uint64_t reserved_18_21:4;
238 uint64_t out_ovr:16;
239 uint64_t ncb_ovr:1;
240 uint64_t out_col:1;
241 } s;
242 struct cvmx_gmxx_bad_reg_cn30xx {
243 uint64_t reserved_31_63:33;
244 uint64_t inb_nxa:4;
245 uint64_t statovr:1;
246 uint64_t reserved_25_25:1;
247 uint64_t loststat:3;
248 uint64_t reserved_5_21:17;
249 uint64_t out_ovr:3;
250 uint64_t reserved_0_1:2;
251 } cn30xx;
252 struct cvmx_gmxx_bad_reg_cn30xx cn31xx;
253 struct cvmx_gmxx_bad_reg_s cn38xx;
254 struct cvmx_gmxx_bad_reg_s cn38xxp2;
255 struct cvmx_gmxx_bad_reg_cn30xx cn50xx;
256 struct cvmx_gmxx_bad_reg_cn52xx {
257 uint64_t reserved_31_63:33;
258 uint64_t inb_nxa:4;
259 uint64_t statovr:1;
260 uint64_t loststat:4;
261 uint64_t reserved_6_21:16;
262 uint64_t out_ovr:4;
263 uint64_t reserved_0_1:2;
264 } cn52xx;
265 struct cvmx_gmxx_bad_reg_cn52xx cn52xxp1;
266 struct cvmx_gmxx_bad_reg_cn52xx cn56xx;
267 struct cvmx_gmxx_bad_reg_cn52xx cn56xxp1;
268 struct cvmx_gmxx_bad_reg_s cn58xx;
269 struct cvmx_gmxx_bad_reg_s cn58xxp1;
270};
271
272union cvmx_gmxx_bist {
273 uint64_t u64;
274 struct cvmx_gmxx_bist_s {
275 uint64_t reserved_17_63:47;
276 uint64_t status:17;
277 } s;
278 struct cvmx_gmxx_bist_cn30xx {
279 uint64_t reserved_10_63:54;
280 uint64_t status:10;
281 } cn30xx;
282 struct cvmx_gmxx_bist_cn30xx cn31xx;
283 struct cvmx_gmxx_bist_cn30xx cn38xx;
284 struct cvmx_gmxx_bist_cn30xx cn38xxp2;
285 struct cvmx_gmxx_bist_cn50xx {
286 uint64_t reserved_12_63:52;
287 uint64_t status:12;
288 } cn50xx;
289 struct cvmx_gmxx_bist_cn52xx {
290 uint64_t reserved_16_63:48;
291 uint64_t status:16;
292 } cn52xx;
293 struct cvmx_gmxx_bist_cn52xx cn52xxp1;
294 struct cvmx_gmxx_bist_cn52xx cn56xx;
295 struct cvmx_gmxx_bist_cn52xx cn56xxp1;
296 struct cvmx_gmxx_bist_s cn58xx;
297 struct cvmx_gmxx_bist_s cn58xxp1;
298};
299
300union cvmx_gmxx_clk_en {
301 uint64_t u64;
302 struct cvmx_gmxx_clk_en_s {
303 uint64_t reserved_1_63:63;
304 uint64_t clk_en:1;
305 } s;
306 struct cvmx_gmxx_clk_en_s cn52xx;
307 struct cvmx_gmxx_clk_en_s cn52xxp1;
308 struct cvmx_gmxx_clk_en_s cn56xx;
309 struct cvmx_gmxx_clk_en_s cn56xxp1;
310};
311
312union cvmx_gmxx_hg2_control {
313 uint64_t u64;
314 struct cvmx_gmxx_hg2_control_s {
315 uint64_t reserved_19_63:45;
316 uint64_t hg2tx_en:1;
317 uint64_t hg2rx_en:1;
318 uint64_t phys_en:1;
319 uint64_t logl_en:16;
320 } s;
321 struct cvmx_gmxx_hg2_control_s cn52xx;
322 struct cvmx_gmxx_hg2_control_s cn52xxp1;
323 struct cvmx_gmxx_hg2_control_s cn56xx;
324};
325
326union cvmx_gmxx_inf_mode {
327 uint64_t u64;
328 struct cvmx_gmxx_inf_mode_s {
329 uint64_t reserved_10_63:54;
330 uint64_t speed:2;
331 uint64_t reserved_6_7:2;
332 uint64_t mode:2;
333 uint64_t reserved_3_3:1;
334 uint64_t p0mii:1;
335 uint64_t en:1;
336 uint64_t type:1;
337 } s;
338 struct cvmx_gmxx_inf_mode_cn30xx {
339 uint64_t reserved_3_63:61;
340 uint64_t p0mii:1;
341 uint64_t en:1;
342 uint64_t type:1;
343 } cn30xx;
344 struct cvmx_gmxx_inf_mode_cn31xx {
345 uint64_t reserved_2_63:62;
346 uint64_t en:1;
347 uint64_t type:1;
348 } cn31xx;
349 struct cvmx_gmxx_inf_mode_cn31xx cn38xx;
350 struct cvmx_gmxx_inf_mode_cn31xx cn38xxp2;
351 struct cvmx_gmxx_inf_mode_cn30xx cn50xx;
352 struct cvmx_gmxx_inf_mode_cn52xx {
353 uint64_t reserved_10_63:54;
354 uint64_t speed:2;
355 uint64_t reserved_6_7:2;
356 uint64_t mode:2;
357 uint64_t reserved_2_3:2;
358 uint64_t en:1;
359 uint64_t type:1;
360 } cn52xx;
361 struct cvmx_gmxx_inf_mode_cn52xx cn52xxp1;
362 struct cvmx_gmxx_inf_mode_cn52xx cn56xx;
363 struct cvmx_gmxx_inf_mode_cn52xx cn56xxp1;
364 struct cvmx_gmxx_inf_mode_cn31xx cn58xx;
365 struct cvmx_gmxx_inf_mode_cn31xx cn58xxp1;
366};
367
368union cvmx_gmxx_nxa_adr {
369 uint64_t u64;
370 struct cvmx_gmxx_nxa_adr_s {
371 uint64_t reserved_6_63:58;
372 uint64_t prt:6;
373 } s;
374 struct cvmx_gmxx_nxa_adr_s cn30xx;
375 struct cvmx_gmxx_nxa_adr_s cn31xx;
376 struct cvmx_gmxx_nxa_adr_s cn38xx;
377 struct cvmx_gmxx_nxa_adr_s cn38xxp2;
378 struct cvmx_gmxx_nxa_adr_s cn50xx;
379 struct cvmx_gmxx_nxa_adr_s cn52xx;
380 struct cvmx_gmxx_nxa_adr_s cn52xxp1;
381 struct cvmx_gmxx_nxa_adr_s cn56xx;
382 struct cvmx_gmxx_nxa_adr_s cn56xxp1;
383 struct cvmx_gmxx_nxa_adr_s cn58xx;
384 struct cvmx_gmxx_nxa_adr_s cn58xxp1;
385};
386
387union cvmx_gmxx_prtx_cbfc_ctl {
388 uint64_t u64;
389 struct cvmx_gmxx_prtx_cbfc_ctl_s {
390 uint64_t phys_en:16;
391 uint64_t logl_en:16;
392 uint64_t phys_bp:16;
393 uint64_t reserved_4_15:12;
394 uint64_t bck_en:1;
395 uint64_t drp_en:1;
396 uint64_t tx_en:1;
397 uint64_t rx_en:1;
398 } s;
399 struct cvmx_gmxx_prtx_cbfc_ctl_s cn52xx;
400 struct cvmx_gmxx_prtx_cbfc_ctl_s cn56xx;
401};
402
403union cvmx_gmxx_prtx_cfg {
404 uint64_t u64;
405 struct cvmx_gmxx_prtx_cfg_s {
406 uint64_t reserved_14_63:50;
407 uint64_t tx_idle:1;
408 uint64_t rx_idle:1;
409 uint64_t reserved_9_11:3;
410 uint64_t speed_msb:1;
411 uint64_t reserved_4_7:4;
412 uint64_t slottime:1;
413 uint64_t duplex:1;
414 uint64_t speed:1;
415 uint64_t en:1;
416 } s;
417 struct cvmx_gmxx_prtx_cfg_cn30xx {
418 uint64_t reserved_4_63:60;
419 uint64_t slottime:1;
420 uint64_t duplex:1;
421 uint64_t speed:1;
422 uint64_t en:1;
423 } cn30xx;
424 struct cvmx_gmxx_prtx_cfg_cn30xx cn31xx;
425 struct cvmx_gmxx_prtx_cfg_cn30xx cn38xx;
426 struct cvmx_gmxx_prtx_cfg_cn30xx cn38xxp2;
427 struct cvmx_gmxx_prtx_cfg_cn30xx cn50xx;
428 struct cvmx_gmxx_prtx_cfg_s cn52xx;
429 struct cvmx_gmxx_prtx_cfg_s cn52xxp1;
430 struct cvmx_gmxx_prtx_cfg_s cn56xx;
431 struct cvmx_gmxx_prtx_cfg_s cn56xxp1;
432 struct cvmx_gmxx_prtx_cfg_cn30xx cn58xx;
433 struct cvmx_gmxx_prtx_cfg_cn30xx cn58xxp1;
434};
435
436union cvmx_gmxx_rxx_adr_cam0 {
437 uint64_t u64;
438 struct cvmx_gmxx_rxx_adr_cam0_s {
439 uint64_t adr:64;
440 } s;
441 struct cvmx_gmxx_rxx_adr_cam0_s cn30xx;
442 struct cvmx_gmxx_rxx_adr_cam0_s cn31xx;
443 struct cvmx_gmxx_rxx_adr_cam0_s cn38xx;
444 struct cvmx_gmxx_rxx_adr_cam0_s cn38xxp2;
445 struct cvmx_gmxx_rxx_adr_cam0_s cn50xx;
446 struct cvmx_gmxx_rxx_adr_cam0_s cn52xx;
447 struct cvmx_gmxx_rxx_adr_cam0_s cn52xxp1;
448 struct cvmx_gmxx_rxx_adr_cam0_s cn56xx;
449 struct cvmx_gmxx_rxx_adr_cam0_s cn56xxp1;
450 struct cvmx_gmxx_rxx_adr_cam0_s cn58xx;
451 struct cvmx_gmxx_rxx_adr_cam0_s cn58xxp1;
452};
453
454union cvmx_gmxx_rxx_adr_cam1 {
455 uint64_t u64;
456 struct cvmx_gmxx_rxx_adr_cam1_s {
457 uint64_t adr:64;
458 } s;
459 struct cvmx_gmxx_rxx_adr_cam1_s cn30xx;
460 struct cvmx_gmxx_rxx_adr_cam1_s cn31xx;
461 struct cvmx_gmxx_rxx_adr_cam1_s cn38xx;
462 struct cvmx_gmxx_rxx_adr_cam1_s cn38xxp2;
463 struct cvmx_gmxx_rxx_adr_cam1_s cn50xx;
464 struct cvmx_gmxx_rxx_adr_cam1_s cn52xx;
465 struct cvmx_gmxx_rxx_adr_cam1_s cn52xxp1;
466 struct cvmx_gmxx_rxx_adr_cam1_s cn56xx;
467 struct cvmx_gmxx_rxx_adr_cam1_s cn56xxp1;
468 struct cvmx_gmxx_rxx_adr_cam1_s cn58xx;
469 struct cvmx_gmxx_rxx_adr_cam1_s cn58xxp1;
470};
471
472union cvmx_gmxx_rxx_adr_cam2 {
473 uint64_t u64;
474 struct cvmx_gmxx_rxx_adr_cam2_s {
475 uint64_t adr:64;
476 } s;
477 struct cvmx_gmxx_rxx_adr_cam2_s cn30xx;
478 struct cvmx_gmxx_rxx_adr_cam2_s cn31xx;
479 struct cvmx_gmxx_rxx_adr_cam2_s cn38xx;
480 struct cvmx_gmxx_rxx_adr_cam2_s cn38xxp2;
481 struct cvmx_gmxx_rxx_adr_cam2_s cn50xx;
482 struct cvmx_gmxx_rxx_adr_cam2_s cn52xx;
483 struct cvmx_gmxx_rxx_adr_cam2_s cn52xxp1;
484 struct cvmx_gmxx_rxx_adr_cam2_s cn56xx;
485 struct cvmx_gmxx_rxx_adr_cam2_s cn56xxp1;
486 struct cvmx_gmxx_rxx_adr_cam2_s cn58xx;
487 struct cvmx_gmxx_rxx_adr_cam2_s cn58xxp1;
488};
489
490union cvmx_gmxx_rxx_adr_cam3 {
491 uint64_t u64;
492 struct cvmx_gmxx_rxx_adr_cam3_s {
493 uint64_t adr:64;
494 } s;
495 struct cvmx_gmxx_rxx_adr_cam3_s cn30xx;
496 struct cvmx_gmxx_rxx_adr_cam3_s cn31xx;
497 struct cvmx_gmxx_rxx_adr_cam3_s cn38xx;
498 struct cvmx_gmxx_rxx_adr_cam3_s cn38xxp2;
499 struct cvmx_gmxx_rxx_adr_cam3_s cn50xx;
500 struct cvmx_gmxx_rxx_adr_cam3_s cn52xx;
501 struct cvmx_gmxx_rxx_adr_cam3_s cn52xxp1;
502 struct cvmx_gmxx_rxx_adr_cam3_s cn56xx;
503 struct cvmx_gmxx_rxx_adr_cam3_s cn56xxp1;
504 struct cvmx_gmxx_rxx_adr_cam3_s cn58xx;
505 struct cvmx_gmxx_rxx_adr_cam3_s cn58xxp1;
506};
507
508union cvmx_gmxx_rxx_adr_cam4 {
509 uint64_t u64;
510 struct cvmx_gmxx_rxx_adr_cam4_s {
511 uint64_t adr:64;
512 } s;
513 struct cvmx_gmxx_rxx_adr_cam4_s cn30xx;
514 struct cvmx_gmxx_rxx_adr_cam4_s cn31xx;
515 struct cvmx_gmxx_rxx_adr_cam4_s cn38xx;
516 struct cvmx_gmxx_rxx_adr_cam4_s cn38xxp2;
517 struct cvmx_gmxx_rxx_adr_cam4_s cn50xx;
518 struct cvmx_gmxx_rxx_adr_cam4_s cn52xx;
519 struct cvmx_gmxx_rxx_adr_cam4_s cn52xxp1;
520 struct cvmx_gmxx_rxx_adr_cam4_s cn56xx;
521 struct cvmx_gmxx_rxx_adr_cam4_s cn56xxp1;
522 struct cvmx_gmxx_rxx_adr_cam4_s cn58xx;
523 struct cvmx_gmxx_rxx_adr_cam4_s cn58xxp1;
524};
525
526union cvmx_gmxx_rxx_adr_cam5 {
527 uint64_t u64;
528 struct cvmx_gmxx_rxx_adr_cam5_s {
529 uint64_t adr:64;
530 } s;
531 struct cvmx_gmxx_rxx_adr_cam5_s cn30xx;
532 struct cvmx_gmxx_rxx_adr_cam5_s cn31xx;
533 struct cvmx_gmxx_rxx_adr_cam5_s cn38xx;
534 struct cvmx_gmxx_rxx_adr_cam5_s cn38xxp2;
535 struct cvmx_gmxx_rxx_adr_cam5_s cn50xx;
536 struct cvmx_gmxx_rxx_adr_cam5_s cn52xx;
537 struct cvmx_gmxx_rxx_adr_cam5_s cn52xxp1;
538 struct cvmx_gmxx_rxx_adr_cam5_s cn56xx;
539 struct cvmx_gmxx_rxx_adr_cam5_s cn56xxp1;
540 struct cvmx_gmxx_rxx_adr_cam5_s cn58xx;
541 struct cvmx_gmxx_rxx_adr_cam5_s cn58xxp1;
542};
543
544union cvmx_gmxx_rxx_adr_cam_en {
545 uint64_t u64;
546 struct cvmx_gmxx_rxx_adr_cam_en_s {
547 uint64_t reserved_8_63:56;
548 uint64_t en:8;
549 } s;
550 struct cvmx_gmxx_rxx_adr_cam_en_s cn30xx;
551 struct cvmx_gmxx_rxx_adr_cam_en_s cn31xx;
552 struct cvmx_gmxx_rxx_adr_cam_en_s cn38xx;
553 struct cvmx_gmxx_rxx_adr_cam_en_s cn38xxp2;
554 struct cvmx_gmxx_rxx_adr_cam_en_s cn50xx;
555 struct cvmx_gmxx_rxx_adr_cam_en_s cn52xx;
556 struct cvmx_gmxx_rxx_adr_cam_en_s cn52xxp1;
557 struct cvmx_gmxx_rxx_adr_cam_en_s cn56xx;
558 struct cvmx_gmxx_rxx_adr_cam_en_s cn56xxp1;
559 struct cvmx_gmxx_rxx_adr_cam_en_s cn58xx;
560 struct cvmx_gmxx_rxx_adr_cam_en_s cn58xxp1;
561};
562
563union cvmx_gmxx_rxx_adr_ctl {
564 uint64_t u64;
565 struct cvmx_gmxx_rxx_adr_ctl_s {
566 uint64_t reserved_4_63:60;
567 uint64_t cam_mode:1;
568 uint64_t mcst:2;
569 uint64_t bcst:1;
570 } s;
571 struct cvmx_gmxx_rxx_adr_ctl_s cn30xx;
572 struct cvmx_gmxx_rxx_adr_ctl_s cn31xx;
573 struct cvmx_gmxx_rxx_adr_ctl_s cn38xx;
574 struct cvmx_gmxx_rxx_adr_ctl_s cn38xxp2;
575 struct cvmx_gmxx_rxx_adr_ctl_s cn50xx;
576 struct cvmx_gmxx_rxx_adr_ctl_s cn52xx;
577 struct cvmx_gmxx_rxx_adr_ctl_s cn52xxp1;
578 struct cvmx_gmxx_rxx_adr_ctl_s cn56xx;
579 struct cvmx_gmxx_rxx_adr_ctl_s cn56xxp1;
580 struct cvmx_gmxx_rxx_adr_ctl_s cn58xx;
581 struct cvmx_gmxx_rxx_adr_ctl_s cn58xxp1;
582};
583
584union cvmx_gmxx_rxx_decision {
585 uint64_t u64;
586 struct cvmx_gmxx_rxx_decision_s {
587 uint64_t reserved_5_63:59;
588 uint64_t cnt:5;
589 } s;
590 struct cvmx_gmxx_rxx_decision_s cn30xx;
591 struct cvmx_gmxx_rxx_decision_s cn31xx;
592 struct cvmx_gmxx_rxx_decision_s cn38xx;
593 struct cvmx_gmxx_rxx_decision_s cn38xxp2;
594 struct cvmx_gmxx_rxx_decision_s cn50xx;
595 struct cvmx_gmxx_rxx_decision_s cn52xx;
596 struct cvmx_gmxx_rxx_decision_s cn52xxp1;
597 struct cvmx_gmxx_rxx_decision_s cn56xx;
598 struct cvmx_gmxx_rxx_decision_s cn56xxp1;
599 struct cvmx_gmxx_rxx_decision_s cn58xx;
600 struct cvmx_gmxx_rxx_decision_s cn58xxp1;
601};
602
603union cvmx_gmxx_rxx_frm_chk {
604 uint64_t u64;
605 struct cvmx_gmxx_rxx_frm_chk_s {
606 uint64_t reserved_10_63:54;
607 uint64_t niberr:1;
608 uint64_t skperr:1;
609 uint64_t rcverr:1;
610 uint64_t lenerr:1;
611 uint64_t alnerr:1;
612 uint64_t fcserr:1;
613 uint64_t jabber:1;
614 uint64_t maxerr:1;
615 uint64_t carext:1;
616 uint64_t minerr:1;
617 } s;
618 struct cvmx_gmxx_rxx_frm_chk_s cn30xx;
619 struct cvmx_gmxx_rxx_frm_chk_s cn31xx;
620 struct cvmx_gmxx_rxx_frm_chk_s cn38xx;
621 struct cvmx_gmxx_rxx_frm_chk_s cn38xxp2;
622 struct cvmx_gmxx_rxx_frm_chk_cn50xx {
623 uint64_t reserved_10_63:54;
624 uint64_t niberr:1;
625 uint64_t skperr:1;
626 uint64_t rcverr:1;
627 uint64_t reserved_6_6:1;
628 uint64_t alnerr:1;
629 uint64_t fcserr:1;
630 uint64_t jabber:1;
631 uint64_t reserved_2_2:1;
632 uint64_t carext:1;
633 uint64_t reserved_0_0:1;
634 } cn50xx;
635 struct cvmx_gmxx_rxx_frm_chk_cn52xx {
636 uint64_t reserved_9_63:55;
637 uint64_t skperr:1;
638 uint64_t rcverr:1;
639 uint64_t reserved_5_6:2;
640 uint64_t fcserr:1;
641 uint64_t jabber:1;
642 uint64_t reserved_2_2:1;
643 uint64_t carext:1;
644 uint64_t reserved_0_0:1;
645 } cn52xx;
646 struct cvmx_gmxx_rxx_frm_chk_cn52xx cn52xxp1;
647 struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xx;
648 struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xxp1;
649 struct cvmx_gmxx_rxx_frm_chk_s cn58xx;
650 struct cvmx_gmxx_rxx_frm_chk_s cn58xxp1;
651};
652
653union cvmx_gmxx_rxx_frm_ctl {
654 uint64_t u64;
655 struct cvmx_gmxx_rxx_frm_ctl_s {
656 uint64_t reserved_11_63:53;
657 uint64_t null_dis:1;
658 uint64_t pre_align:1;
659 uint64_t pad_len:1;
660 uint64_t vlan_len:1;
661 uint64_t pre_free:1;
662 uint64_t ctl_smac:1;
663 uint64_t ctl_mcst:1;
664 uint64_t ctl_bck:1;
665 uint64_t ctl_drp:1;
666 uint64_t pre_strp:1;
667 uint64_t pre_chk:1;
668 } s;
669 struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
670 uint64_t reserved_9_63:55;
671 uint64_t pad_len:1;
672 uint64_t vlan_len:1;
673 uint64_t pre_free:1;
674 uint64_t ctl_smac:1;
675 uint64_t ctl_mcst:1;
676 uint64_t ctl_bck:1;
677 uint64_t ctl_drp:1;
678 uint64_t pre_strp:1;
679 uint64_t pre_chk:1;
680 } cn30xx;
681 struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
682 uint64_t reserved_8_63:56;
683 uint64_t vlan_len:1;
684 uint64_t pre_free:1;
685 uint64_t ctl_smac:1;
686 uint64_t ctl_mcst:1;
687 uint64_t ctl_bck:1;
688 uint64_t ctl_drp:1;
689 uint64_t pre_strp:1;
690 uint64_t pre_chk:1;
691 } cn31xx;
692 struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn38xx;
693 struct cvmx_gmxx_rxx_frm_ctl_cn31xx cn38xxp2;
694 struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
695 uint64_t reserved_11_63:53;
696 uint64_t null_dis:1;
697 uint64_t pre_align:1;
698 uint64_t reserved_7_8:2;
699 uint64_t pre_free:1;
700 uint64_t ctl_smac:1;
701 uint64_t ctl_mcst:1;
702 uint64_t ctl_bck:1;
703 uint64_t ctl_drp:1;
704 uint64_t pre_strp:1;
705 uint64_t pre_chk:1;
706 } cn50xx;
707 struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xx;
708 struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xxp1;
709 struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn56xx;
710 struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
711 uint64_t reserved_10_63:54;
712 uint64_t pre_align:1;
713 uint64_t reserved_7_8:2;
714 uint64_t pre_free:1;
715 uint64_t ctl_smac:1;
716 uint64_t ctl_mcst:1;
717 uint64_t ctl_bck:1;
718 uint64_t ctl_drp:1;
719 uint64_t pre_strp:1;
720 uint64_t pre_chk:1;
721 } cn56xxp1;
722 struct cvmx_gmxx_rxx_frm_ctl_s cn58xx;
723 struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn58xxp1;
724};
725
726union cvmx_gmxx_rxx_frm_max {
727 uint64_t u64;
728 struct cvmx_gmxx_rxx_frm_max_s {
729 uint64_t reserved_16_63:48;
730 uint64_t len:16;
731 } s;
732 struct cvmx_gmxx_rxx_frm_max_s cn30xx;
733 struct cvmx_gmxx_rxx_frm_max_s cn31xx;
734 struct cvmx_gmxx_rxx_frm_max_s cn38xx;
735 struct cvmx_gmxx_rxx_frm_max_s cn38xxp2;
736 struct cvmx_gmxx_rxx_frm_max_s cn58xx;
737 struct cvmx_gmxx_rxx_frm_max_s cn58xxp1;
738};
739
740union cvmx_gmxx_rxx_frm_min {
741 uint64_t u64;
742 struct cvmx_gmxx_rxx_frm_min_s {
743 uint64_t reserved_16_63:48;
744 uint64_t len:16;
745 } s;
746 struct cvmx_gmxx_rxx_frm_min_s cn30xx;
747 struct cvmx_gmxx_rxx_frm_min_s cn31xx;
748 struct cvmx_gmxx_rxx_frm_min_s cn38xx;
749 struct cvmx_gmxx_rxx_frm_min_s cn38xxp2;
750 struct cvmx_gmxx_rxx_frm_min_s cn58xx;
751 struct cvmx_gmxx_rxx_frm_min_s cn58xxp1;
752};
753
754union cvmx_gmxx_rxx_ifg {
755 uint64_t u64;
756 struct cvmx_gmxx_rxx_ifg_s {
757 uint64_t reserved_4_63:60;
758 uint64_t ifg:4;
759 } s;
760 struct cvmx_gmxx_rxx_ifg_s cn30xx;
761 struct cvmx_gmxx_rxx_ifg_s cn31xx;
762 struct cvmx_gmxx_rxx_ifg_s cn38xx;
763 struct cvmx_gmxx_rxx_ifg_s cn38xxp2;
764 struct cvmx_gmxx_rxx_ifg_s cn50xx;
765 struct cvmx_gmxx_rxx_ifg_s cn52xx;
766 struct cvmx_gmxx_rxx_ifg_s cn52xxp1;
767 struct cvmx_gmxx_rxx_ifg_s cn56xx;
768 struct cvmx_gmxx_rxx_ifg_s cn56xxp1;
769 struct cvmx_gmxx_rxx_ifg_s cn58xx;
770 struct cvmx_gmxx_rxx_ifg_s cn58xxp1;
771};
772
/*
 * GMX_RX(port)_INT_EN CSR: one single-bit enable per RX event (names
 * mirror the INT_REG bits below).  Per-chip layouts differ only in
 * which event bits are implemented; unimplemented positions become
 * reserved_* filler so that every variant still occupies 64 bits.
 * Bit-position meaning comes from the HRM; only widths/order are
 * asserted here.
 */
773union cvmx_gmxx_rxx_int_en {
774	uint64_t u64;
	/* Superset layout: all 29 event bits (cn52xx/cn56xx-class). */
775	struct cvmx_gmxx_rxx_int_en_s {
776		uint64_t reserved_29_63:35;
777		uint64_t hg2cc:1;
778		uint64_t hg2fld:1;
779		uint64_t undat:1;
780		uint64_t uneop:1;
781		uint64_t unsop:1;
782		uint64_t bad_term:1;
783		uint64_t bad_seq:1;
784		uint64_t rem_fault:1;
785		uint64_t loc_fault:1;
786		uint64_t pause_drp:1;
787		uint64_t phy_dupx:1;
788		uint64_t phy_spd:1;
789		uint64_t phy_link:1;
790		uint64_t ifgerr:1;
791		uint64_t coldet:1;
792		uint64_t falerr:1;
793		uint64_t rsverr:1;
794		uint64_t pcterr:1;
795		uint64_t ovrerr:1;
796		uint64_t niberr:1;
797		uint64_t skperr:1;
798		uint64_t rcverr:1;
799		uint64_t lenerr:1;
800		uint64_t alnerr:1;
801		uint64_t fcserr:1;
802		uint64_t jabber:1;
803		uint64_t maxerr:1;
804		uint64_t carext:1;
805		uint64_t minerr:1;
806	} s;
	/* cn30xx-class: only the low 19 event bits exist. */
807	struct cvmx_gmxx_rxx_int_en_cn30xx {
808		uint64_t reserved_19_63:45;
809		uint64_t phy_dupx:1;
810		uint64_t phy_spd:1;
811		uint64_t phy_link:1;
812		uint64_t ifgerr:1;
813		uint64_t coldet:1;
814		uint64_t falerr:1;
815		uint64_t rsverr:1;
816		uint64_t pcterr:1;
817		uint64_t ovrerr:1;
818		uint64_t niberr:1;
819		uint64_t skperr:1;
820		uint64_t rcverr:1;
821		uint64_t lenerr:1;
822		uint64_t alnerr:1;
823		uint64_t fcserr:1;
824		uint64_t jabber:1;
825		uint64_t maxerr:1;
826		uint64_t carext:1;
827		uint64_t minerr:1;
828	} cn30xx;
829	struct cvmx_gmxx_rxx_int_en_cn30xx cn31xx;
830	struct cvmx_gmxx_rxx_int_en_cn30xx cn38xx;
831	struct cvmx_gmxx_rxx_int_en_cn30xx cn38xxp2;
	/* cn50xx: adds pause_drp; minerr/lenerr/maxerr/niberr positions reserved. */
832	struct cvmx_gmxx_rxx_int_en_cn50xx {
833		uint64_t reserved_20_63:44;
834		uint64_t pause_drp:1;
835		uint64_t phy_dupx:1;
836		uint64_t phy_spd:1;
837		uint64_t phy_link:1;
838		uint64_t ifgerr:1;
839		uint64_t coldet:1;
840		uint64_t falerr:1;
841		uint64_t rsverr:1;
842		uint64_t pcterr:1;
843		uint64_t ovrerr:1;
844		uint64_t niberr:1;
845		uint64_t skperr:1;
846		uint64_t rcverr:1;
847		uint64_t reserved_6_6:1;
848		uint64_t alnerr:1;
849		uint64_t fcserr:1;
850		uint64_t jabber:1;
851		uint64_t reserved_2_2:1;
852		uint64_t carext:1;
853		uint64_t reserved_0_0:1;
854	} cn50xx;
	/* cn52xx: full 29-bit set minus the phy_* bits (positions 16-18 reserved). */
855	struct cvmx_gmxx_rxx_int_en_cn52xx {
856		uint64_t reserved_29_63:35;
857		uint64_t hg2cc:1;
858		uint64_t hg2fld:1;
859		uint64_t undat:1;
860		uint64_t uneop:1;
861		uint64_t unsop:1;
862		uint64_t bad_term:1;
863		uint64_t bad_seq:1;
864		uint64_t rem_fault:1;
865		uint64_t loc_fault:1;
866		uint64_t pause_drp:1;
867		uint64_t reserved_16_18:3;
868		uint64_t ifgerr:1;
869		uint64_t coldet:1;
870		uint64_t falerr:1;
871		uint64_t rsverr:1;
872		uint64_t pcterr:1;
873		uint64_t ovrerr:1;
874		uint64_t reserved_9_9:1;
875		uint64_t skperr:1;
876		uint64_t rcverr:1;
877		uint64_t reserved_5_6:2;
878		uint64_t fcserr:1;
879		uint64_t jabber:1;
880		uint64_t reserved_2_2:1;
881		uint64_t carext:1;
882		uint64_t reserved_0_0:1;
883	} cn52xx;
884	struct cvmx_gmxx_rxx_int_en_cn52xx cn52xxp1;
885	struct cvmx_gmxx_rxx_int_en_cn52xx cn56xx;
	/* cn56xx pass 1: like cn52xx but without the hg2cc/hg2fld bits. */
886	struct cvmx_gmxx_rxx_int_en_cn56xxp1 {
887		uint64_t reserved_27_63:37;
888		uint64_t undat:1;
889		uint64_t uneop:1;
890		uint64_t unsop:1;
891		uint64_t bad_term:1;
892		uint64_t bad_seq:1;
893		uint64_t rem_fault:1;
894		uint64_t loc_fault:1;
895		uint64_t pause_drp:1;
896		uint64_t reserved_16_18:3;
897		uint64_t ifgerr:1;
898		uint64_t coldet:1;
899		uint64_t falerr:1;
900		uint64_t rsverr:1;
901		uint64_t pcterr:1;
902		uint64_t ovrerr:1;
903		uint64_t reserved_9_9:1;
904		uint64_t skperr:1;
905		uint64_t rcverr:1;
906		uint64_t reserved_5_6:2;
907		uint64_t fcserr:1;
908		uint64_t jabber:1;
909		uint64_t reserved_2_2:1;
910		uint64_t carext:1;
911		uint64_t reserved_0_0:1;
912	} cn56xxp1;
	/* cn58xx: cn30xx bit set plus pause_drp at bit 19. */
913	struct cvmx_gmxx_rxx_int_en_cn58xx {
914		uint64_t reserved_20_63:44;
915		uint64_t pause_drp:1;
916		uint64_t phy_dupx:1;
917		uint64_t phy_spd:1;
918		uint64_t phy_link:1;
919		uint64_t ifgerr:1;
920		uint64_t coldet:1;
921		uint64_t falerr:1;
922		uint64_t rsverr:1;
923		uint64_t pcterr:1;
924		uint64_t ovrerr:1;
925		uint64_t niberr:1;
926		uint64_t skperr:1;
927		uint64_t rcverr:1;
928		uint64_t lenerr:1;
929		uint64_t alnerr:1;
930		uint64_t fcserr:1;
931		uint64_t jabber:1;
932		uint64_t maxerr:1;
933		uint64_t carext:1;
934		uint64_t minerr:1;
935	} cn58xx;
936	struct cvmx_gmxx_rxx_int_en_cn58xx cn58xxp1;
};
938
/*
 * GMX_RX(port)_INT_REG CSR: the RX event/status bits themselves.
 * Bit-for-bit companion of GMX_RX(port)_INT_EN above — every per-chip
 * variant here mirrors the corresponding INT_EN variant's field list
 * and positions, with reserved_* filler keeping each struct 64 bits.
 */
939union cvmx_gmxx_rxx_int_reg {
940	uint64_t u64;
	/* Superset layout: all 29 event bits (cn52xx/cn56xx-class). */
941	struct cvmx_gmxx_rxx_int_reg_s {
942		uint64_t reserved_29_63:35;
943		uint64_t hg2cc:1;
944		uint64_t hg2fld:1;
945		uint64_t undat:1;
946		uint64_t uneop:1;
947		uint64_t unsop:1;
948		uint64_t bad_term:1;
949		uint64_t bad_seq:1;
950		uint64_t rem_fault:1;
951		uint64_t loc_fault:1;
952		uint64_t pause_drp:1;
953		uint64_t phy_dupx:1;
954		uint64_t phy_spd:1;
955		uint64_t phy_link:1;
956		uint64_t ifgerr:1;
957		uint64_t coldet:1;
958		uint64_t falerr:1;
959		uint64_t rsverr:1;
960		uint64_t pcterr:1;
961		uint64_t ovrerr:1;
962		uint64_t niberr:1;
963		uint64_t skperr:1;
964		uint64_t rcverr:1;
965		uint64_t lenerr:1;
966		uint64_t alnerr:1;
967		uint64_t fcserr:1;
968		uint64_t jabber:1;
969		uint64_t maxerr:1;
970		uint64_t carext:1;
971		uint64_t minerr:1;
972	} s;
	/* cn30xx-class: only the low 19 event bits exist. */
973	struct cvmx_gmxx_rxx_int_reg_cn30xx {
974		uint64_t reserved_19_63:45;
975		uint64_t phy_dupx:1;
976		uint64_t phy_spd:1;
977		uint64_t phy_link:1;
978		uint64_t ifgerr:1;
979		uint64_t coldet:1;
980		uint64_t falerr:1;
981		uint64_t rsverr:1;
982		uint64_t pcterr:1;
983		uint64_t ovrerr:1;
984		uint64_t niberr:1;
985		uint64_t skperr:1;
986		uint64_t rcverr:1;
987		uint64_t lenerr:1;
988		uint64_t alnerr:1;
989		uint64_t fcserr:1;
990		uint64_t jabber:1;
991		uint64_t maxerr:1;
992		uint64_t carext:1;
993		uint64_t minerr:1;
994	} cn30xx;
995	struct cvmx_gmxx_rxx_int_reg_cn30xx cn31xx;
996	struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xx;
997	struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xxp2;
	/* cn50xx: adds pause_drp; minerr/lenerr/maxerr/niberr positions reserved. */
998	struct cvmx_gmxx_rxx_int_reg_cn50xx {
999		uint64_t reserved_20_63:44;
1000		uint64_t pause_drp:1;
1001		uint64_t phy_dupx:1;
1002		uint64_t phy_spd:1;
1003		uint64_t phy_link:1;
1004		uint64_t ifgerr:1;
1005		uint64_t coldet:1;
1006		uint64_t falerr:1;
1007		uint64_t rsverr:1;
1008		uint64_t pcterr:1;
1009		uint64_t ovrerr:1;
1010		uint64_t niberr:1;
1011		uint64_t skperr:1;
1012		uint64_t rcverr:1;
1013		uint64_t reserved_6_6:1;
1014		uint64_t alnerr:1;
1015		uint64_t fcserr:1;
1016		uint64_t jabber:1;
1017		uint64_t reserved_2_2:1;
1018		uint64_t carext:1;
1019		uint64_t reserved_0_0:1;
1020	} cn50xx;
	/* cn52xx: full 29-bit set minus the phy_* bits (positions 16-18 reserved). */
1021	struct cvmx_gmxx_rxx_int_reg_cn52xx {
1022		uint64_t reserved_29_63:35;
1023		uint64_t hg2cc:1;
1024		uint64_t hg2fld:1;
1025		uint64_t undat:1;
1026		uint64_t uneop:1;
1027		uint64_t unsop:1;
1028		uint64_t bad_term:1;
1029		uint64_t bad_seq:1;
1030		uint64_t rem_fault:1;
1031		uint64_t loc_fault:1;
1032		uint64_t pause_drp:1;
1033		uint64_t reserved_16_18:3;
1034		uint64_t ifgerr:1;
1035		uint64_t coldet:1;
1036		uint64_t falerr:1;
1037		uint64_t rsverr:1;
1038		uint64_t pcterr:1;
1039		uint64_t ovrerr:1;
1040		uint64_t reserved_9_9:1;
1041		uint64_t skperr:1;
1042		uint64_t rcverr:1;
1043		uint64_t reserved_5_6:2;
1044		uint64_t fcserr:1;
1045		uint64_t jabber:1;
1046		uint64_t reserved_2_2:1;
1047		uint64_t carext:1;
1048		uint64_t reserved_0_0:1;
1049	} cn52xx;
1050	struct cvmx_gmxx_rxx_int_reg_cn52xx cn52xxp1;
1051	struct cvmx_gmxx_rxx_int_reg_cn52xx cn56xx;
	/* cn56xx pass 1: like cn52xx but without the hg2cc/hg2fld bits. */
1052	struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
1053		uint64_t reserved_27_63:37;
1054		uint64_t undat:1;
1055		uint64_t uneop:1;
1056		uint64_t unsop:1;
1057		uint64_t bad_term:1;
1058		uint64_t bad_seq:1;
1059		uint64_t rem_fault:1;
1060		uint64_t loc_fault:1;
1061		uint64_t pause_drp:1;
1062		uint64_t reserved_16_18:3;
1063		uint64_t ifgerr:1;
1064		uint64_t coldet:1;
1065		uint64_t falerr:1;
1066		uint64_t rsverr:1;
1067		uint64_t pcterr:1;
1068		uint64_t ovrerr:1;
1069		uint64_t reserved_9_9:1;
1070		uint64_t skperr:1;
1071		uint64_t rcverr:1;
1072		uint64_t reserved_5_6:2;
1073		uint64_t fcserr:1;
1074		uint64_t jabber:1;
1075		uint64_t reserved_2_2:1;
1076		uint64_t carext:1;
1077		uint64_t reserved_0_0:1;
1078	} cn56xxp1;
	/* cn58xx: cn30xx bit set plus pause_drp at bit 19. */
1079	struct cvmx_gmxx_rxx_int_reg_cn58xx {
1080		uint64_t reserved_20_63:44;
1081		uint64_t pause_drp:1;
1082		uint64_t phy_dupx:1;
1083		uint64_t phy_spd:1;
1084		uint64_t phy_link:1;
1085		uint64_t ifgerr:1;
1086		uint64_t coldet:1;
1087		uint64_t falerr:1;
1088		uint64_t rsverr:1;
1089		uint64_t pcterr:1;
1090		uint64_t ovrerr:1;
1091		uint64_t niberr:1;
1092		uint64_t skperr:1;
1093		uint64_t rcverr:1;
1094		uint64_t lenerr:1;
1095		uint64_t alnerr:1;
1096		uint64_t fcserr:1;
1097		uint64_t jabber:1;
1098		uint64_t maxerr:1;
1099		uint64_t carext:1;
1100		uint64_t minerr:1;
1101	} cn58xx;
1102	struct cvmx_gmxx_rxx_int_reg_cn58xx cn58xxp1;
};
1104
/*
 * GMX_RX(port)_JABBER CSR: 16-bit `cnt` in bits [15:0], rest reserved.
 * Name suggests a jabber (oversize-frame) threshold — confirm in HRM.
 */
1105union cvmx_gmxx_rxx_jabber {
1106	uint64_t u64;
1107	struct cvmx_gmxx_rxx_jabber_s {
1108		uint64_t reserved_16_63:48;
1109		uint64_t cnt:16;
1110	} s;
1111	struct cvmx_gmxx_rxx_jabber_s cn30xx;
1112	struct cvmx_gmxx_rxx_jabber_s cn31xx;
1113	struct cvmx_gmxx_rxx_jabber_s cn38xx;
1114	struct cvmx_gmxx_rxx_jabber_s cn38xxp2;
1115	struct cvmx_gmxx_rxx_jabber_s cn50xx;
1116	struct cvmx_gmxx_rxx_jabber_s cn52xx;
1117	struct cvmx_gmxx_rxx_jabber_s cn52xxp1;
1118	struct cvmx_gmxx_rxx_jabber_s cn56xx;
1119	struct cvmx_gmxx_rxx_jabber_s cn56xxp1;
1120	struct cvmx_gmxx_rxx_jabber_s cn58xx;
1121	struct cvmx_gmxx_rxx_jabber_s cn58xxp1;
};
1123
/*
 * GMX_RX(port)_PAUSE_DROP_TIME CSR: 16-bit `status` field in [15:0].
 * Present only on cn50xx and later models (no cn30xx/31xx/38xx alias).
 */
1124union cvmx_gmxx_rxx_pause_drop_time {
1125	uint64_t u64;
1126	struct cvmx_gmxx_rxx_pause_drop_time_s {
1127		uint64_t reserved_16_63:48;
1128		uint64_t status:16;
1129	} s;
1130	struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx;
1131	struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx;
1132	struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1;
1133	struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx;
1134	struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1;
1135	struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx;
1136	struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1;
};
1138
/*
 * GMX_RX(port)_RX_INBND CSR: in-band link fields — duplex (1 bit),
 * speed (2 bits), status (1 bit) in bits [3:0].  Not present on the
 * cn52xx/cn56xx models (no aliases declared for them).
 */
1139union cvmx_gmxx_rxx_rx_inbnd {
1140	uint64_t u64;
1141	struct cvmx_gmxx_rxx_rx_inbnd_s {
1142		uint64_t reserved_4_63:60;
1143		uint64_t duplex:1;
1144		uint64_t speed:2;
1145		uint64_t status:1;
1146	} s;
1147	struct cvmx_gmxx_rxx_rx_inbnd_s cn30xx;
1148	struct cvmx_gmxx_rxx_rx_inbnd_s cn31xx;
1149	struct cvmx_gmxx_rxx_rx_inbnd_s cn38xx;
1150	struct cvmx_gmxx_rxx_rx_inbnd_s cn38xxp2;
1151	struct cvmx_gmxx_rxx_rx_inbnd_s cn50xx;
1152	struct cvmx_gmxx_rxx_rx_inbnd_s cn58xx;
1153	struct cvmx_gmxx_rxx_rx_inbnd_s cn58xxp1;
};
1155
/*
 * GMX_RX(port)_STATS_CTL CSR: single `rd_clr` bit at position 0.
 * Name suggests read-to-clear behavior for the RX statistics
 * registers below — confirm against the HRM.
 */
1156union cvmx_gmxx_rxx_stats_ctl {
1157	uint64_t u64;
1158	struct cvmx_gmxx_rxx_stats_ctl_s {
1159		uint64_t reserved_1_63:63;
1160		uint64_t rd_clr:1;
1161	} s;
1162	struct cvmx_gmxx_rxx_stats_ctl_s cn30xx;
1163	struct cvmx_gmxx_rxx_stats_ctl_s cn31xx;
1164	struct cvmx_gmxx_rxx_stats_ctl_s cn38xx;
1165	struct cvmx_gmxx_rxx_stats_ctl_s cn38xxp2;
1166	struct cvmx_gmxx_rxx_stats_ctl_s cn50xx;
1167	struct cvmx_gmxx_rxx_stats_ctl_s cn52xx;
1168	struct cvmx_gmxx_rxx_stats_ctl_s cn52xxp1;
1169	struct cvmx_gmxx_rxx_stats_ctl_s cn56xx;
1170	struct cvmx_gmxx_rxx_stats_ctl_s cn56xxp1;
1171	struct cvmx_gmxx_rxx_stats_ctl_s cn58xx;
1172	struct cvmx_gmxx_rxx_stats_ctl_s cn58xxp1;
};
1174
/* GMX_RX(port)_STATS_OCTS CSR: 48-bit `cnt` octet counter in [47:0]. */
1175union cvmx_gmxx_rxx_stats_octs {
1176	uint64_t u64;
1177	struct cvmx_gmxx_rxx_stats_octs_s {
1178		uint64_t reserved_48_63:16;
1179		uint64_t cnt:48;
1180	} s;
1181	struct cvmx_gmxx_rxx_stats_octs_s cn30xx;
1182	struct cvmx_gmxx_rxx_stats_octs_s cn31xx;
1183	struct cvmx_gmxx_rxx_stats_octs_s cn38xx;
1184	struct cvmx_gmxx_rxx_stats_octs_s cn38xxp2;
1185	struct cvmx_gmxx_rxx_stats_octs_s cn50xx;
1186	struct cvmx_gmxx_rxx_stats_octs_s cn52xx;
1187	struct cvmx_gmxx_rxx_stats_octs_s cn52xxp1;
1188	struct cvmx_gmxx_rxx_stats_octs_s cn56xx;
1189	struct cvmx_gmxx_rxx_stats_octs_s cn56xxp1;
1190	struct cvmx_gmxx_rxx_stats_octs_s cn58xx;
1191	struct cvmx_gmxx_rxx_stats_octs_s cn58xxp1;
};
1193
/* GMX_RX(port)_STATS_OCTS_CTL CSR: 48-bit `cnt` counter in [47:0]. */
1194union cvmx_gmxx_rxx_stats_octs_ctl {
1195	uint64_t u64;
1196	struct cvmx_gmxx_rxx_stats_octs_ctl_s {
1197		uint64_t reserved_48_63:16;
1198		uint64_t cnt:48;
1199	} s;
1200	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx;
1201	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx;
1202	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx;
1203	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2;
1204	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx;
1205	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx;
1206	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1;
1207	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx;
1208	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1;
1209	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx;
1210	struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1;
};
1212
/* GMX_RX(port)_STATS_OCTS_DMAC CSR: 48-bit `cnt` counter in [47:0]. */
1213union cvmx_gmxx_rxx_stats_octs_dmac {
1214	uint64_t u64;
1215	struct cvmx_gmxx_rxx_stats_octs_dmac_s {
1216		uint64_t reserved_48_63:16;
1217		uint64_t cnt:48;
1218	} s;
1219	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx;
1220	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx;
1221	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx;
1222	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2;
1223	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx;
1224	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx;
1225	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1;
1226	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx;
1227	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1;
1228	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx;
1229	struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1;
};
1231
/* GMX_RX(port)_STATS_OCTS_DRP CSR: 48-bit `cnt` counter in [47:0]. */
1232union cvmx_gmxx_rxx_stats_octs_drp {
1233	uint64_t u64;
1234	struct cvmx_gmxx_rxx_stats_octs_drp_s {
1235		uint64_t reserved_48_63:16;
1236		uint64_t cnt:48;
1237	} s;
1238	struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx;
1239	struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx;
1240	struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx;
1241	struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2;
1242	struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx;
1243	struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx;
1244	struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1;
1245	struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx;
1246	struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1;
1247	struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx;
1248	struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1;
};
1250
/* GMX_RX(port)_STATS_PKTS CSR: 32-bit `cnt` packet counter in [31:0]. */
1251union cvmx_gmxx_rxx_stats_pkts {
1252	uint64_t u64;
1253	struct cvmx_gmxx_rxx_stats_pkts_s {
1254		uint64_t reserved_32_63:32;
1255		uint64_t cnt:32;
1256	} s;
1257	struct cvmx_gmxx_rxx_stats_pkts_s cn30xx;
1258	struct cvmx_gmxx_rxx_stats_pkts_s cn31xx;
1259	struct cvmx_gmxx_rxx_stats_pkts_s cn38xx;
1260	struct cvmx_gmxx_rxx_stats_pkts_s cn38xxp2;
1261	struct cvmx_gmxx_rxx_stats_pkts_s cn50xx;
1262	struct cvmx_gmxx_rxx_stats_pkts_s cn52xx;
1263	struct cvmx_gmxx_rxx_stats_pkts_s cn52xxp1;
1264	struct cvmx_gmxx_rxx_stats_pkts_s cn56xx;
1265	struct cvmx_gmxx_rxx_stats_pkts_s cn56xxp1;
1266	struct cvmx_gmxx_rxx_stats_pkts_s cn58xx;
1267	struct cvmx_gmxx_rxx_stats_pkts_s cn58xxp1;
};
1269
/* GMX_RX(port)_STATS_PKTS_BAD CSR: 32-bit `cnt` counter in [31:0]. */
1270union cvmx_gmxx_rxx_stats_pkts_bad {
1271	uint64_t u64;
1272	struct cvmx_gmxx_rxx_stats_pkts_bad_s {
1273		uint64_t reserved_32_63:32;
1274		uint64_t cnt:32;
1275	} s;
1276	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx;
1277	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx;
1278	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx;
1279	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2;
1280	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx;
1281	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx;
1282	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1;
1283	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx;
1284	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1;
1285	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx;
1286	struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1;
};
1288
/* GMX_RX(port)_STATS_PKTS_CTL CSR: 32-bit `cnt` counter in [31:0]. */
1289union cvmx_gmxx_rxx_stats_pkts_ctl {
1290	uint64_t u64;
1291	struct cvmx_gmxx_rxx_stats_pkts_ctl_s {
1292		uint64_t reserved_32_63:32;
1293		uint64_t cnt:32;
1294	} s;
1295	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx;
1296	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx;
1297	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx;
1298	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2;
1299	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx;
1300	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx;
1301	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1;
1302	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx;
1303	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1;
1304	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx;
1305	struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1;
};
1307
/* GMX_RX(port)_STATS_PKTS_DMAC CSR: 32-bit `cnt` counter in [31:0]. */
1308union cvmx_gmxx_rxx_stats_pkts_dmac {
1309	uint64_t u64;
1310	struct cvmx_gmxx_rxx_stats_pkts_dmac_s {
1311		uint64_t reserved_32_63:32;
1312		uint64_t cnt:32;
1313	} s;
1314	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx;
1315	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx;
1316	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx;
1317	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2;
1318	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx;
1319	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx;
1320	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1;
1321	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx;
1322	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1;
1323	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx;
1324	struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1;
};
1326
/* GMX_RX(port)_STATS_PKTS_DRP CSR: 32-bit `cnt` counter in [31:0]. */
1327union cvmx_gmxx_rxx_stats_pkts_drp {
1328	uint64_t u64;
1329	struct cvmx_gmxx_rxx_stats_pkts_drp_s {
1330		uint64_t reserved_32_63:32;
1331		uint64_t cnt:32;
1332	} s;
1333	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx;
1334	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx;
1335	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx;
1336	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2;
1337	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx;
1338	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx;
1339	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1;
1340	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx;
1341	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1;
1342	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx;
1343	struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1;
};
1345
/*
 * GMX_RX(port)_UDD_SKP CSR: `fcssel` at bit 8, 7-bit `len` in [6:0],
 * bit 7 reserved.  Name suggests user-defined-data skip control —
 * confirm against the HRM.
 */
1346union cvmx_gmxx_rxx_udd_skp {
1347	uint64_t u64;
1348	struct cvmx_gmxx_rxx_udd_skp_s {
1349		uint64_t reserved_9_63:55;
1350		uint64_t fcssel:1;
1351		uint64_t reserved_7_7:1;
1352		uint64_t len:7;
1353	} s;
1354	struct cvmx_gmxx_rxx_udd_skp_s cn30xx;
1355	struct cvmx_gmxx_rxx_udd_skp_s cn31xx;
1356	struct cvmx_gmxx_rxx_udd_skp_s cn38xx;
1357	struct cvmx_gmxx_rxx_udd_skp_s cn38xxp2;
1358	struct cvmx_gmxx_rxx_udd_skp_s cn50xx;
1359	struct cvmx_gmxx_rxx_udd_skp_s cn52xx;
1360	struct cvmx_gmxx_rxx_udd_skp_s cn52xxp1;
1361	struct cvmx_gmxx_rxx_udd_skp_s cn56xx;
1362	struct cvmx_gmxx_rxx_udd_skp_s cn56xxp1;
1363	struct cvmx_gmxx_rxx_udd_skp_s cn58xx;
1364	struct cvmx_gmxx_rxx_udd_skp_s cn58xxp1;
};
1366
/* GMX_RX_BP_DROP(x) CSR: 6-bit `mark` watermark field in [5:0]. */
1367union cvmx_gmxx_rx_bp_dropx {
1368	uint64_t u64;
1369	struct cvmx_gmxx_rx_bp_dropx_s {
1370		uint64_t reserved_6_63:58;
1371		uint64_t mark:6;
1372	} s;
1373	struct cvmx_gmxx_rx_bp_dropx_s cn30xx;
1374	struct cvmx_gmxx_rx_bp_dropx_s cn31xx;
1375	struct cvmx_gmxx_rx_bp_dropx_s cn38xx;
1376	struct cvmx_gmxx_rx_bp_dropx_s cn38xxp2;
1377	struct cvmx_gmxx_rx_bp_dropx_s cn50xx;
1378	struct cvmx_gmxx_rx_bp_dropx_s cn52xx;
1379	struct cvmx_gmxx_rx_bp_dropx_s cn52xxp1;
1380	struct cvmx_gmxx_rx_bp_dropx_s cn56xx;
1381	struct cvmx_gmxx_rx_bp_dropx_s cn56xxp1;
1382	struct cvmx_gmxx_rx_bp_dropx_s cn58xx;
1383	struct cvmx_gmxx_rx_bp_dropx_s cn58xxp1;
};
1385
/* GMX_RX_BP_OFF(x) CSR: 6-bit `mark` watermark field in [5:0]. */
1386union cvmx_gmxx_rx_bp_offx {
1387	uint64_t u64;
1388	struct cvmx_gmxx_rx_bp_offx_s {
1389		uint64_t reserved_6_63:58;
1390		uint64_t mark:6;
1391	} s;
1392	struct cvmx_gmxx_rx_bp_offx_s cn30xx;
1393	struct cvmx_gmxx_rx_bp_offx_s cn31xx;
1394	struct cvmx_gmxx_rx_bp_offx_s cn38xx;
1395	struct cvmx_gmxx_rx_bp_offx_s cn38xxp2;
1396	struct cvmx_gmxx_rx_bp_offx_s cn50xx;
1397	struct cvmx_gmxx_rx_bp_offx_s cn52xx;
1398	struct cvmx_gmxx_rx_bp_offx_s cn52xxp1;
1399	struct cvmx_gmxx_rx_bp_offx_s cn56xx;
1400	struct cvmx_gmxx_rx_bp_offx_s cn56xxp1;
1401	struct cvmx_gmxx_rx_bp_offx_s cn58xx;
1402	struct cvmx_gmxx_rx_bp_offx_s cn58xxp1;
};
1404
/* GMX_RX_BP_ON(x) CSR: 9-bit `mark` watermark field in [8:0] (wider
 * than the 6-bit DROP/OFF marks above). */
1405union cvmx_gmxx_rx_bp_onx {
1406	uint64_t u64;
1407	struct cvmx_gmxx_rx_bp_onx_s {
1408		uint64_t reserved_9_63:55;
1409		uint64_t mark:9;
1410	} s;
1411	struct cvmx_gmxx_rx_bp_onx_s cn30xx;
1412	struct cvmx_gmxx_rx_bp_onx_s cn31xx;
1413	struct cvmx_gmxx_rx_bp_onx_s cn38xx;
1414	struct cvmx_gmxx_rx_bp_onx_s cn38xxp2;
1415	struct cvmx_gmxx_rx_bp_onx_s cn50xx;
1416	struct cvmx_gmxx_rx_bp_onx_s cn52xx;
1417	struct cvmx_gmxx_rx_bp_onx_s cn52xxp1;
1418	struct cvmx_gmxx_rx_bp_onx_s cn56xx;
1419	struct cvmx_gmxx_rx_bp_onx_s cn56xxp1;
1420	struct cvmx_gmxx_rx_bp_onx_s cn58xx;
1421	struct cvmx_gmxx_rx_bp_onx_s cn58xxp1;
};
1423
/*
 * GMX_RX_HG2_STATUS CSR (HiGig2-capable chips only: cn52xx/cn56xx):
 * three 16-bit fields — phtim2go [47:32], xof [31:16], lgtim2go [15:0].
 */
1424union cvmx_gmxx_rx_hg2_status {
1425	uint64_t u64;
1426	struct cvmx_gmxx_rx_hg2_status_s {
1427		uint64_t reserved_48_63:16;
1428		uint64_t phtim2go:16;
1429		uint64_t xof:16;
1430		uint64_t lgtim2go:16;
1431	} s;
1432	struct cvmx_gmxx_rx_hg2_status_s cn52xx;
1433	struct cvmx_gmxx_rx_hg2_status_s cn52xxp1;
1434	struct cvmx_gmxx_rx_hg2_status_s cn56xx;
};
1436
/*
 * GMX_RX_PASS_EN CSR: 16-bit `en` mask in [15:0].  Only declared for
 * cn38xx/cn58xx-class chips.
 */
1437union cvmx_gmxx_rx_pass_en {
1438	uint64_t u64;
1439	struct cvmx_gmxx_rx_pass_en_s {
1440		uint64_t reserved_16_63:48;
1441		uint64_t en:16;
1442	} s;
1443	struct cvmx_gmxx_rx_pass_en_s cn38xx;
1444	struct cvmx_gmxx_rx_pass_en_s cn38xxp2;
1445	struct cvmx_gmxx_rx_pass_en_s cn58xx;
1446	struct cvmx_gmxx_rx_pass_en_s cn58xxp1;
};
1448
/*
 * GMX_RX_PASS_MAP(x) CSR: 4-bit `dprt` field in [3:0].  Only declared
 * for cn38xx/cn58xx-class chips.  Name suggests a destination-port
 * mapping — confirm against the HRM.
 */
1449union cvmx_gmxx_rx_pass_mapx {
1450	uint64_t u64;
1451	struct cvmx_gmxx_rx_pass_mapx_s {
1452		uint64_t reserved_4_63:60;
1453		uint64_t dprt:4;
1454	} s;
1455	struct cvmx_gmxx_rx_pass_mapx_s cn38xx;
1456	struct cvmx_gmxx_rx_pass_mapx_s cn38xxp2;
1457	struct cvmx_gmxx_rx_pass_mapx_s cn58xx;
1458	struct cvmx_gmxx_rx_pass_mapx_s cn58xxp1;
};
1460
/*
 * GMX_RX_PRT_INFO CSR: per-port `drop` and `commit` status vectors.
 * Field widths track the chip's port count: 16+16 bits on
 * cn38xx/cn58xx, 3+3 on cn30xx-class, 4+4 on cn52xx-class, with the
 * remaining bit positions reserved.
 */
1461union cvmx_gmxx_rx_prt_info {
1462	uint64_t u64;
1463	struct cvmx_gmxx_rx_prt_info_s {
1464		uint64_t reserved_32_63:32;
1465		uint64_t drop:16;
1466		uint64_t commit:16;
1467	} s;
1468	struct cvmx_gmxx_rx_prt_info_cn30xx {
1469		uint64_t reserved_19_63:45;
1470		uint64_t drop:3;
1471		uint64_t reserved_3_15:13;
1472		uint64_t commit:3;
1473	} cn30xx;
1474	struct cvmx_gmxx_rx_prt_info_cn30xx cn31xx;
1475	struct cvmx_gmxx_rx_prt_info_s cn38xx;
1476	struct cvmx_gmxx_rx_prt_info_cn30xx cn50xx;
1477	struct cvmx_gmxx_rx_prt_info_cn52xx {
1478		uint64_t reserved_20_63:44;
1479		uint64_t drop:4;
1480		uint64_t reserved_4_15:12;
1481		uint64_t commit:4;
1482	} cn52xx;
1483	struct cvmx_gmxx_rx_prt_info_cn52xx cn52xxp1;
1484	struct cvmx_gmxx_rx_prt_info_cn52xx cn56xx;
1485	struct cvmx_gmxx_rx_prt_info_cn52xx cn56xxp1;
1486	struct cvmx_gmxx_rx_prt_info_s cn58xx;
1487	struct cvmx_gmxx_rx_prt_info_s cn58xxp1;
};
1489
/* GMX_RX_PRTS CSR: 3-bit `prts` field in [2:0] (number of RX ports —
 * name-based inference, confirm in HRM). */
1490union cvmx_gmxx_rx_prts {
1491	uint64_t u64;
1492	struct cvmx_gmxx_rx_prts_s {
1493		uint64_t reserved_3_63:61;
1494		uint64_t prts:3;
1495	} s;
1496	struct cvmx_gmxx_rx_prts_s cn30xx;
1497	struct cvmx_gmxx_rx_prts_s cn31xx;
1498	struct cvmx_gmxx_rx_prts_s cn38xx;
1499	struct cvmx_gmxx_rx_prts_s cn38xxp2;
1500	struct cvmx_gmxx_rx_prts_s cn50xx;
1501	struct cvmx_gmxx_rx_prts_s cn52xx;
1502	struct cvmx_gmxx_rx_prts_s cn52xxp1;
1503	struct cvmx_gmxx_rx_prts_s cn56xx;
1504	struct cvmx_gmxx_rx_prts_s cn56xxp1;
1505	struct cvmx_gmxx_rx_prts_s cn58xx;
1506	struct cvmx_gmxx_rx_prts_s cn58xxp1;
};
1508
/*
 * GMX_RX_TX_STATUS CSR: 3-bit `tx` in [6:4] and 3-bit `rx` in [2:0],
 * bit 3 reserved.  Only declared for cn30xx/cn31xx/cn50xx.
 */
1509union cvmx_gmxx_rx_tx_status {
1510	uint64_t u64;
1511	struct cvmx_gmxx_rx_tx_status_s {
1512		uint64_t reserved_7_63:57;
1513		uint64_t tx:3;
1514		uint64_t reserved_3_3:1;
1515		uint64_t rx:3;
1516	} s;
1517	struct cvmx_gmxx_rx_tx_status_s cn30xx;
1518	struct cvmx_gmxx_rx_tx_status_s cn31xx;
1519	struct cvmx_gmxx_rx_tx_status_s cn50xx;
};
1521
/*
 * GMX_RX_XAUI_BAD_COL CSR (XAUI-capable cn52xx/cn56xx only): captured
 * bad column — val [39], state [38:36], lane_rxc [35:32],
 * lane_rxd [31:0].
 */
1522union cvmx_gmxx_rx_xaui_bad_col {
1523	uint64_t u64;
1524	struct cvmx_gmxx_rx_xaui_bad_col_s {
1525		uint64_t reserved_40_63:24;
1526		uint64_t val:1;
1527		uint64_t state:3;
1528		uint64_t lane_rxc:4;
1529		uint64_t lane_rxd:32;
1530	} s;
1531	struct cvmx_gmxx_rx_xaui_bad_col_s cn52xx;
1532	struct cvmx_gmxx_rx_xaui_bad_col_s cn52xxp1;
1533	struct cvmx_gmxx_rx_xaui_bad_col_s cn56xx;
1534	struct cvmx_gmxx_rx_xaui_bad_col_s cn56xxp1;
};
1536
/* GMX_RX_XAUI_CTL CSR (cn52xx/cn56xx only): 2-bit `status` in [1:0]. */
1537union cvmx_gmxx_rx_xaui_ctl {
1538	uint64_t u64;
1539	struct cvmx_gmxx_rx_xaui_ctl_s {
1540		uint64_t reserved_2_63:62;
1541		uint64_t status:2;
1542	} s;
1543	struct cvmx_gmxx_rx_xaui_ctl_s cn52xx;
1544	struct cvmx_gmxx_rx_xaui_ctl_s cn52xxp1;
1545	struct cvmx_gmxx_rx_xaui_ctl_s cn56xx;
1546	struct cvmx_gmxx_rx_xaui_ctl_s cn56xxp1;
};
1548
/* GMX_SMAC(x) CSR: 48-bit `smac` field in [47:0] — a MAC-address-width
 * field (name suggests source MAC; confirm in HRM). */
1549union cvmx_gmxx_smacx {
1550	uint64_t u64;
1551	struct cvmx_gmxx_smacx_s {
1552		uint64_t reserved_48_63:16;
1553		uint64_t smac:48;
1554	} s;
1555	struct cvmx_gmxx_smacx_s cn30xx;
1556	struct cvmx_gmxx_smacx_s cn31xx;
1557	struct cvmx_gmxx_smacx_s cn38xx;
1558	struct cvmx_gmxx_smacx_s cn38xxp2;
1559	struct cvmx_gmxx_smacx_s cn50xx;
1560	struct cvmx_gmxx_smacx_s cn52xx;
1561	struct cvmx_gmxx_smacx_s cn52xxp1;
1562	struct cvmx_gmxx_smacx_s cn56xx;
1563	struct cvmx_gmxx_smacx_s cn56xxp1;
1564	struct cvmx_gmxx_smacx_s cn58xx;
1565	struct cvmx_gmxx_smacx_s cn58xxp1;
};
1567
/* GMX_STAT_BP CSR: `bp` flag at bit 16, 16-bit `cnt` in [15:0]. */
1568union cvmx_gmxx_stat_bp {
1569	uint64_t u64;
1570	struct cvmx_gmxx_stat_bp_s {
1571		uint64_t reserved_17_63:47;
1572		uint64_t bp:1;
1573		uint64_t cnt:16;
1574	} s;
1575	struct cvmx_gmxx_stat_bp_s cn30xx;
1576	struct cvmx_gmxx_stat_bp_s cn31xx;
1577	struct cvmx_gmxx_stat_bp_s cn38xx;
1578	struct cvmx_gmxx_stat_bp_s cn38xxp2;
1579	struct cvmx_gmxx_stat_bp_s cn50xx;
1580	struct cvmx_gmxx_stat_bp_s cn52xx;
1581	struct cvmx_gmxx_stat_bp_s cn52xxp1;
1582	struct cvmx_gmxx_stat_bp_s cn56xx;
1583	struct cvmx_gmxx_stat_bp_s cn56xxp1;
1584	struct cvmx_gmxx_stat_bp_s cn58xx;
1585	struct cvmx_gmxx_stat_bp_s cn58xxp1;
};
1587
/*
 * GMX_TX(port)_APPEND CSR: TX append controls — force_fcs [3],
 * fcs [2], pad [1], preamble [0].  Same layout on every model.
 */
1588union cvmx_gmxx_txx_append {
1589	uint64_t u64;
1590	struct cvmx_gmxx_txx_append_s {
1591		uint64_t reserved_4_63:60;
1592		uint64_t force_fcs:1;
1593		uint64_t fcs:1;
1594		uint64_t pad:1;
1595		uint64_t preamble:1;
1596	} s;
1597	struct cvmx_gmxx_txx_append_s cn30xx;
1598	struct cvmx_gmxx_txx_append_s cn31xx;
1599	struct cvmx_gmxx_txx_append_s cn38xx;
1600	struct cvmx_gmxx_txx_append_s cn38xxp2;
1601	struct cvmx_gmxx_txx_append_s cn50xx;
1602	struct cvmx_gmxx_txx_append_s cn52xx;
1603	struct cvmx_gmxx_txx_append_s cn52xxp1;
1604	struct cvmx_gmxx_txx_append_s cn56xx;
1605	struct cvmx_gmxx_txx_append_s cn56xxp1;
1606	struct cvmx_gmxx_txx_append_s cn58xx;
1607	struct cvmx_gmxx_txx_append_s cn58xxp1;
};
1609
/* GMX_TX(port)_BURST CSR: 16-bit `burst` field in [15:0]. */
1610union cvmx_gmxx_txx_burst {
1611	uint64_t u64;
1612	struct cvmx_gmxx_txx_burst_s {
1613		uint64_t reserved_16_63:48;
1614		uint64_t burst:16;
1615	} s;
1616	struct cvmx_gmxx_txx_burst_s cn30xx;
1617	struct cvmx_gmxx_txx_burst_s cn31xx;
1618	struct cvmx_gmxx_txx_burst_s cn38xx;
1619	struct cvmx_gmxx_txx_burst_s cn38xxp2;
1620	struct cvmx_gmxx_txx_burst_s cn50xx;
1621	struct cvmx_gmxx_txx_burst_s cn52xx;
1622	struct cvmx_gmxx_txx_burst_s cn52xxp1;
1623	struct cvmx_gmxx_txx_burst_s cn56xx;
1624	struct cvmx_gmxx_txx_burst_s cn56xxp1;
1625	struct cvmx_gmxx_txx_burst_s cn58xx;
1626	struct cvmx_gmxx_txx_burst_s cn58xxp1;
};
1628
/* GMX_TX(port)_CBFC_XOFF CSR (cn52xx/cn56xx only): 16-bit `xoff`
 * mask in [15:0] — class-based flow control, per the CBFC name. */
1629union cvmx_gmxx_txx_cbfc_xoff {
1630	uint64_t u64;
1631	struct cvmx_gmxx_txx_cbfc_xoff_s {
1632		uint64_t reserved_16_63:48;
1633		uint64_t xoff:16;
1634	} s;
1635	struct cvmx_gmxx_txx_cbfc_xoff_s cn52xx;
1636	struct cvmx_gmxx_txx_cbfc_xoff_s cn56xx;
};
1638
/* GMX_TX(port)_CBFC_XON CSR (cn52xx/cn56xx only): 16-bit `xon`
 * mask in [15:0] — companion of the XOFF register above. */
1639union cvmx_gmxx_txx_cbfc_xon {
1640	uint64_t u64;
1641	struct cvmx_gmxx_txx_cbfc_xon_s {
1642		uint64_t reserved_16_63:48;
1643		uint64_t xon:16;
1644	} s;
1645	struct cvmx_gmxx_txx_cbfc_xon_s cn52xx;
1646	struct cvmx_gmxx_txx_cbfc_xon_s cn56xx;
};
1648
/*
 * GMX_TX(port)_CLK CSR: 6-bit `clk_cnt` divider/count field in [5:0].
 * Not declared for cn52xx/cn56xx models.
 */
1649union cvmx_gmxx_txx_clk {
1650	uint64_t u64;
1651	struct cvmx_gmxx_txx_clk_s {
1652		uint64_t reserved_6_63:58;
1653		uint64_t clk_cnt:6;
1654	} s;
1655	struct cvmx_gmxx_txx_clk_s cn30xx;
1656	struct cvmx_gmxx_txx_clk_s cn31xx;
1657	struct cvmx_gmxx_txx_clk_s cn38xx;
1658	struct cvmx_gmxx_txx_clk_s cn38xxp2;
1659	struct cvmx_gmxx_txx_clk_s cn50xx;
1660	struct cvmx_gmxx_txx_clk_s cn58xx;
1661	struct cvmx_gmxx_txx_clk_s cn58xxp1;
};
1663
/* GMX_TX(port)_CTL CSR: xsdef_en at bit 1, xscol_en at bit 0
 * (excessive-deferral / excessive-collision enables, per names). */
1664union cvmx_gmxx_txx_ctl {
1665	uint64_t u64;
1666	struct cvmx_gmxx_txx_ctl_s {
1667		uint64_t reserved_2_63:62;
1668		uint64_t xsdef_en:1;
1669		uint64_t xscol_en:1;
1670	} s;
1671	struct cvmx_gmxx_txx_ctl_s cn30xx;
1672	struct cvmx_gmxx_txx_ctl_s cn31xx;
1673	struct cvmx_gmxx_txx_ctl_s cn38xx;
1674	struct cvmx_gmxx_txx_ctl_s cn38xxp2;
1675	struct cvmx_gmxx_txx_ctl_s cn50xx;
1676	struct cvmx_gmxx_txx_ctl_s cn52xx;
1677	struct cvmx_gmxx_txx_ctl_s cn52xxp1;
1678	struct cvmx_gmxx_txx_ctl_s cn56xx;
1679	struct cvmx_gmxx_txx_ctl_s cn56xxp1;
1680	struct cvmx_gmxx_txx_ctl_s cn58xx;
1681	struct cvmx_gmxx_txx_ctl_s cn58xxp1;
};
1683
/* GMX_TX(port)_MIN_PKT CSR: 8-bit `min_size` field in [7:0]. */
1684union cvmx_gmxx_txx_min_pkt {
1685	uint64_t u64;
1686	struct cvmx_gmxx_txx_min_pkt_s {
1687		uint64_t reserved_8_63:56;
1688		uint64_t min_size:8;
1689	} s;
1690	struct cvmx_gmxx_txx_min_pkt_s cn30xx;
1691	struct cvmx_gmxx_txx_min_pkt_s cn31xx;
1692	struct cvmx_gmxx_txx_min_pkt_s cn38xx;
1693	struct cvmx_gmxx_txx_min_pkt_s cn38xxp2;
1694	struct cvmx_gmxx_txx_min_pkt_s cn50xx;
1695	struct cvmx_gmxx_txx_min_pkt_s cn52xx;
1696	struct cvmx_gmxx_txx_min_pkt_s cn52xxp1;
1697	struct cvmx_gmxx_txx_min_pkt_s cn56xx;
1698	struct cvmx_gmxx_txx_min_pkt_s cn56xxp1;
1699	struct cvmx_gmxx_txx_min_pkt_s cn58xx;
1700	struct cvmx_gmxx_txx_min_pkt_s cn58xxp1;
};
1702
/* GMX_TX(port)_PAUSE_PKT_INTERVAL CSR: 16-bit `interval` in [15:0]. */
1703union cvmx_gmxx_txx_pause_pkt_interval {
1704	uint64_t u64;
1705	struct cvmx_gmxx_txx_pause_pkt_interval_s {
1706		uint64_t reserved_16_63:48;
1707		uint64_t interval:16;
1708	} s;
1709	struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx;
1710	struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx;
1711	struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx;
1712	struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2;
1713	struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx;
1714	struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx;
1715	struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1;
1716	struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx;
1717	struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1;
1718	struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx;
1719	struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1;
};
1721
/* GMX_TX(port)_PAUSE_PKT_TIME CSR: 16-bit `time` in [15:0]. */
1722union cvmx_gmxx_txx_pause_pkt_time {
1723	uint64_t u64;
1724	struct cvmx_gmxx_txx_pause_pkt_time_s {
1725		uint64_t reserved_16_63:48;
1726		uint64_t time:16;
1727	} s;
1728	struct cvmx_gmxx_txx_pause_pkt_time_s cn30xx;
1729	struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx;
1730	struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx;
1731	struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2;
1732	struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx;
1733	struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx;
1734	struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1;
1735	struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx;
1736	struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1;
1737	struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx;
1738	struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1;
};
1740
/*
 * GMX_TX(port)_PAUSE_TOGO CSR: remaining pause time.  Full layout has
 * msg_time [31:16] and time [15:0]; the cn30xx-class layout exposes
 * only the 16-bit `time` field.  Note cn52xx/cn56xx use the full
 * layout while cn56xxp1 falls back to the cn30xx one.
 */
1741union cvmx_gmxx_txx_pause_togo {
1742	uint64_t u64;
1743	struct cvmx_gmxx_txx_pause_togo_s {
1744		uint64_t reserved_32_63:32;
1745		uint64_t msg_time:16;
1746		uint64_t time:16;
1747	} s;
1748	struct cvmx_gmxx_txx_pause_togo_cn30xx {
1749		uint64_t reserved_16_63:48;
1750		uint64_t time:16;
1751	} cn30xx;
1752	struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx;
1753	struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx;
1754	struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2;
1755	struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx;
1756	struct cvmx_gmxx_txx_pause_togo_s cn52xx;
1757	struct cvmx_gmxx_txx_pause_togo_s cn52xxp1;
1758	struct cvmx_gmxx_txx_pause_togo_s cn56xx;
1759	struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1;
1760	struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx;
1761	struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1;
};
1763
/* GMX_TX(port)_PAUSE_ZERO CSR: single `send` bit at position 0. */
1764union cvmx_gmxx_txx_pause_zero {
1765	uint64_t u64;
1766	struct cvmx_gmxx_txx_pause_zero_s {
1767		uint64_t reserved_1_63:63;
1768		uint64_t send:1;
1769	} s;
1770	struct cvmx_gmxx_txx_pause_zero_s cn30xx;
1771	struct cvmx_gmxx_txx_pause_zero_s cn31xx;
1772	struct cvmx_gmxx_txx_pause_zero_s cn38xx;
1773	struct cvmx_gmxx_txx_pause_zero_s cn38xxp2;
1774	struct cvmx_gmxx_txx_pause_zero_s cn50xx;
1775	struct cvmx_gmxx_txx_pause_zero_s cn52xx;
1776	struct cvmx_gmxx_txx_pause_zero_s cn52xxp1;
1777	struct cvmx_gmxx_txx_pause_zero_s cn56xx;
1778	struct cvmx_gmxx_txx_pause_zero_s cn56xxp1;
1779	struct cvmx_gmxx_txx_pause_zero_s cn58xx;
1780	struct cvmx_gmxx_txx_pause_zero_s cn58xxp1;
};
1782
/* GMX_TX(port)_SGMII_CTL CSR (SGMII-capable cn52xx/cn56xx only):
 * single `align` bit at position 0. */
1783union cvmx_gmxx_txx_sgmii_ctl {
1784	uint64_t u64;
1785	struct cvmx_gmxx_txx_sgmii_ctl_s {
1786		uint64_t reserved_1_63:63;
1787		uint64_t align:1;
1788	} s;
1789	struct cvmx_gmxx_txx_sgmii_ctl_s cn52xx;
1790	struct cvmx_gmxx_txx_sgmii_ctl_s cn52xxp1;
1791	struct cvmx_gmxx_txx_sgmii_ctl_s cn56xx;
1792	struct cvmx_gmxx_txx_sgmii_ctl_s cn56xxp1;
};
1794
1795union cvmx_gmxx_txx_slot {
1796 uint64_t u64;
1797 struct cvmx_gmxx_txx_slot_s {
1798 uint64_t reserved_10_63:54;
1799 uint64_t slot:10;
1800 } s;
1801 struct cvmx_gmxx_txx_slot_s cn30xx;
1802 struct cvmx_gmxx_txx_slot_s cn31xx;
1803 struct cvmx_gmxx_txx_slot_s cn38xx;
1804 struct cvmx_gmxx_txx_slot_s cn38xxp2;
1805 struct cvmx_gmxx_txx_slot_s cn50xx;
1806 struct cvmx_gmxx_txx_slot_s cn52xx;
1807 struct cvmx_gmxx_txx_slot_s cn52xxp1;
1808 struct cvmx_gmxx_txx_slot_s cn56xx;
1809 struct cvmx_gmxx_txx_slot_s cn56xxp1;
1810 struct cvmx_gmxx_txx_slot_s cn58xx;
1811 struct cvmx_gmxx_txx_slot_s cn58xxp1;
1812};
1813
1814union cvmx_gmxx_txx_soft_pause {
1815 uint64_t u64;
1816 struct cvmx_gmxx_txx_soft_pause_s {
1817 uint64_t reserved_16_63:48;
1818 uint64_t time:16;
1819 } s;
1820 struct cvmx_gmxx_txx_soft_pause_s cn30xx;
1821 struct cvmx_gmxx_txx_soft_pause_s cn31xx;
1822 struct cvmx_gmxx_txx_soft_pause_s cn38xx;
1823 struct cvmx_gmxx_txx_soft_pause_s cn38xxp2;
1824 struct cvmx_gmxx_txx_soft_pause_s cn50xx;
1825 struct cvmx_gmxx_txx_soft_pause_s cn52xx;
1826 struct cvmx_gmxx_txx_soft_pause_s cn52xxp1;
1827 struct cvmx_gmxx_txx_soft_pause_s cn56xx;
1828 struct cvmx_gmxx_txx_soft_pause_s cn56xxp1;
1829 struct cvmx_gmxx_txx_soft_pause_s cn58xx;
1830 struct cvmx_gmxx_txx_soft_pause_s cn58xxp1;
1831};
1832
/*
 * GMX_TX(port)_STAT0..STAT9 — per-port transmit statistics registers.
 * Field names suggest event counters (e.g. scol/mcol = single/multiple
 * collisions, octs = octets, pkts = packets, hist* = size histogram
 * buckets) — confirm exact semantics against the Octeon HRM.
 * All layouts are identical across models.
 */
union cvmx_gmxx_txx_stat0 {
        uint64_t u64;
        struct cvmx_gmxx_txx_stat0_s {
                uint64_t xsdef:32;      /* excessive-deferral count? — verify */
                uint64_t xscol:32;      /* excessive-collision count? — verify */
        } s;
        struct cvmx_gmxx_txx_stat0_s cn30xx;
        struct cvmx_gmxx_txx_stat0_s cn31xx;
        struct cvmx_gmxx_txx_stat0_s cn38xx;
        struct cvmx_gmxx_txx_stat0_s cn38xxp2;
        struct cvmx_gmxx_txx_stat0_s cn50xx;
        struct cvmx_gmxx_txx_stat0_s cn52xx;
        struct cvmx_gmxx_txx_stat0_s cn52xxp1;
        struct cvmx_gmxx_txx_stat0_s cn56xx;
        struct cvmx_gmxx_txx_stat0_s cn56xxp1;
        struct cvmx_gmxx_txx_stat0_s cn58xx;
        struct cvmx_gmxx_txx_stat0_s cn58xxp1;
};

/* GMX_TX(port)_STAT1 — scol/mcol counter pair. */
union cvmx_gmxx_txx_stat1 {
        uint64_t u64;
        struct cvmx_gmxx_txx_stat1_s {
                uint64_t scol:32;
                uint64_t mcol:32;
        } s;
        struct cvmx_gmxx_txx_stat1_s cn30xx;
        struct cvmx_gmxx_txx_stat1_s cn31xx;
        struct cvmx_gmxx_txx_stat1_s cn38xx;
        struct cvmx_gmxx_txx_stat1_s cn38xxp2;
        struct cvmx_gmxx_txx_stat1_s cn50xx;
        struct cvmx_gmxx_txx_stat1_s cn52xx;
        struct cvmx_gmxx_txx_stat1_s cn52xxp1;
        struct cvmx_gmxx_txx_stat1_s cn56xx;
        struct cvmx_gmxx_txx_stat1_s cn56xxp1;
        struct cvmx_gmxx_txx_stat1_s cn58xx;
        struct cvmx_gmxx_txx_stat1_s cn58xxp1;
};

/* GMX_TX(port)_STAT2 — 48-bit octet count. */
union cvmx_gmxx_txx_stat2 {
        uint64_t u64;
        struct cvmx_gmxx_txx_stat2_s {
                uint64_t reserved_48_63:16;
                uint64_t octs:48;
        } s;
        struct cvmx_gmxx_txx_stat2_s cn30xx;
        struct cvmx_gmxx_txx_stat2_s cn31xx;
        struct cvmx_gmxx_txx_stat2_s cn38xx;
        struct cvmx_gmxx_txx_stat2_s cn38xxp2;
        struct cvmx_gmxx_txx_stat2_s cn50xx;
        struct cvmx_gmxx_txx_stat2_s cn52xx;
        struct cvmx_gmxx_txx_stat2_s cn52xxp1;
        struct cvmx_gmxx_txx_stat2_s cn56xx;
        struct cvmx_gmxx_txx_stat2_s cn56xxp1;
        struct cvmx_gmxx_txx_stat2_s cn58xx;
        struct cvmx_gmxx_txx_stat2_s cn58xxp1;
};

/* GMX_TX(port)_STAT3 — 32-bit packet count. */
union cvmx_gmxx_txx_stat3 {
        uint64_t u64;
        struct cvmx_gmxx_txx_stat3_s {
                uint64_t reserved_32_63:32;
                uint64_t pkts:32;
        } s;
        struct cvmx_gmxx_txx_stat3_s cn30xx;
        struct cvmx_gmxx_txx_stat3_s cn31xx;
        struct cvmx_gmxx_txx_stat3_s cn38xx;
        struct cvmx_gmxx_txx_stat3_s cn38xxp2;
        struct cvmx_gmxx_txx_stat3_s cn50xx;
        struct cvmx_gmxx_txx_stat3_s cn52xx;
        struct cvmx_gmxx_txx_stat3_s cn52xxp1;
        struct cvmx_gmxx_txx_stat3_s cn56xx;
        struct cvmx_gmxx_txx_stat3_s cn56xxp1;
        struct cvmx_gmxx_txx_stat3_s cn58xx;
        struct cvmx_gmxx_txx_stat3_s cn58xxp1;
};

/* GMX_TX(port)_STAT4 — histogram buckets 0 and 1. */
union cvmx_gmxx_txx_stat4 {
        uint64_t u64;
        struct cvmx_gmxx_txx_stat4_s {
                uint64_t hist1:32;
                uint64_t hist0:32;
        } s;
        struct cvmx_gmxx_txx_stat4_s cn30xx;
        struct cvmx_gmxx_txx_stat4_s cn31xx;
        struct cvmx_gmxx_txx_stat4_s cn38xx;
        struct cvmx_gmxx_txx_stat4_s cn38xxp2;
        struct cvmx_gmxx_txx_stat4_s cn50xx;
        struct cvmx_gmxx_txx_stat4_s cn52xx;
        struct cvmx_gmxx_txx_stat4_s cn52xxp1;
        struct cvmx_gmxx_txx_stat4_s cn56xx;
        struct cvmx_gmxx_txx_stat4_s cn56xxp1;
        struct cvmx_gmxx_txx_stat4_s cn58xx;
        struct cvmx_gmxx_txx_stat4_s cn58xxp1;
};

/* GMX_TX(port)_STAT5 — histogram buckets 2 and 3. */
union cvmx_gmxx_txx_stat5 {
        uint64_t u64;
        struct cvmx_gmxx_txx_stat5_s {
                uint64_t hist3:32;
                uint64_t hist2:32;
        } s;
        struct cvmx_gmxx_txx_stat5_s cn30xx;
        struct cvmx_gmxx_txx_stat5_s cn31xx;
        struct cvmx_gmxx_txx_stat5_s cn38xx;
        struct cvmx_gmxx_txx_stat5_s cn38xxp2;
        struct cvmx_gmxx_txx_stat5_s cn50xx;
        struct cvmx_gmxx_txx_stat5_s cn52xx;
        struct cvmx_gmxx_txx_stat5_s cn52xxp1;
        struct cvmx_gmxx_txx_stat5_s cn56xx;
        struct cvmx_gmxx_txx_stat5_s cn56xxp1;
        struct cvmx_gmxx_txx_stat5_s cn58xx;
        struct cvmx_gmxx_txx_stat5_s cn58xxp1;
};

/* GMX_TX(port)_STAT6 — histogram buckets 4 and 5. */
union cvmx_gmxx_txx_stat6 {
        uint64_t u64;
        struct cvmx_gmxx_txx_stat6_s {
                uint64_t hist5:32;
                uint64_t hist4:32;
        } s;
        struct cvmx_gmxx_txx_stat6_s cn30xx;
        struct cvmx_gmxx_txx_stat6_s cn31xx;
        struct cvmx_gmxx_txx_stat6_s cn38xx;
        struct cvmx_gmxx_txx_stat6_s cn38xxp2;
        struct cvmx_gmxx_txx_stat6_s cn50xx;
        struct cvmx_gmxx_txx_stat6_s cn52xx;
        struct cvmx_gmxx_txx_stat6_s cn52xxp1;
        struct cvmx_gmxx_txx_stat6_s cn56xx;
        struct cvmx_gmxx_txx_stat6_s cn56xxp1;
        struct cvmx_gmxx_txx_stat6_s cn58xx;
        struct cvmx_gmxx_txx_stat6_s cn58xxp1;
};

/* GMX_TX(port)_STAT7 — histogram buckets 6 and 7. */
union cvmx_gmxx_txx_stat7 {
        uint64_t u64;
        struct cvmx_gmxx_txx_stat7_s {
                uint64_t hist7:32;
                uint64_t hist6:32;
        } s;
        struct cvmx_gmxx_txx_stat7_s cn30xx;
        struct cvmx_gmxx_txx_stat7_s cn31xx;
        struct cvmx_gmxx_txx_stat7_s cn38xx;
        struct cvmx_gmxx_txx_stat7_s cn38xxp2;
        struct cvmx_gmxx_txx_stat7_s cn50xx;
        struct cvmx_gmxx_txx_stat7_s cn52xx;
        struct cvmx_gmxx_txx_stat7_s cn52xxp1;
        struct cvmx_gmxx_txx_stat7_s cn56xx;
        struct cvmx_gmxx_txx_stat7_s cn56xxp1;
        struct cvmx_gmxx_txx_stat7_s cn58xx;
        struct cvmx_gmxx_txx_stat7_s cn58xxp1;
};

/* GMX_TX(port)_STAT8 — multicast/broadcast counter pair. */
union cvmx_gmxx_txx_stat8 {
        uint64_t u64;
        struct cvmx_gmxx_txx_stat8_s {
                uint64_t mcst:32;
                uint64_t bcst:32;
        } s;
        struct cvmx_gmxx_txx_stat8_s cn30xx;
        struct cvmx_gmxx_txx_stat8_s cn31xx;
        struct cvmx_gmxx_txx_stat8_s cn38xx;
        struct cvmx_gmxx_txx_stat8_s cn38xxp2;
        struct cvmx_gmxx_txx_stat8_s cn50xx;
        struct cvmx_gmxx_txx_stat8_s cn52xx;
        struct cvmx_gmxx_txx_stat8_s cn52xxp1;
        struct cvmx_gmxx_txx_stat8_s cn56xx;
        struct cvmx_gmxx_txx_stat8_s cn56xxp1;
        struct cvmx_gmxx_txx_stat8_s cn58xx;
        struct cvmx_gmxx_txx_stat8_s cn58xxp1;
};

/* GMX_TX(port)_STAT9 — underflow/control counter pair. */
union cvmx_gmxx_txx_stat9 {
        uint64_t u64;
        struct cvmx_gmxx_txx_stat9_s {
                uint64_t undflw:32;
                uint64_t ctl:32;
        } s;
        struct cvmx_gmxx_txx_stat9_s cn30xx;
        struct cvmx_gmxx_txx_stat9_s cn31xx;
        struct cvmx_gmxx_txx_stat9_s cn38xx;
        struct cvmx_gmxx_txx_stat9_s cn38xxp2;
        struct cvmx_gmxx_txx_stat9_s cn50xx;
        struct cvmx_gmxx_txx_stat9_s cn52xx;
        struct cvmx_gmxx_txx_stat9_s cn52xxp1;
        struct cvmx_gmxx_txx_stat9_s cn56xx;
        struct cvmx_gmxx_txx_stat9_s cn56xxp1;
        struct cvmx_gmxx_txx_stat9_s cn58xx;
        struct cvmx_gmxx_txx_stat9_s cn58xxp1;
};
2022
/*
 * GMX_TX(port)_STATS_CTL — rd_clr presumably selects read-to-clear
 * behavior for the STAT registers above (verify against the HRM).
 */
union cvmx_gmxx_txx_stats_ctl {
        uint64_t u64;
        struct cvmx_gmxx_txx_stats_ctl_s {
                uint64_t reserved_1_63:63;
                uint64_t rd_clr:1;
        } s;
        struct cvmx_gmxx_txx_stats_ctl_s cn30xx;
        struct cvmx_gmxx_txx_stats_ctl_s cn31xx;
        struct cvmx_gmxx_txx_stats_ctl_s cn38xx;
        struct cvmx_gmxx_txx_stats_ctl_s cn38xxp2;
        struct cvmx_gmxx_txx_stats_ctl_s cn50xx;
        struct cvmx_gmxx_txx_stats_ctl_s cn52xx;
        struct cvmx_gmxx_txx_stats_ctl_s cn52xxp1;
        struct cvmx_gmxx_txx_stats_ctl_s cn56xx;
        struct cvmx_gmxx_txx_stats_ctl_s cn56xxp1;
        struct cvmx_gmxx_txx_stats_ctl_s cn58xx;
        struct cvmx_gmxx_txx_stats_ctl_s cn58xxp1;
};

/*
 * GMX_TX(port)_THRESH — TX threshold count. cn30xx/cn31xx/cn50xx use a
 * narrower 7-bit field; all other models use 9 bits.
 */
union cvmx_gmxx_txx_thresh {
        uint64_t u64;
        struct cvmx_gmxx_txx_thresh_s {
                uint64_t reserved_9_63:55;
                uint64_t cnt:9;
        } s;
        struct cvmx_gmxx_txx_thresh_cn30xx {
                uint64_t reserved_7_63:57;
                uint64_t cnt:7;
        } cn30xx;
        struct cvmx_gmxx_txx_thresh_cn30xx cn31xx;
        struct cvmx_gmxx_txx_thresh_s cn38xx;
        struct cvmx_gmxx_txx_thresh_s cn38xxp2;
        struct cvmx_gmxx_txx_thresh_cn30xx cn50xx;
        struct cvmx_gmxx_txx_thresh_s cn52xx;
        struct cvmx_gmxx_txx_thresh_s cn52xxp1;
        struct cvmx_gmxx_txx_thresh_s cn56xx;
        struct cvmx_gmxx_txx_thresh_s cn56xxp1;
        struct cvmx_gmxx_txx_thresh_s cn58xx;
        struct cvmx_gmxx_txx_thresh_s cn58xxp1;
};

/*
 * GMX_TX_BP — per-port backpressure bits; one bit per port (3 ports on
 * cn30xx/cn31xx/cn50xx, 4 on the rest).
 */
union cvmx_gmxx_tx_bp {
        uint64_t u64;
        struct cvmx_gmxx_tx_bp_s {
                uint64_t reserved_4_63:60;
                uint64_t bp:4;
        } s;
        struct cvmx_gmxx_tx_bp_cn30xx {
                uint64_t reserved_3_63:61;
                uint64_t bp:3;
        } cn30xx;
        struct cvmx_gmxx_tx_bp_cn30xx cn31xx;
        struct cvmx_gmxx_tx_bp_s cn38xx;
        struct cvmx_gmxx_tx_bp_s cn38xxp2;
        struct cvmx_gmxx_tx_bp_cn30xx cn50xx;
        struct cvmx_gmxx_tx_bp_s cn52xx;
        struct cvmx_gmxx_tx_bp_s cn52xxp1;
        struct cvmx_gmxx_tx_bp_s cn56xx;
        struct cvmx_gmxx_tx_bp_s cn56xxp1;
        struct cvmx_gmxx_tx_bp_s cn58xx;
        struct cvmx_gmxx_tx_bp_s cn58xxp1;
};
2085
/* GMX_TX_CLK_MSK(x) — single clock-mask bit; cn30xx/cn50xx only. */
union cvmx_gmxx_tx_clk_mskx {
        uint64_t u64;
        struct cvmx_gmxx_tx_clk_mskx_s {
                uint64_t reserved_1_63:63;
                uint64_t msk:1;
        } s;
        struct cvmx_gmxx_tx_clk_mskx_s cn30xx;
        struct cvmx_gmxx_tx_clk_mskx_s cn50xx;
};

/*
 * GMX_TX_COL_ATTEMPT — 5-bit collision-attempt limit; identical on all
 * models.
 */
union cvmx_gmxx_tx_col_attempt {
        uint64_t u64;
        struct cvmx_gmxx_tx_col_attempt_s {
                uint64_t reserved_5_63:59;
                uint64_t limit:5;
        } s;
        struct cvmx_gmxx_tx_col_attempt_s cn30xx;
        struct cvmx_gmxx_tx_col_attempt_s cn31xx;
        struct cvmx_gmxx_tx_col_attempt_s cn38xx;
        struct cvmx_gmxx_tx_col_attempt_s cn38xxp2;
        struct cvmx_gmxx_tx_col_attempt_s cn50xx;
        struct cvmx_gmxx_tx_col_attempt_s cn52xx;
        struct cvmx_gmxx_tx_col_attempt_s cn52xxp1;
        struct cvmx_gmxx_tx_col_attempt_s cn56xx;
        struct cvmx_gmxx_tx_col_attempt_s cn56xxp1;
        struct cvmx_gmxx_tx_col_attempt_s cn58xx;
        struct cvmx_gmxx_tx_col_attempt_s cn58xxp1;
};

/*
 * GMX_TX_CORRUPT — one bit per port (3 ports on cn30xx/cn31xx/cn50xx,
 * 4 elsewhere); presumably forces corruption for test purposes — verify.
 */
union cvmx_gmxx_tx_corrupt {
        uint64_t u64;
        struct cvmx_gmxx_tx_corrupt_s {
                uint64_t reserved_4_63:60;
                uint64_t corrupt:4;
        } s;
        struct cvmx_gmxx_tx_corrupt_cn30xx {
                uint64_t reserved_3_63:61;
                uint64_t corrupt:3;
        } cn30xx;
        struct cvmx_gmxx_tx_corrupt_cn30xx cn31xx;
        struct cvmx_gmxx_tx_corrupt_s cn38xx;
        struct cvmx_gmxx_tx_corrupt_s cn38xxp2;
        struct cvmx_gmxx_tx_corrupt_cn30xx cn50xx;
        struct cvmx_gmxx_tx_corrupt_s cn52xx;
        struct cvmx_gmxx_tx_corrupt_s cn52xxp1;
        struct cvmx_gmxx_tx_corrupt_s cn56xx;
        struct cvmx_gmxx_tx_corrupt_s cn56xxp1;
        struct cvmx_gmxx_tx_corrupt_s cn58xx;
        struct cvmx_gmxx_tx_corrupt_s cn58xxp1;
};

/* GMX_TX_HG2_REG1 — HiGig2 TX XOF value; cn52xx/cn56xx family only. */
union cvmx_gmxx_tx_hg2_reg1 {
        uint64_t u64;
        struct cvmx_gmxx_tx_hg2_reg1_s {
                uint64_t reserved_16_63:48;
                uint64_t tx_xof:16;
        } s;
        struct cvmx_gmxx_tx_hg2_reg1_s cn52xx;
        struct cvmx_gmxx_tx_hg2_reg1_s cn52xxp1;
        struct cvmx_gmxx_tx_hg2_reg1_s cn56xx;
};

/* GMX_TX_HG2_REG2 — HiGig2 TX XON value; cn52xx/cn56xx family only. */
union cvmx_gmxx_tx_hg2_reg2 {
        uint64_t u64;
        struct cvmx_gmxx_tx_hg2_reg2_s {
                uint64_t reserved_16_63:48;
                uint64_t tx_xon:16;
        } s;
        struct cvmx_gmxx_tx_hg2_reg2_s cn52xx;
        struct cvmx_gmxx_tx_hg2_reg2_s cn52xxp1;
        struct cvmx_gmxx_tx_hg2_reg2_s cn56xx;
};

/* GMX_TX_IFG — two 4-bit inter-frame-gap values; identical on all models. */
union cvmx_gmxx_tx_ifg {
        uint64_t u64;
        struct cvmx_gmxx_tx_ifg_s {
                uint64_t reserved_8_63:56;
                uint64_t ifg2:4;
                uint64_t ifg1:4;
        } s;
        struct cvmx_gmxx_tx_ifg_s cn30xx;
        struct cvmx_gmxx_tx_ifg_s cn31xx;
        struct cvmx_gmxx_tx_ifg_s cn38xx;
        struct cvmx_gmxx_tx_ifg_s cn38xxp2;
        struct cvmx_gmxx_tx_ifg_s cn50xx;
        struct cvmx_gmxx_tx_ifg_s cn52xx;
        struct cvmx_gmxx_tx_ifg_s cn52xxp1;
        struct cvmx_gmxx_tx_ifg_s cn56xx;
        struct cvmx_gmxx_tx_ifg_s cn56xxp1;
        struct cvmx_gmxx_tx_ifg_s cn58xx;
        struct cvmx_gmxx_tx_ifg_s cn58xxp1;
};
2178
/*
 * GMX_TX_INT_EN — TX interrupt enable mask; bit-for-bit it mirrors
 * GMX_TX_INT_REG below. Per-port event fields are 3 bits wide on
 * 3-port models (cn30xx/cn31xx/cn50xx) and 4 bits elsewhere; some
 * models lack late_col and/or ncb_nxa.
 */
union cvmx_gmxx_tx_int_en {
        uint64_t u64;
        struct cvmx_gmxx_tx_int_en_s {
                uint64_t reserved_20_63:44;
                uint64_t late_col:4;
                uint64_t xsdef:4;
                uint64_t xscol:4;
                uint64_t reserved_6_7:2;
                uint64_t undflw:4;
                uint64_t ncb_nxa:1;
                uint64_t pko_nxa:1;
        } s;
        struct cvmx_gmxx_tx_int_en_cn30xx {
                uint64_t reserved_19_63:45;
                uint64_t late_col:3;
                uint64_t reserved_15_15:1;
                uint64_t xsdef:3;
                uint64_t reserved_11_11:1;
                uint64_t xscol:3;
                uint64_t reserved_5_7:3;
                uint64_t undflw:3;
                uint64_t reserved_1_1:1;
                uint64_t pko_nxa:1;
        } cn30xx;
        struct cvmx_gmxx_tx_int_en_cn31xx {
                uint64_t reserved_15_63:49;
                uint64_t xsdef:3;
                uint64_t reserved_11_11:1;
                uint64_t xscol:3;
                uint64_t reserved_5_7:3;
                uint64_t undflw:3;
                uint64_t reserved_1_1:1;
                uint64_t pko_nxa:1;
        } cn31xx;
        struct cvmx_gmxx_tx_int_en_s cn38xx;
        struct cvmx_gmxx_tx_int_en_cn38xxp2 {
                uint64_t reserved_16_63:48;
                uint64_t xsdef:4;
                uint64_t xscol:4;
                uint64_t reserved_6_7:2;
                uint64_t undflw:4;
                uint64_t ncb_nxa:1;
                uint64_t pko_nxa:1;
        } cn38xxp2;
        struct cvmx_gmxx_tx_int_en_cn30xx cn50xx;
        struct cvmx_gmxx_tx_int_en_cn52xx {
                uint64_t reserved_20_63:44;
                uint64_t late_col:4;
                uint64_t xsdef:4;
                uint64_t xscol:4;
                uint64_t reserved_6_7:2;
                uint64_t undflw:4;
                uint64_t reserved_1_1:1;
                uint64_t pko_nxa:1;
        } cn52xx;
        struct cvmx_gmxx_tx_int_en_cn52xx cn52xxp1;
        struct cvmx_gmxx_tx_int_en_cn52xx cn56xx;
        struct cvmx_gmxx_tx_int_en_cn52xx cn56xxp1;
        struct cvmx_gmxx_tx_int_en_s cn58xx;
        struct cvmx_gmxx_tx_int_en_s cn58xxp1;
};
2240
/*
 * GMX_TX_INT_REG — TX interrupt status; same field layout as
 * GMX_TX_INT_EN above, model for model.
 */
union cvmx_gmxx_tx_int_reg {
        uint64_t u64;
        struct cvmx_gmxx_tx_int_reg_s {
                uint64_t reserved_20_63:44;
                uint64_t late_col:4;
                uint64_t xsdef:4;
                uint64_t xscol:4;
                uint64_t reserved_6_7:2;
                uint64_t undflw:4;
                uint64_t ncb_nxa:1;
                uint64_t pko_nxa:1;
        } s;
        struct cvmx_gmxx_tx_int_reg_cn30xx {
                uint64_t reserved_19_63:45;
                uint64_t late_col:3;
                uint64_t reserved_15_15:1;
                uint64_t xsdef:3;
                uint64_t reserved_11_11:1;
                uint64_t xscol:3;
                uint64_t reserved_5_7:3;
                uint64_t undflw:3;
                uint64_t reserved_1_1:1;
                uint64_t pko_nxa:1;
        } cn30xx;
        struct cvmx_gmxx_tx_int_reg_cn31xx {
                uint64_t reserved_15_63:49;
                uint64_t xsdef:3;
                uint64_t reserved_11_11:1;
                uint64_t xscol:3;
                uint64_t reserved_5_7:3;
                uint64_t undflw:3;
                uint64_t reserved_1_1:1;
                uint64_t pko_nxa:1;
        } cn31xx;
        struct cvmx_gmxx_tx_int_reg_s cn38xx;
        struct cvmx_gmxx_tx_int_reg_cn38xxp2 {
                uint64_t reserved_16_63:48;
                uint64_t xsdef:4;
                uint64_t xscol:4;
                uint64_t reserved_6_7:2;
                uint64_t undflw:4;
                uint64_t ncb_nxa:1;
                uint64_t pko_nxa:1;
        } cn38xxp2;
        struct cvmx_gmxx_tx_int_reg_cn30xx cn50xx;
        struct cvmx_gmxx_tx_int_reg_cn52xx {
                uint64_t reserved_20_63:44;
                uint64_t late_col:4;
                uint64_t xsdef:4;
                uint64_t xscol:4;
                uint64_t reserved_6_7:2;
                uint64_t undflw:4;
                uint64_t reserved_1_1:1;
                uint64_t pko_nxa:1;
        } cn52xx;
        struct cvmx_gmxx_tx_int_reg_cn52xx cn52xxp1;
        struct cvmx_gmxx_tx_int_reg_cn52xx cn56xx;
        struct cvmx_gmxx_tx_int_reg_cn52xx cn56xxp1;
        struct cvmx_gmxx_tx_int_reg_s cn58xx;
        struct cvmx_gmxx_tx_int_reg_s cn58xxp1;
};
2302
/* GMX_TX_JAM — 8-bit jam pattern; identical on all models. */
union cvmx_gmxx_tx_jam {
        uint64_t u64;
        struct cvmx_gmxx_tx_jam_s {
                uint64_t reserved_8_63:56;
                uint64_t jam:8;
        } s;
        struct cvmx_gmxx_tx_jam_s cn30xx;
        struct cvmx_gmxx_tx_jam_s cn31xx;
        struct cvmx_gmxx_tx_jam_s cn38xx;
        struct cvmx_gmxx_tx_jam_s cn38xxp2;
        struct cvmx_gmxx_tx_jam_s cn50xx;
        struct cvmx_gmxx_tx_jam_s cn52xx;
        struct cvmx_gmxx_tx_jam_s cn52xxp1;
        struct cvmx_gmxx_tx_jam_s cn56xx;
        struct cvmx_gmxx_tx_jam_s cn56xxp1;
        struct cvmx_gmxx_tx_jam_s cn58xx;
        struct cvmx_gmxx_tx_jam_s cn58xxp1;
};

/*
 * GMX_TX_LFSR — 16-bit LFSR state (presumably for backoff randomization
 * — verify against the HRM); identical on all models.
 */
union cvmx_gmxx_tx_lfsr {
        uint64_t u64;
        struct cvmx_gmxx_tx_lfsr_s {
                uint64_t reserved_16_63:48;
                uint64_t lfsr:16;
        } s;
        struct cvmx_gmxx_tx_lfsr_s cn30xx;
        struct cvmx_gmxx_tx_lfsr_s cn31xx;
        struct cvmx_gmxx_tx_lfsr_s cn38xx;
        struct cvmx_gmxx_tx_lfsr_s cn38xxp2;
        struct cvmx_gmxx_tx_lfsr_s cn50xx;
        struct cvmx_gmxx_tx_lfsr_s cn52xx;
        struct cvmx_gmxx_tx_lfsr_s cn52xxp1;
        struct cvmx_gmxx_tx_lfsr_s cn56xx;
        struct cvmx_gmxx_tx_lfsr_s cn56xxp1;
        struct cvmx_gmxx_tx_lfsr_s cn58xx;
        struct cvmx_gmxx_tx_lfsr_s cn58xxp1;
};

/*
 * GMX_TX_OVR_BP — backpressure override. Three layout generations:
 * 3-bit fields (cn30xx family), 4-bit fields (cn38xx/cn58xx), and the
 * full layout with a 16-bit tx_prt_bp field (cn52xx/cn56xx family).
 */
union cvmx_gmxx_tx_ovr_bp {
        uint64_t u64;
        struct cvmx_gmxx_tx_ovr_bp_s {
                uint64_t reserved_48_63:16;
                uint64_t tx_prt_bp:16;
                uint64_t reserved_12_31:20;
                uint64_t en:4;
                uint64_t bp:4;
                uint64_t ign_full:4;
        } s;
        struct cvmx_gmxx_tx_ovr_bp_cn30xx {
                uint64_t reserved_11_63:53;
                uint64_t en:3;
                uint64_t reserved_7_7:1;
                uint64_t bp:3;
                uint64_t reserved_3_3:1;
                uint64_t ign_full:3;
        } cn30xx;
        struct cvmx_gmxx_tx_ovr_bp_cn30xx cn31xx;
        struct cvmx_gmxx_tx_ovr_bp_cn38xx {
                uint64_t reserved_12_63:52;
                uint64_t en:4;
                uint64_t bp:4;
                uint64_t ign_full:4;
        } cn38xx;
        struct cvmx_gmxx_tx_ovr_bp_cn38xx cn38xxp2;
        struct cvmx_gmxx_tx_ovr_bp_cn30xx cn50xx;
        struct cvmx_gmxx_tx_ovr_bp_s cn52xx;
        struct cvmx_gmxx_tx_ovr_bp_s cn52xxp1;
        struct cvmx_gmxx_tx_ovr_bp_s cn56xx;
        struct cvmx_gmxx_tx_ovr_bp_s cn56xxp1;
        struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xx;
        struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xxp1;
};
2375
/*
 * GMX_TX_PAUSE_PKT_DMAC — 48-bit destination MAC used in transmitted
 * PAUSE packets; identical on all models.
 */
union cvmx_gmxx_tx_pause_pkt_dmac {
        uint64_t u64;
        struct cvmx_gmxx_tx_pause_pkt_dmac_s {
                uint64_t reserved_48_63:16;
                uint64_t dmac:48;
        } s;
        struct cvmx_gmxx_tx_pause_pkt_dmac_s cn30xx;
        struct cvmx_gmxx_tx_pause_pkt_dmac_s cn31xx;
        struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xx;
        struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xxp2;
        struct cvmx_gmxx_tx_pause_pkt_dmac_s cn50xx;
        struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xx;
        struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xxp1;
        struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xx;
        struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xxp1;
        struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xx;
        struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xxp1;
};

/*
 * GMX_TX_PAUSE_PKT_TYPE — 16-bit EtherType used in transmitted PAUSE
 * packets; identical on all models.
 */
union cvmx_gmxx_tx_pause_pkt_type {
        uint64_t u64;
        struct cvmx_gmxx_tx_pause_pkt_type_s {
                uint64_t reserved_16_63:48;
                uint64_t type:16;
        } s;
        struct cvmx_gmxx_tx_pause_pkt_type_s cn30xx;
        struct cvmx_gmxx_tx_pause_pkt_type_s cn31xx;
        struct cvmx_gmxx_tx_pause_pkt_type_s cn38xx;
        struct cvmx_gmxx_tx_pause_pkt_type_s cn38xxp2;
        struct cvmx_gmxx_tx_pause_pkt_type_s cn50xx;
        struct cvmx_gmxx_tx_pause_pkt_type_s cn52xx;
        struct cvmx_gmxx_tx_pause_pkt_type_s cn52xxp1;
        struct cvmx_gmxx_tx_pause_pkt_type_s cn56xx;
        struct cvmx_gmxx_tx_pause_pkt_type_s cn56xxp1;
        struct cvmx_gmxx_tx_pause_pkt_type_s cn58xx;
        struct cvmx_gmxx_tx_pause_pkt_type_s cn58xxp1;
};

/* GMX_TX_PRTS — 5-bit port count; identical on all models. */
union cvmx_gmxx_tx_prts {
        uint64_t u64;
        struct cvmx_gmxx_tx_prts_s {
                uint64_t reserved_5_63:59;
                uint64_t prts:5;
        } s;
        struct cvmx_gmxx_tx_prts_s cn30xx;
        struct cvmx_gmxx_tx_prts_s cn31xx;
        struct cvmx_gmxx_tx_prts_s cn38xx;
        struct cvmx_gmxx_tx_prts_s cn38xxp2;
        struct cvmx_gmxx_tx_prts_s cn50xx;
        struct cvmx_gmxx_tx_prts_s cn52xx;
        struct cvmx_gmxx_tx_prts_s cn52xxp1;
        struct cvmx_gmxx_tx_prts_s cn56xx;
        struct cvmx_gmxx_tx_prts_s cn56xxp1;
        struct cvmx_gmxx_tx_prts_s cn58xx;
        struct cvmx_gmxx_tx_prts_s cn58xxp1;
};
2432
/*
 * SPI4 TX registers (GMX_TX_SPI_*) exist only on SPI4-capable parts
 * (cn38xx/cn58xx families).
 */

/* GMX_TX_SPI_CTL — tpa_clr and cont_pkt control bits. */
union cvmx_gmxx_tx_spi_ctl {
        uint64_t u64;
        struct cvmx_gmxx_tx_spi_ctl_s {
                uint64_t reserved_2_63:62;
                uint64_t tpa_clr:1;
                uint64_t cont_pkt:1;
        } s;
        struct cvmx_gmxx_tx_spi_ctl_s cn38xx;
        struct cvmx_gmxx_tx_spi_ctl_s cn38xxp2;
        struct cvmx_gmxx_tx_spi_ctl_s cn58xx;
        struct cvmx_gmxx_tx_spi_ctl_s cn58xxp1;
};

/* GMX_TX_SPI_DRAIN — 16-bit drain field (not present on cn38xxp2). */
union cvmx_gmxx_tx_spi_drain {
        uint64_t u64;
        struct cvmx_gmxx_tx_spi_drain_s {
                uint64_t reserved_16_63:48;
                uint64_t drain:16;
        } s;
        struct cvmx_gmxx_tx_spi_drain_s cn38xx;
        struct cvmx_gmxx_tx_spi_drain_s cn58xx;
        struct cvmx_gmxx_tx_spi_drain_s cn58xxp1;
};

/*
 * GMX_TX_SPI_MAX — burst maximums; cn58xx adds a 7-bit slice field on
 * top of the cn38xx max1/max2 layout.
 */
union cvmx_gmxx_tx_spi_max {
        uint64_t u64;
        struct cvmx_gmxx_tx_spi_max_s {
                uint64_t reserved_23_63:41;
                uint64_t slice:7;
                uint64_t max2:8;
                uint64_t max1:8;
        } s;
        struct cvmx_gmxx_tx_spi_max_cn38xx {
                uint64_t reserved_16_63:48;
                uint64_t max2:8;
                uint64_t max1:8;
        } cn38xx;
        struct cvmx_gmxx_tx_spi_max_cn38xx cn38xxp2;
        struct cvmx_gmxx_tx_spi_max_s cn58xx;
        struct cvmx_gmxx_tx_spi_max_s cn58xxp1;
};

/* GMX_TX_SPI_ROUND(x) — 16-bit round value; cn58xx family only. */
union cvmx_gmxx_tx_spi_roundx {
        uint64_t u64;
        struct cvmx_gmxx_tx_spi_roundx_s {
                uint64_t reserved_16_63:48;
                uint64_t round:16;
        } s;
        struct cvmx_gmxx_tx_spi_roundx_s cn58xx;
        struct cvmx_gmxx_tx_spi_roundx_s cn58xxp1;
};

/* GMX_TX_SPI_THRESH — 6-bit threshold. */
union cvmx_gmxx_tx_spi_thresh {
        uint64_t u64;
        struct cvmx_gmxx_tx_spi_thresh_s {
                uint64_t reserved_6_63:58;
                uint64_t thresh:6;
        } s;
        struct cvmx_gmxx_tx_spi_thresh_s cn38xx;
        struct cvmx_gmxx_tx_spi_thresh_s cn38xxp2;
        struct cvmx_gmxx_tx_spi_thresh_s cn58xx;
        struct cvmx_gmxx_tx_spi_thresh_s cn58xxp1;
};

/*
 * GMX_TX_XAUI_CTL — XAUI TX control (HiGig, link-status bypass,
 * unidirectional and DIC enables); cn52xx/cn56xx family only.
 */
union cvmx_gmxx_tx_xaui_ctl {
        uint64_t u64;
        struct cvmx_gmxx_tx_xaui_ctl_s {
                uint64_t reserved_11_63:53;
                uint64_t hg_pause_hgi:2;
                uint64_t hg_en:1;
                uint64_t reserved_7_7:1;
                uint64_t ls_byp:1;
                uint64_t ls:2;
                uint64_t reserved_2_3:2;
                uint64_t uni_en:1;
                uint64_t dic_en:1;
        } s;
        struct cvmx_gmxx_tx_xaui_ctl_s cn52xx;
        struct cvmx_gmxx_tx_xaui_ctl_s cn52xxp1;
        struct cvmx_gmxx_tx_xaui_ctl_s cn56xx;
        struct cvmx_gmxx_tx_xaui_ctl_s cn56xxp1;
};

/*
 * GMX_XAUI_EXT_LOOPBACK — external loopback enable plus a 4-bit
 * threshold; cn52xx/cn56xx family only.
 */
union cvmx_gmxx_xaui_ext_loopback {
        uint64_t u64;
        struct cvmx_gmxx_xaui_ext_loopback_s {
                uint64_t reserved_5_63:59;
                uint64_t en:1;
                uint64_t thresh:4;
        } s;
        struct cvmx_gmxx_xaui_ext_loopback_s cn52xx;
        struct cvmx_gmxx_xaui_ext_loopback_s cn52xxp1;
        struct cvmx_gmxx_xaui_ext_loopback_s cn56xx;
        struct cvmx_gmxx_xaui_ext_loopback_s cn56xxp1;
};
2528
2529#endif
diff --git a/drivers/staging/octeon/cvmx-helper-board.c b/drivers/staging/octeon/cvmx-helper-board.c
new file mode 100644
index 000000000000..3085e38a6f99
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-board.c
@@ -0,0 +1,706 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 *
30 * Helper functions to abstract board specific data about
31 * network ports from the rest of the cvmx-helper files.
32 */
33
34#include <asm/octeon/octeon.h>
35#include <asm/octeon/cvmx-bootinfo.h>
36
37#include "cvmx-config.h"
38
39#include "cvmx-mdio.h"
40
41#include "cvmx-helper.h"
42#include "cvmx-helper-util.h"
43#include "cvmx-helper-board.h"
44
45#include "cvmx-gmxx-defs.h"
46#include "cvmx-asxx-defs.h"
47
48/**
49 * cvmx_override_board_link_get(int ipd_port) is a function
50 * pointer. It is meant to allow customization of the process of
51 * talking to a PHY to determine link speed. It is called every
52 * time a PHY must be polled for link status. Users should set
53 * this pointer to a function before calling any cvmx-helper
54 * operations.
55 */
/*
 * Optional user hook (see comment above): when non-NULL,
 * __cvmx_helper_board_link_get() returns this function's result instead
 * of polling the PHY itself.
 */
cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port) =
        NULL;
58
59/**
60 * Return the MII PHY address associated with the given IPD
61 * port. A result of -1 means there isn't a MII capable PHY
62 * connected to this port. On chips supporting multiple MII
63 * busses the bus number is encoded in bits <15:8>.
64 *
65 * This function must be modified for every new Octeon board.
66 * Internally it uses switch statements based on the cvmx_sysinfo
 * data to determine board types and revisions. It relies on the
68 * fact that every Octeon board receives a unique board type
69 * enumeration from the bootloader.
70 *
71 * @ipd_port: Octeon IPD port to get the MII address for.
72 *
73 * Returns MII PHY address and bus number or -1.
74 */
75int cvmx_helper_board_get_mii_address(int ipd_port)
76{
77 switch (cvmx_sysinfo_get()->board_type) {
78 case CVMX_BOARD_TYPE_SIM:
79 /* Simulator doesn't have MII */
80 return -1;
81 case CVMX_BOARD_TYPE_EBT3000:
82 case CVMX_BOARD_TYPE_EBT5800:
83 case CVMX_BOARD_TYPE_THUNDER:
84 case CVMX_BOARD_TYPE_NICPRO2:
85 /* Interface 0 is SPI4, interface 1 is RGMII */
86 if ((ipd_port >= 16) && (ipd_port < 20))
87 return ipd_port - 16;
88 else
89 return -1;
90 case CVMX_BOARD_TYPE_KODAMA:
91 case CVMX_BOARD_TYPE_EBH3100:
92 case CVMX_BOARD_TYPE_HIKARI:
93 case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
94 case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
95 case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
96 /*
97 * Port 0 is WAN connected to a PHY, Port 1 is GMII
98 * connected to a switch
99 */
100 if (ipd_port == 0)
101 return 4;
102 else if (ipd_port == 1)
103 return 9;
104 else
105 return -1;
106 case CVMX_BOARD_TYPE_NAC38:
107 /* Board has 8 RGMII ports PHYs are 0-7 */
108 if ((ipd_port >= 0) && (ipd_port < 4))
109 return ipd_port;
110 else if ((ipd_port >= 16) && (ipd_port < 20))
111 return ipd_port - 16 + 4;
112 else
113 return -1;
114 case CVMX_BOARD_TYPE_EBH3000:
115 /* Board has dual SPI4 and no PHYs */
116 return -1;
117 case CVMX_BOARD_TYPE_EBH5200:
118 case CVMX_BOARD_TYPE_EBH5201:
119 case CVMX_BOARD_TYPE_EBT5200:
120 /*
121 * Board has 4 SGMII ports. The PHYs start right after the MII
122 * ports MII0 = 0, MII1 = 1, SGMII = 2-5.
123 */
124 if ((ipd_port >= 0) && (ipd_port < 4))
125 return ipd_port + 2;
126 else
127 return -1;
128 case CVMX_BOARD_TYPE_EBH5600:
129 case CVMX_BOARD_TYPE_EBH5601:
130 case CVMX_BOARD_TYPE_EBH5610:
131 /*
132 * Board has 8 SGMII ports. 4 connect out, two connect
133 * to a switch, and 2 loop to each other
134 */
135 if ((ipd_port >= 0) && (ipd_port < 4))
136 return ipd_port + 1;
137 else
138 return -1;
139 case CVMX_BOARD_TYPE_CUST_NB5:
140 if (ipd_port == 2)
141 return 4;
142 else
143 return -1;
144 case CVMX_BOARD_TYPE_NIC_XLE_4G:
145 /* Board has 4 SGMII ports. connected QLM3(interface 1) */
146 if ((ipd_port >= 16) && (ipd_port < 20))
147 return ipd_port - 16 + 1;
148 else
149 return -1;
150 case CVMX_BOARD_TYPE_BBGW_REF:
151 /*
152 * No PHYs are connected to Octeon, everything is
153 * through switch.
154 */
155 return -1;
156 }
157
158 /* Some unknown board. Somebody forgot to update this function... */
159 cvmx_dprintf
160 ("cvmx_helper_board_get_mii_address: Unknown board type %d\n",
161 cvmx_sysinfo_get()->board_type);
162 return -1;
163}
164
165/**
166 * This function is the board specific method of determining an
167 * ethernet ports link speed. Most Octeon boards have Marvell PHYs
168 * and are handled by the fall through case. This function must be
169 * updated for boards that don't have the normal Marvell PHYs.
170 *
171 * This function must be modified for every new Octeon board.
172 * Internally it uses switch statements based on the cvmx_sysinfo
173 * data to determine board types and revisions. It relies on the
174 * fact that every Octeon board receives a unique board type
175 * enumeration from the bootloader.
176 *
177 * @ipd_port: IPD input port associated with the port we want to get link
178 * status for.
179 *
 * Returns the port's link status. If the link isn't fully resolved, this must
181 * return zero.
182 */
183cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
184{
185 cvmx_helper_link_info_t result;
186 int phy_addr;
187 int is_broadcom_phy = 0;
188
189 /* Give the user a chance to override the processing of this function */
190 if (cvmx_override_board_link_get)
191 return cvmx_override_board_link_get(ipd_port);
192
193 /* Unless we fix it later, all links are defaulted to down */
194 result.u64 = 0;
195
196 /*
197 * This switch statement should handle all ports that either don't use
198 * Marvell PHYS, or don't support in-band status.
199 */
200 switch (cvmx_sysinfo_get()->board_type) {
201 case CVMX_BOARD_TYPE_SIM:
202 /* The simulator gives you a simulated 1Gbps full duplex link */
203 result.s.link_up = 1;
204 result.s.full_duplex = 1;
205 result.s.speed = 1000;
206 return result;
207 case CVMX_BOARD_TYPE_EBH3100:
208 case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
209 case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
210 case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
211 /* Port 1 on these boards is always Gigabit */
212 if (ipd_port == 1) {
213 result.s.link_up = 1;
214 result.s.full_duplex = 1;
215 result.s.speed = 1000;
216 return result;
217 }
218 /* Fall through to the generic code below */
219 break;
220 case CVMX_BOARD_TYPE_CUST_NB5:
221 /* Port 1 on these boards is always Gigabit */
222 if (ipd_port == 1) {
223 result.s.link_up = 1;
224 result.s.full_duplex = 1;
225 result.s.speed = 1000;
226 return result;
227 } else /* The other port uses a broadcom PHY */
228 is_broadcom_phy = 1;
229 break;
230 case CVMX_BOARD_TYPE_BBGW_REF:
231 /* Port 1 on these boards is always Gigabit */
232 if (ipd_port == 2) {
233 /* Port 2 is not hooked up */
234 result.u64 = 0;
235 return result;
236 } else {
237 /* Ports 0 and 1 connect to the switch */
238 result.s.link_up = 1;
239 result.s.full_duplex = 1;
240 result.s.speed = 1000;
241 return result;
242 }
243 break;
244 }
245
246 phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
247 if (phy_addr != -1) {
248 if (is_broadcom_phy) {
249 /*
250 * Below we are going to read SMI/MDIO
251 * register 0x19 which works on Broadcom
252 * parts
253 */
254 int phy_status =
255 cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
256 0x19);
257 switch ((phy_status >> 8) & 0x7) {
258 case 0:
259 result.u64 = 0;
260 break;
261 case 1:
262 result.s.link_up = 1;
263 result.s.full_duplex = 0;
264 result.s.speed = 10;
265 break;
266 case 2:
267 result.s.link_up = 1;
268 result.s.full_duplex = 1;
269 result.s.speed = 10;
270 break;
271 case 3:
272 result.s.link_up = 1;
273 result.s.full_duplex = 0;
274 result.s.speed = 100;
275 break;
276 case 4:
277 result.s.link_up = 1;
278 result.s.full_duplex = 1;
279 result.s.speed = 100;
280 break;
281 case 5:
282 result.s.link_up = 1;
283 result.s.full_duplex = 1;
284 result.s.speed = 100;
285 break;
286 case 6:
287 result.s.link_up = 1;
288 result.s.full_duplex = 0;
289 result.s.speed = 1000;
290 break;
291 case 7:
292 result.s.link_up = 1;
293 result.s.full_duplex = 1;
294 result.s.speed = 1000;
295 break;
296 }
297 } else {
298 /*
299 * This code assumes we are using a Marvell
300 * Gigabit PHY. All the speed information can
301 * be read from register 17 in one
302 * go. Somebody using a different PHY will
303 * need to handle it above in the board
304 * specific area.
305 */
306 int phy_status =
307 cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 17);
308
309 /*
310 * If the resolve bit 11 isn't set, see if
311 * autoneg is turned off (bit 12, reg 0). The
312 * resolve bit doesn't get set properly when
313 * autoneg is off, so force it.
314 */
315 if ((phy_status & (1 << 11)) == 0) {
316 int auto_status =
317 cvmx_mdio_read(phy_addr >> 8,
318 phy_addr & 0xff, 0);
319 if ((auto_status & (1 << 12)) == 0)
320 phy_status |= 1 << 11;
321 }
322
323 /*
324 * Only return a link if the PHY has finished
325 * auto negotiation and set the resolved bit
326 * (bit 11)
327 */
328 if (phy_status & (1 << 11)) {
329 result.s.link_up = 1;
330 result.s.full_duplex = ((phy_status >> 13) & 1);
331 switch ((phy_status >> 14) & 3) {
332 case 0: /* 10 Mbps */
333 result.s.speed = 10;
334 break;
335 case 1: /* 100 Mbps */
336 result.s.speed = 100;
337 break;
338 case 2: /* 1 Gbps */
339 result.s.speed = 1000;
340 break;
341 case 3: /* Illegal */
342 result.u64 = 0;
343 break;
344 }
345 }
346 }
347 } else if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
348 || OCTEON_IS_MODEL(OCTEON_CN58XX)
349 || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
350 /*
351 * We don't have a PHY address, so attempt to use
352 * in-band status. It is really important that boards
353 * not supporting in-band status never get
354 * here. Reading broken in-band status tends to do bad
355 * things
356 */
357 union cvmx_gmxx_rxx_rx_inbnd inband_status;
358 int interface = cvmx_helper_get_interface_num(ipd_port);
359 int index = cvmx_helper_get_interface_index_num(ipd_port);
360 inband_status.u64 =
361 cvmx_read_csr(CVMX_GMXX_RXX_RX_INBND(index, interface));
362
363 result.s.link_up = inband_status.s.status;
364 result.s.full_duplex = inband_status.s.duplex;
365 switch (inband_status.s.speed) {
366 case 0: /* 10 Mbps */
367 result.s.speed = 10;
368 break;
369 case 1: /* 100 Mbps */
370 result.s.speed = 100;
371 break;
372 case 2: /* 1 Gbps */
373 result.s.speed = 1000;
374 break;
375 case 3: /* Illegal */
376 result.u64 = 0;
377 break;
378 }
379 } else {
380 /*
381 * We don't have a PHY address and we don't have
382 * in-band status. There is no way to determine the
383 * link speed. Return down assuming this port isn't
384 * wired
385 */
386 result.u64 = 0;
387 }
388
389 /* If link is down, return all fields as zero. */
390 if (!result.s.link_up)
391 result.u64 = 0;
392
393 return result;
394}
395
396/**
397 * This function as a board specific method of changing the PHY
398 * speed, duplex, and auto-negotiation. This programs the PHY and
399 * not Octeon. This can be used to force Octeon's links to
400 * specific settings.
401 *
 * @phy_addr: The address of the PHY to program
 * @link_flags:
 *            Flags controlling autonegotiation and flow control; see
 *            cvmx_helper_board_set_phy_link_flags_types_t.
405 * @link_info: Link speed to program. If the speed is zero and auto-negotiation
406 * is enabled, all possible negotiation speeds are advertised.
407 *
408 * Returns Zero on success, negative on failure
409 */
int cvmx_helper_board_link_set_phy(int phy_addr,
				   cvmx_helper_board_set_phy_link_flags_types_t
				   link_flags,
				   cvmx_helper_link_info_t link_info)
{
	/*
	 * phy_addr encodes the MII bus number in bits <15:8> and the PHY
	 * address in bits <7:0>, hence the "phy_addr >> 8, phy_addr & 0xff"
	 * pair on every MDIO access below.
	 */

	/* Set the flow control settings based on link_flags */
	if ((link_flags & set_phy_link_flags_flow_control_mask) !=
	    set_phy_link_flags_flow_control_dont_touch) {
		cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
		reg_autoneg_adver.u16 =
		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
				   CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
		/*
		 * Advertise (asymmetric) pause only when the caller asked
		 * for flow control to be enabled; "disable" clears both.
		 */
		reg_autoneg_adver.s.asymmetric_pause =
		    (link_flags & set_phy_link_flags_flow_control_mask) ==
		    set_phy_link_flags_flow_control_enable;
		reg_autoneg_adver.s.pause =
		    (link_flags & set_phy_link_flags_flow_control_mask) ==
		    set_phy_link_flags_flow_control_enable;
		cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
				CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
				reg_autoneg_adver.u16);
	}

	/* If speed isn't set and autoneg is on advertise all supported modes */
	if ((link_flags & set_phy_link_flags_autoneg)
	    && (link_info.s.speed == 0)) {
		cvmx_mdio_phy_reg_control_t reg_control;
		cvmx_mdio_phy_reg_status_t reg_status;
		cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
		cvmx_mdio_phy_reg_extended_status_t reg_extended_status;
		cvmx_mdio_phy_reg_control_1000_t reg_control_1000;

		/*
		 * Copy every capability bit reported in the PHY status
		 * register into the matching advertisement bit.
		 */
		reg_status.u16 =
		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
				   CVMX_MDIO_PHY_REG_STATUS);
		reg_autoneg_adver.u16 =
		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
				   CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
		reg_autoneg_adver.s.advert_100base_t4 =
		    reg_status.s.capable_100base_t4;
		reg_autoneg_adver.s.advert_10base_tx_full =
		    reg_status.s.capable_10_full;
		reg_autoneg_adver.s.advert_10base_tx_half =
		    reg_status.s.capable_10_half;
		reg_autoneg_adver.s.advert_100base_tx_full =
		    reg_status.s.capable_100base_x_full;
		reg_autoneg_adver.s.advert_100base_tx_half =
		    reg_status.s.capable_100base_x_half;
		cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
				CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
				reg_autoneg_adver.u16);
		/*
		 * Gigabit advertisement lives in a separate register and is
		 * only touched on PHYs that report extended status.
		 */
		if (reg_status.s.capable_extended_status) {
			reg_extended_status.u16 =
			    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
					   CVMX_MDIO_PHY_REG_EXTENDED_STATUS);
			reg_control_1000.u16 =
			    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
					   CVMX_MDIO_PHY_REG_CONTROL_1000);
			reg_control_1000.s.advert_1000base_t_full =
			    reg_extended_status.s.capable_1000base_t_full;
			reg_control_1000.s.advert_1000base_t_half =
			    reg_extended_status.s.capable_1000base_t_half;
			cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
					CVMX_MDIO_PHY_REG_CONTROL_1000,
					reg_control_1000.u16);
		}
		/* Enable and restart autonegotiation with the new adverts. */
		reg_control.u16 =
		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
				   CVMX_MDIO_PHY_REG_CONTROL);
		reg_control.s.autoneg_enable = 1;
		reg_control.s.restart_autoneg = 1;
		cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
				CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
	} else if ((link_flags & set_phy_link_flags_autoneg)) {
		/*
		 * Autoneg requested at a specific speed: advertise only the
		 * single speed/duplex combination from link_info.
		 */
		cvmx_mdio_phy_reg_control_t reg_control;
		cvmx_mdio_phy_reg_status_t reg_status;
		cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
		cvmx_mdio_phy_reg_extended_status_t reg_extended_status;
		cvmx_mdio_phy_reg_control_1000_t reg_control_1000;

		reg_status.u16 =
		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
				   CVMX_MDIO_PHY_REG_STATUS);
		reg_autoneg_adver.u16 =
		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
				   CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
		/* Start from a clean slate: advertise nothing... */
		reg_autoneg_adver.s.advert_100base_t4 = 0;
		reg_autoneg_adver.s.advert_10base_tx_full = 0;
		reg_autoneg_adver.s.advert_10base_tx_half = 0;
		reg_autoneg_adver.s.advert_100base_tx_full = 0;
		reg_autoneg_adver.s.advert_100base_tx_half = 0;
		if (reg_status.s.capable_extended_status) {
			reg_extended_status.u16 =
			    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
					   CVMX_MDIO_PHY_REG_EXTENDED_STATUS);
			reg_control_1000.u16 =
			    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
					   CVMX_MDIO_PHY_REG_CONTROL_1000);
			reg_control_1000.s.advert_1000base_t_full = 0;
			reg_control_1000.s.advert_1000base_t_half = 0;
		}
		/* ...then set exactly the one requested speed/duplex. */
		switch (link_info.s.speed) {
		case 10:
			reg_autoneg_adver.s.advert_10base_tx_full =
			    link_info.s.full_duplex;
			reg_autoneg_adver.s.advert_10base_tx_half =
			    !link_info.s.full_duplex;
			break;
		case 100:
			reg_autoneg_adver.s.advert_100base_tx_full =
			    link_info.s.full_duplex;
			reg_autoneg_adver.s.advert_100base_tx_half =
			    !link_info.s.full_duplex;
			break;
		case 1000:
			/*
			 * NOTE(review): if the PHY lacks extended status,
			 * reg_control_1000 is still uninitialized here; its
			 * fields are set but the value is never written back
			 * (the write below is guarded), so this looks
			 * harmless in practice -- confirm.
			 */
			reg_control_1000.s.advert_1000base_t_full =
			    link_info.s.full_duplex;
			reg_control_1000.s.advert_1000base_t_half =
			    !link_info.s.full_duplex;
			break;
		}
		cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
				CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
				reg_autoneg_adver.u16);
		if (reg_status.s.capable_extended_status)
			cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
					CVMX_MDIO_PHY_REG_CONTROL_1000,
					reg_control_1000.u16);
		reg_control.u16 =
		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
				   CVMX_MDIO_PHY_REG_CONTROL);
		reg_control.s.autoneg_enable = 1;
		reg_control.s.restart_autoneg = 1;
		cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
				CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
	} else {
		/*
		 * Autoneg off: force speed/duplex directly through the PHY
		 * control register (speed_msb/speed_lsb encode 10/100/1000).
		 */
		cvmx_mdio_phy_reg_control_t reg_control;
		reg_control.u16 =
		    cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
				   CVMX_MDIO_PHY_REG_CONTROL);
		reg_control.s.autoneg_enable = 0;
		reg_control.s.restart_autoneg = 1;
		reg_control.s.duplex = link_info.s.full_duplex;
		if (link_info.s.speed == 1000) {
			reg_control.s.speed_msb = 1;
			reg_control.s.speed_lsb = 0;
		} else if (link_info.s.speed == 100) {
			reg_control.s.speed_msb = 0;
			reg_control.s.speed_lsb = 1;
		} else if (link_info.s.speed == 10) {
			reg_control.s.speed_msb = 0;
			reg_control.s.speed_lsb = 0;
		}
		cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
				CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
	}
	return 0;
}
569
570/**
571 * This function is called by cvmx_helper_interface_probe() after it
572 * determines the number of ports Octeon can support on a specific
573 * interface. This function is the per board location to override
574 * this value. It is called with the number of ports Octeon might
575 * support and should return the number of actual ports on the
576 * board.
577 *
 * This function must be modified for every new Octeon board.
 * Internally it uses switch statements based on the cvmx_sysinfo
 * data to determine board types and revisions. It relies on the
 * fact that every Octeon board receives a unique board type
 * enumeration from the bootloader.
583 *
584 * @interface: Interface to probe
585 * @supported_ports:
586 * Number of ports Octeon supports.
587 *
 * Returns Number of ports the actual board supports. Many times this will
 * simply be "supported_ports".
590 */
591int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
592{
593 switch (cvmx_sysinfo_get()->board_type) {
594 case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
595 if (interface == 0)
596 return 2;
597 break;
598 case CVMX_BOARD_TYPE_BBGW_REF:
599 if (interface == 0)
600 return 2;
601 break;
602 case CVMX_BOARD_TYPE_NIC_XLE_4G:
603 if (interface == 0)
604 return 0;
605 break;
606 /* The 2nd interface on the EBH5600 is connected to the Marvel switch,
607 which we don't support. Disable ports connected to it */
608 case CVMX_BOARD_TYPE_EBH5600:
609 if (interface == 1)
610 return 0;
611 break;
612 }
613 return supported_ports;
614}
615
616/**
617 * Enable packet input/output from the hardware. This function is
618 * called after by cvmx_helper_packet_hardware_enable() to
619 * perform board specific initialization. For most boards
620 * nothing is needed.
621 *
622 * @interface: Interface to enable
623 *
624 * Returns Zero on success, negative on failure
625 */
int __cvmx_helper_board_hardware_enable(int interface)
{
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5) {
		if (interface == 0) {
			/* Different config for switch port */
			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0);
			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0);
			/*
			 * Boards with gigabit WAN ports need a
			 * different setting that is compatible with
			 * 100 Mbit settings
			 */
			cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface),
				       0xc);
			cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface),
				       0xc);
		}
	} else if (cvmx_sysinfo_get()->board_type ==
		   CVMX_BOARD_TYPE_CN3010_EVB_HS5) {
		/*
		 * Broadcom PHYs require different ASX
		 * clocks. Unfortunately many boards don't define a
		 * new board Id and simply mangle the
		 * CN3010_EVB_HS5
		 */
		if (interface == 0) {
			/*
			 * Some boards use a hacked up bootloader that
			 * identifies them as CN3010_EVB_HS5
			 * evaluation boards. This leads to all kinds
			 * of configuration problems. Detect one
			 * case, and print warning, while trying to do
			 * the right thing.
			 */
			int phy_addr = cvmx_helper_board_get_mii_address(0);
			if (phy_addr != -1) {
				/* PHY identifier register (0x2) */
				int phy_identifier =
				    cvmx_mdio_read(phy_addr >> 8,
						   phy_addr & 0xff, 0x2);
				/* Is it a Broadcom PHY? (ID high word 0x0143) */
				if (phy_identifier == 0x0143) {
					cvmx_dprintf("\n");
					cvmx_dprintf("ERROR:\n");
					cvmx_dprintf
					    ("ERROR: Board type is CVMX_BOARD_TYPE_CN3010_EVB_HS5, but Broadcom PHY found.\n");
					cvmx_dprintf
					    ("ERROR: The board type is mis-configured, and software malfunctions are likely.\n");
					cvmx_dprintf
					    ("ERROR: All boards require a unique board type to identify them.\n");
					cvmx_dprintf("ERROR:\n");
					cvmx_dprintf("\n");
					/*
					 * Long delay so the warning is seen
					 * before continuing -- presumably in
					 * core clock cycles, TODO confirm.
					 */
					cvmx_wait(1000000000);
					/* Apply Broadcom-compatible ASX clock
					   settings despite the bad board id. */
					cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX
						       (0, interface), 5);
					cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX
						       (0, interface), 5);
				}
			}
		}
	}
	return 0;
}
688
689cvmx_helper_board_usb_clock_types_t __cvmx_helper_board_usb_get_clock_type(void)
690{
691 switch (cvmx_sysinfo_get()->board_type) {
692 case CVMX_BOARD_TYPE_BBGW_REF:
693 return USB_CLOCK_TYPE_CRYSTAL_12;
694 }
695 return USB_CLOCK_TYPE_REF_48;
696}
697
698int __cvmx_helper_board_usb_get_num_ports(int supported_ports)
699{
700 switch (cvmx_sysinfo_get()->board_type) {
701 case CVMX_BOARD_TYPE_NIC_XLE_4G:
702 return 0;
703 }
704
705 return supported_ports;
706}
diff --git a/drivers/staging/octeon/cvmx-helper-board.h b/drivers/staging/octeon/cvmx-helper-board.h
new file mode 100644
index 000000000000..dc20b01247c4
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-board.h
@@ -0,0 +1,180 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 *
30 * Helper functions to abstract board specific data about
31 * network ports from the rest of the cvmx-helper files.
32 *
33 */
34#ifndef __CVMX_HELPER_BOARD_H__
35#define __CVMX_HELPER_BOARD_H__
36
37#include "cvmx-helper.h"
38
/* Clock sources the USB block can be driven from on a given board. */
typedef enum {
	USB_CLOCK_TYPE_REF_12,		/* 12 (MHz per naming) reference clock */
	USB_CLOCK_TYPE_REF_24,		/* 24 reference clock */
	USB_CLOCK_TYPE_REF_48,		/* 48 reference clock; board default */
	USB_CLOCK_TYPE_CRYSTAL_12,	/* 12 crystal (e.g. BBGW_REF board) */
} cvmx_helper_board_usb_clock_types_t;
45
/*
 * Flags for cvmx_helper_board_link_set_phy().  Bit 0 selects
 * autonegotiation; bits <2:1> form a two bit flow control field
 * (dont_touch / enable / disable).
 */
typedef enum {
	set_phy_link_flags_autoneg = 0x1,
	set_phy_link_flags_flow_control_dont_touch = 0x0 << 1,
	set_phy_link_flags_flow_control_enable = 0x1 << 1,
	set_phy_link_flags_flow_control_disable = 0x2 << 1,
	set_phy_link_flags_flow_control_mask = 0x3 << 1,	/* Mask for 2 bit wide flow control field */
} cvmx_helper_board_set_phy_link_flags_types_t;
53
54/**
55 * cvmx_override_board_link_get(int ipd_port) is a function
56 * pointer. It is meant to allow customization of the process of
57 * talking to a PHY to determine link speed. It is called every
58 * time a PHY must be polled for link status. Users should set
59 * this pointer to a function before calling any cvmx-helper
60 * operations.
61 */
62extern cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port);
63
64/**
65 * Return the MII PHY address associated with the given IPD
66 * port. A result of -1 means there isn't a MII capable PHY
67 * connected to this port. On chips supporting multiple MII
68 * busses the bus number is encoded in bits <15:8>.
69 *
 * This function must be modified for every new Octeon board.
 * Internally it uses switch statements based on the cvmx_sysinfo
 * data to determine board types and revisions. It relies on the
 * fact that every Octeon board receives a unique board type
 * enumeration from the bootloader.
75 *
76 * @ipd_port: Octeon IPD port to get the MII address for.
77 *
78 * Returns MII PHY address and bus number or -1.
79 */
80extern int cvmx_helper_board_get_mii_address(int ipd_port);
81
82/**
83 * This function as a board specific method of changing the PHY
84 * speed, duplex, and autonegotiation. This programs the PHY and
85 * not Octeon. This can be used to force Octeon's links to
86 * specific settings.
87 *
88 * @phy_addr: The address of the PHY to program
89 * @link_flags:
90 * Flags to control autonegotiation. Bit 0 is autonegotiation
 *            enable/disable to maintain backward compatibility.
92 * @link_info: Link speed to program. If the speed is zero and autonegotiation
93 * is enabled, all possible negotiation speeds are advertised.
94 *
95 * Returns Zero on success, negative on failure
96 */
97int cvmx_helper_board_link_set_phy(int phy_addr,
98 cvmx_helper_board_set_phy_link_flags_types_t
99 link_flags,
100 cvmx_helper_link_info_t link_info);
101
102/**
103 * This function is the board specific method of determining an
104 * ethernet ports link speed. Most Octeon boards have Marvell PHYs
105 * and are handled by the fall through case. This function must be
106 * updated for boards that don't have the normal Marvell PHYs.
107 *
 * This function must be modified for every new Octeon board.
 * Internally it uses switch statements based on the cvmx_sysinfo
 * data to determine board types and revisions. It relies on the
 * fact that every Octeon board receives a unique board type
 * enumeration from the bootloader.
113 *
114 * @ipd_port: IPD input port associated with the port we want to get link
115 * status for.
116 *
117 * Returns The ports link status. If the link isn't fully resolved, this must
118 * return zero.
119 */
120extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
121
122/**
123 * This function is called by cvmx_helper_interface_probe() after it
124 * determines the number of ports Octeon can support on a specific
125 * interface. This function is the per board location to override
126 * this value. It is called with the number of ports Octeon might
127 * support and should return the number of actual ports on the
128 * board.
129 *
 * This function must be modified for every new Octeon board.
 * Internally it uses switch statements based on the cvmx_sysinfo
 * data to determine board types and revisions. It relies on the
 * fact that every Octeon board receives a unique board type
 * enumeration from the bootloader.
135 *
136 * @interface: Interface to probe
137 * @supported_ports:
138 * Number of ports Octeon supports.
139 *
 * Returns Number of ports the actual board supports. Many times this will
 * simply be "supported_ports".
142 */
143extern int __cvmx_helper_board_interface_probe(int interface,
144 int supported_ports);
145
146/**
147 * Enable packet input/output from the hardware. This function is
148 * called after by cvmx_helper_packet_hardware_enable() to
149 * perform board specific initialization. For most boards
150 * nothing is needed.
151 *
152 * @interface: Interface to enable
153 *
154 * Returns Zero on success, negative on failure
155 */
156extern int __cvmx_helper_board_hardware_enable(int interface);
157
158/**
159 * Gets the clock type used for the USB block based on board type.
160 * Used by the USB code for auto configuration of clock type.
161 *
162 * Returns USB clock type enumeration
163 */
164cvmx_helper_board_usb_clock_types_t
165__cvmx_helper_board_usb_get_clock_type(void);
166
167/**
168 * Adjusts the number of available USB ports on Octeon based on board
169 * specifics.
170 *
171 * @supported_ports: expected number of ports based on chip type;
172 *
173 *
174 * Returns number of available usb ports, based on board specifics.
175 * Return value is supported_ports if function does not
176 * override.
177 */
178int __cvmx_helper_board_usb_get_num_ports(int supported_ports);
179
180#endif /* __CVMX_HELPER_BOARD_H__ */
diff --git a/drivers/staging/octeon/cvmx-helper-fpa.c b/drivers/staging/octeon/cvmx-helper-fpa.c
new file mode 100644
index 000000000000..c239e5f4ab9a
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-fpa.c
@@ -0,0 +1,243 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 * @file
30 *
31 * Helper functions for FPA setup.
32 *
33 */
34#include "executive-config.h"
35#include "cvmx-config.h"
36#include "cvmx.h"
37#include "cvmx-bootmem.h"
38#include "cvmx-fpa.h"
39#include "cvmx-helper-fpa.h"
40
41/**
42 * Allocate memory for and initialize a single FPA pool.
43 *
44 * @pool: Pool to initialize
45 * @buffer_size: Size of buffers to allocate in bytes
46 * @buffers: Number of buffers to put in the pool. Zero is allowed
47 * @name: String name of the pool for debugging purposes
48 * Returns Zero on success, non-zero on failure
49 */
50static int __cvmx_helper_initialize_fpa_pool(int pool, uint64_t buffer_size,
51 uint64_t buffers, const char *name)
52{
53 uint64_t current_num;
54 void *memory;
55 uint64_t align = CVMX_CACHE_LINE_SIZE;
56
57 /*
58 * Align the allocation so that power of 2 size buffers are
59 * naturally aligned.
60 */
61 while (align < buffer_size)
62 align = align << 1;
63
64 if (buffers == 0)
65 return 0;
66
67 current_num = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(pool));
68 if (current_num) {
69 cvmx_dprintf("Fpa pool %d(%s) already has %llu buffers. "
70 "Skipping setup.\n",
71 pool, name, (unsigned long long)current_num);
72 return 0;
73 }
74
75 memory = cvmx_bootmem_alloc(buffer_size * buffers, align);
76 if (memory == NULL) {
77 cvmx_dprintf("Out of memory initializing fpa pool %d(%s).\n",
78 pool, name);
79 return -1;
80 }
81 cvmx_fpa_setup_pool(pool, name, memory, buffer_size, buffers);
82 return 0;
83}
84
85/**
86 * Allocate memory and initialize the FPA pools using memory
87 * from cvmx-bootmem. Specifying zero for the number of
88 * buffers will cause that FPA pool to not be setup. This is
89 * useful if you aren't using some of the hardware and want
90 * to save memory. Use cvmx_helper_initialize_fpa instead of
91 * this function directly.
92 *
93 * @pip_pool: Should always be CVMX_FPA_PACKET_POOL
94 * @pip_size: Should always be CVMX_FPA_PACKET_POOL_SIZE
95 * @pip_buffers:
96 * Number of packet buffers.
97 * @wqe_pool: Should always be CVMX_FPA_WQE_POOL
98 * @wqe_size: Should always be CVMX_FPA_WQE_POOL_SIZE
99 * @wqe_entries:
100 * Number of work queue entries
101 * @pko_pool: Should always be CVMX_FPA_OUTPUT_BUFFER_POOL
102 * @pko_size: Should always be CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
103 * @pko_buffers:
104 * PKO Command buffers. You should at minimum have two per
105 * each PKO queue.
106 * @tim_pool: Should always be CVMX_FPA_TIMER_POOL
107 * @tim_size: Should always be CVMX_FPA_TIMER_POOL_SIZE
108 * @tim_buffers:
109 * TIM ring buffer command queues. At least two per timer bucket
 *                  is recommended.
111 * @dfa_pool: Should always be CVMX_FPA_DFA_POOL
112 * @dfa_size: Should always be CVMX_FPA_DFA_POOL_SIZE
113 * @dfa_buffers:
114 * DFA command buffer. A relatively small (32 for example)
115 * number should work.
116 * Returns Zero on success, non-zero if out of memory
117 */
static int __cvmx_helper_initialize_fpa(int pip_pool, int pip_size,
					int pip_buffers, int wqe_pool,
					int wqe_size, int wqe_entries,
					int pko_pool, int pko_size,
					int pko_buffers, int tim_pool,
					int tim_size, int tim_buffers,
					int dfa_pool, int dfa_size,
					int dfa_buffers)
{
	/* Table of the five pools; a negative pool number disables one. */
	const struct {
		int pool;
		int size;
		int buffers;
		const char *name;
	} pools[] = {
		{ pip_pool, pip_size, pip_buffers, "Packet Buffers" },
		{ wqe_pool, wqe_size, wqe_entries, "Work Queue Entries" },
		{ pko_pool, pko_size, pko_buffers, "PKO Command Buffers" },
		{ tim_pool, tim_size, tim_buffers, "TIM Command Buffers" },
		{ dfa_pool, dfa_size, dfa_buffers, "DFA Command Buffers" },
	};
	unsigned int i;

	cvmx_fpa_enable();

	if ((pip_buffers > 0) && (pip_buffers <= 64))
		cvmx_dprintf
		    ("Warning: %d packet buffers may not be enough for hardware"
		     " prefetch. 65 or more is recommended.\n", pip_buffers);

	/* Initialize each enabled pool in order, stopping on first failure. */
	for (i = 0; i < sizeof(pools) / sizeof(pools[0]); i++) {
		int status;

		if (pools[i].pool < 0)
			continue;
		status = __cvmx_helper_initialize_fpa_pool(pools[i].pool,
							   pools[i].size,
							   pools[i].buffers,
							   pools[i].name);
		if (status)
			return status;
	}

	return 0;
}
183
184/**
185 * Allocate memory and initialize the FPA pools using memory
186 * from cvmx-bootmem. Sizes of each element in the pools is
187 * controlled by the cvmx-config.h header file. Specifying
188 * zero for any parameter will cause that FPA pool to not be
189 * setup. This is useful if you aren't using some of the
190 * hardware and want to save memory.
191 *
192 * @packet_buffers:
193 * Number of packet buffers to allocate
194 * @work_queue_entries:
195 * Number of work queue entries
196 * @pko_buffers:
197 * PKO Command buffers. You should at minimum have two per
198 * each PKO queue.
199 * @tim_buffers:
200 * TIM ring buffer command queues. At least two per timer bucket
 *                  is recommended.
202 * @dfa_buffers:
203 * DFA command buffer. A relatively small (32 for example)
204 * number should work.
205 * Returns Zero on success, non-zero if out of memory
206 */
int cvmx_helper_initialize_fpa(int packet_buffers, int work_queue_entries,
			       int pko_buffers, int tim_buffers,
			       int dfa_buffers)
{
	/*
	 * Any pool not configured in cvmx-config.h falls back to -1
	 * (disabled) with a zero element size; the worker function skips
	 * pools with a negative pool number.  The #ifndef blocks must stay
	 * inside this function so they see the cvmx-config.h definitions.
	 */
#ifndef CVMX_FPA_PACKET_POOL
#define CVMX_FPA_PACKET_POOL -1
#define CVMX_FPA_PACKET_POOL_SIZE 0
#endif
#ifndef CVMX_FPA_WQE_POOL
#define CVMX_FPA_WQE_POOL -1
#define CVMX_FPA_WQE_POOL_SIZE 0
#endif
#ifndef CVMX_FPA_OUTPUT_BUFFER_POOL
#define CVMX_FPA_OUTPUT_BUFFER_POOL -1
#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 0
#endif
#ifndef CVMX_FPA_TIMER_POOL
#define CVMX_FPA_TIMER_POOL -1
#define CVMX_FPA_TIMER_POOL_SIZE 0
#endif
#ifndef CVMX_FPA_DFA_POOL
#define CVMX_FPA_DFA_POOL -1
#define CVMX_FPA_DFA_POOL_SIZE 0
#endif
	return __cvmx_helper_initialize_fpa(CVMX_FPA_PACKET_POOL,
					    CVMX_FPA_PACKET_POOL_SIZE,
					    packet_buffers, CVMX_FPA_WQE_POOL,
					    CVMX_FPA_WQE_POOL_SIZE,
					    work_queue_entries,
					    CVMX_FPA_OUTPUT_BUFFER_POOL,
					    CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE,
					    pko_buffers, CVMX_FPA_TIMER_POOL,
					    CVMX_FPA_TIMER_POOL_SIZE,
					    tim_buffers, CVMX_FPA_DFA_POOL,
					    CVMX_FPA_DFA_POOL_SIZE,
					    dfa_buffers);
}
diff --git a/drivers/staging/octeon/cvmx-helper-fpa.h b/drivers/staging/octeon/cvmx-helper-fpa.h
new file mode 100644
index 000000000000..5ff8c93198de
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-fpa.h
@@ -0,0 +1,64 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 * @file
30 *
31 * Helper functions for FPA setup.
32 *
33 */
34#ifndef __CVMX_HELPER_H_FPA__
35#define __CVMX_HELPER_H_FPA__
36
37/**
38 * Allocate memory and initialize the FPA pools using memory
39 * from cvmx-bootmem. Sizes of each element in the pools is
40 * controlled by the cvmx-config.h header file. Specifying
41 * zero for any parameter will cause that FPA pool to not be
42 * setup. This is useful if you aren't using some of the
43 * hardware and want to save memory.
44 *
45 * @packet_buffers:
46 * Number of packet buffers to allocate
47 * @work_queue_entries:
48 * Number of work queue entries
49 * @pko_buffers:
50 * PKO Command buffers. You should at minimum have two per
51 * each PKO queue.
52 * @tim_buffers:
53 * TIM ring buffer command queues. At least two per timer bucket
 *                  is recommended.
55 * @dfa_buffers:
56 * DFA command buffer. A relatively small (32 for example)
57 * number should work.
58 * Returns Zero on success, non-zero if out of memory
59 */
60extern int cvmx_helper_initialize_fpa(int packet_buffers,
61 int work_queue_entries, int pko_buffers,
62 int tim_buffers, int dfa_buffers);
63
#endif /* __CVMX_HELPER_H_FPA__ */
diff --git a/drivers/staging/octeon/cvmx-helper-loop.c b/drivers/staging/octeon/cvmx-helper-loop.c
new file mode 100644
index 000000000000..55a571a69529
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-loop.c
@@ -0,0 +1,85 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Functions for LOOP initialization, configuration,
30 * and monitoring.
31 */
32#include <asm/octeon/octeon.h>
33
34#include "cvmx-config.h"
35
36#include "cvmx-helper.h"
37#include "cvmx-pip-defs.h"
38
39/**
40 * Probe a LOOP interface and determine the number of ports
41 * connected to it. The LOOP interface should still be down
42 * after this call.
43 *
44 * @interface: Interface to probe
45 *
46 * Returns Number of ports on the interface. Zero to disable.
47 */
48int __cvmx_helper_loop_probe(int interface)
49{
50 union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
51 int num_ports = 4;
52 int port;
53
54 /* We need to disable length checking so packet < 64 bytes and jumbo
55 frames don't get errors */
56 for (port = 0; port < num_ports; port++) {
57 union cvmx_pip_prt_cfgx port_cfg;
58 int ipd_port = cvmx_helper_get_ipd_port(interface, port);
59 port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
60 port_cfg.s.maxerr_en = 0;
61 port_cfg.s.minerr_en = 0;
62 cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
63 }
64
65 /* Disable FCS stripping for loopback ports */
66 ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
67 ipd_sub_port_fcs.s.port_bit2 = 0;
68 cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
69 return num_ports;
70}
71
72/**
73 * Bringup and enable a LOOP interface. After this call packet
74 * I/O should be fully functional. This is called with IPD
75 * enabled but PKO disabled.
76 *
77 * @interface: Interface to bring up
78 *
79 * Returns Zero on success, negative on failure
80 */
81int __cvmx_helper_loop_enable(int interface)
82{
83 /* Do nothing. */
84 return 0;
85}
diff --git a/drivers/staging/octeon/cvmx-helper-loop.h b/drivers/staging/octeon/cvmx-helper-loop.h
new file mode 100644
index 000000000000..e646a6ccce75
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-loop.h
@@ -0,0 +1,59 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as published by
11 * the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful,
14 * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or NONINFRINGEMENT.
16 * See the GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this file; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 * or visit http://www.gnu.org/licenses/.
22 *
23 * This file may also be available under a different license from Cavium.
24 * Contact Cavium Networks for more information
25 ***********************license end**************************************/
26
27/**
28 * @file
29 *
30 * Functions for LOOP initialization, configuration,
31 * and monitoring.
32 *
33 */
34#ifndef __CVMX_HELPER_LOOP_H__
35#define __CVMX_HELPER_LOOP_H__
36
37/**
38 * Probe a LOOP interface and determine the number of ports
39 * connected to it. The LOOP interface should still be down after
40 * this call.
41 *
42 * @interface: Interface to probe
43 *
44 * Returns Number of ports on the interface. Zero to disable.
45 */
46extern int __cvmx_helper_loop_probe(int interface);
47
48/**
49 * Bringup and enable a LOOP interface. After this call packet
50 * I/O should be fully functional. This is called with IPD
51 * enabled but PKO disabled.
52 *
53 * @interface: Interface to bring up
54 *
55 * Returns Zero on success, negative on failure
56 */
57extern int __cvmx_helper_loop_enable(int interface);
58
59#endif
diff --git a/drivers/staging/octeon/cvmx-helper-npi.c b/drivers/staging/octeon/cvmx-helper-npi.c
new file mode 100644
index 000000000000..7388a1e72b38
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-npi.c
@@ -0,0 +1,113 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Functions for NPI initialization, configuration,
30 * and monitoring.
31 */
32#include <asm/octeon/octeon.h>
33
34#include "cvmx-config.h"
35
36#include "cvmx-helper.h"
37
38#include "cvmx-pip-defs.h"
39
40/**
41 * Probe a NPI interface and determine the number of ports
42 * connected to it. The NPI interface should still be down
43 * after this call.
44 *
45 * @interface: Interface to probe
46 *
47 * Returns Number of ports on the interface. Zero to disable.
48 */
49int __cvmx_helper_npi_probe(int interface)
50{
51#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0
52 if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
53 return 4;
54 else if (OCTEON_IS_MODEL(OCTEON_CN56XX)
55 && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
56 /* The packet engines didn't exist before pass 2 */
57 return 4;
58 else if (OCTEON_IS_MODEL(OCTEON_CN52XX)
59 && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
60 /* The packet engines didn't exist before pass 2 */
61 return 4;
62#if 0
63 /*
64 * Technically CN30XX, CN31XX, and CN50XX contain packet
65 * engines, but nobody ever uses them. Since this is the case,
66 * we disable them here.
67 */
68 else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
69 || OCTEON_IS_MODEL(OCTEON_CN50XX))
70 return 2;
71 else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
72 return 1;
73#endif
74#endif
75 return 0;
76}
77
78/**
79 * Bringup and enable a NPI interface. After this call packet
80 * I/O should be fully functional. This is called with IPD
81 * enabled but PKO disabled.
82 *
83 * @interface: Interface to bring up
84 *
85 * Returns Zero on success, negative on failure
86 */
87int __cvmx_helper_npi_enable(int interface)
88{
89 /*
90 * On CN50XX, CN52XX, and CN56XX we need to disable length
91 * checking so packet < 64 bytes and jumbo frames don't get
92 * errors.
93 */
94 if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
95 !OCTEON_IS_MODEL(OCTEON_CN58XX)) {
96 int num_ports = cvmx_helper_ports_on_interface(interface);
97 int port;
98 for (port = 0; port < num_ports; port++) {
99 union cvmx_pip_prt_cfgx port_cfg;
100 int ipd_port =
101 cvmx_helper_get_ipd_port(interface, port);
102 port_cfg.u64 =
103 cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
104 port_cfg.s.maxerr_en = 0;
105 port_cfg.s.minerr_en = 0;
106 cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port),
107 port_cfg.u64);
108 }
109 }
110
111 /* Enables are controlled by the remote host, so nothing to do here */
112 return 0;
113}
diff --git a/drivers/staging/octeon/cvmx-helper-npi.h b/drivers/staging/octeon/cvmx-helper-npi.h
new file mode 100644
index 000000000000..908e7b08c214
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-npi.h
@@ -0,0 +1,60 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 * @file
30 *
31 * Functions for NPI initialization, configuration,
32 * and monitoring.
33 *
34 */
35#ifndef __CVMX_HELPER_NPI_H__
36#define __CVMX_HELPER_NPI_H__
37
38/**
39 * Probe a NPI interface and determine the number of ports
40 * connected to it. The NPI interface should still be down after
41 * this call.
42 *
43 * @interface: Interface to probe
44 *
45 * Returns Number of ports on the interface. Zero to disable.
46 */
47extern int __cvmx_helper_npi_probe(int interface);
48
49/**
50 * Bringup and enable a NPI interface. After this call packet
51 * I/O should be fully functional. This is called with IPD
52 * enabled but PKO disabled.
53 *
54 * @interface: Interface to bring up
55 *
56 * Returns Zero on success, negative on failure
57 */
58extern int __cvmx_helper_npi_enable(int interface);
59
60#endif
diff --git a/drivers/staging/octeon/cvmx-helper-rgmii.c b/drivers/staging/octeon/cvmx-helper-rgmii.c
new file mode 100644
index 000000000000..aa2d5d7fee2b
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-rgmii.c
@@ -0,0 +1,525 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Functions for RGMII/GMII/MII initialization, configuration,
30 * and monitoring.
31 */
32#include <asm/octeon/octeon.h>
33
34#include "cvmx-config.h"
35
36
37#include "cvmx-mdio.h"
38#include "cvmx-pko.h"
39#include "cvmx-helper.h"
40#include "cvmx-helper-board.h"
41
42#include <asm/octeon/cvmx-npi-defs.h>
43#include "cvmx-gmxx-defs.h"
44#include "cvmx-asxx-defs.h"
45#include "cvmx-dbg-defs.h"
46
47void __cvmx_interrupt_gmxx_enable(int interface);
48void __cvmx_interrupt_asxx_enable(int block);
49
50/**
51 * Probe RGMII ports and determine the number present
52 *
53 * @interface: Interface to probe
54 *
55 * Returns Number of RGMII/GMII/MII ports (0-4).
56 */
57int __cvmx_helper_rgmii_probe(int interface)
58{
59 int num_ports = 0;
60 union cvmx_gmxx_inf_mode mode;
61 mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
62
63 if (mode.s.type) {
64 if (OCTEON_IS_MODEL(OCTEON_CN38XX)
65 || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
66 cvmx_dprintf("ERROR: RGMII initialize called in "
67 "SPI interface\n");
68 } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
69 || OCTEON_IS_MODEL(OCTEON_CN30XX)
70 || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
71 /*
72 * On these chips "type" says we're in
73 * GMII/MII mode. This limits us to 2 ports
74 */
75 num_ports = 2;
76 } else {
77 cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
78 __func__);
79 }
80 } else {
81 if (OCTEON_IS_MODEL(OCTEON_CN38XX)
82 || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
83 num_ports = 4;
84 } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
85 || OCTEON_IS_MODEL(OCTEON_CN30XX)
86 || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
87 num_ports = 3;
88 } else {
89 cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
90 __func__);
91 }
92 }
93 return num_ports;
94}
95
96/**
97 * Put an RGMII interface in loopback mode. Internal packets sent
98 * out will be received back again on the same port. Externally
99 * received packets will echo back out.
100 *
101 * @port: IPD port number to loop.
102 */
103void cvmx_helper_rgmii_internal_loopback(int port)
104{
105 int interface = (port >> 4) & 1;
106 int index = port & 0xf;
107 uint64_t tmp;
108
109 union cvmx_gmxx_prtx_cfg gmx_cfg;
110 gmx_cfg.u64 = 0;
111 gmx_cfg.s.duplex = 1;
112 gmx_cfg.s.slottime = 1;
113 gmx_cfg.s.speed = 1;
114 cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
115 cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
116 cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
117 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
118 tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
119 cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
120 tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
121 cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
122 tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
123 cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
124 gmx_cfg.s.en = 1;
125 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
126}
127
128/**
129 * Workaround ASX setup errata with CN38XX pass1
130 *
131 * @interface: Interface to setup
132 * @port: Port to setup (0..3)
133 * @cpu_clock_hz:
134 * Chip frequency in Hertz
135 *
136 * Returns Zero on success, negative on failure
137 */
138static int __cvmx_helper_errata_asx_pass1(int interface, int port,
139 int cpu_clock_hz)
140{
141 /* Set hi water mark as per errata GMX-4 */
142 if (cpu_clock_hz >= 325000000 && cpu_clock_hz < 375000000)
143 cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 12);
144 else if (cpu_clock_hz >= 375000000 && cpu_clock_hz < 437000000)
145 cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 11);
146 else if (cpu_clock_hz >= 437000000 && cpu_clock_hz < 550000000)
147 cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 10);
148 else if (cpu_clock_hz >= 550000000 && cpu_clock_hz < 687000000)
149 cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 9);
150 else
151 cvmx_dprintf("Illegal clock frequency (%d). "
152 "CVMX_ASXX_TX_HI_WATERX not set\n", cpu_clock_hz);
153 return 0;
154}
155
156/**
157 * Configure all of the ASX, GMX, and PKO regsiters required
158 * to get RGMII to function on the supplied interface.
159 *
160 * @interface: PKO Interface to configure (0 or 1)
161 *
162 * Returns Zero on success
163 */
164int __cvmx_helper_rgmii_enable(int interface)
165{
166 int num_ports = cvmx_helper_ports_on_interface(interface);
167 int port;
168 struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get();
169 union cvmx_gmxx_inf_mode mode;
170 union cvmx_asxx_tx_prt_en asx_tx;
171 union cvmx_asxx_rx_prt_en asx_rx;
172
173 mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
174
175 if (mode.s.en == 0)
176 return -1;
177 if ((OCTEON_IS_MODEL(OCTEON_CN38XX) ||
178 OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)
179 /* Ignore SPI interfaces */
180 return -1;
181
182 /* Configure the ASX registers needed to use the RGMII ports */
183 asx_tx.u64 = 0;
184 asx_tx.s.prt_en = cvmx_build_mask(num_ports);
185 cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
186
187 asx_rx.u64 = 0;
188 asx_rx.s.prt_en = cvmx_build_mask(num_ports);
189 cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
190
191 /* Configure the GMX registers needed to use the RGMII ports */
192 for (port = 0; port < num_ports; port++) {
193 /* Setting of CVMX_GMXX_TXX_THRESH has been moved to
194 __cvmx_helper_setup_gmx() */
195
196 if (cvmx_octeon_is_pass1())
197 __cvmx_helper_errata_asx_pass1(interface, port,
198 sys_info_ptr->
199 cpu_clock_hz);
200 else {
201 /*
202 * Configure more flexible RGMII preamble
203 * checking. Pass 1 doesn't support this
204 * feature.
205 */
206 union cvmx_gmxx_rxx_frm_ctl frm_ctl;
207 frm_ctl.u64 =
208 cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
209 (port, interface));
210 /* New field, so must be compile time */
211 frm_ctl.s.pre_free = 1;
212 cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface),
213 frm_ctl.u64);
214 }
215
216 /*
217 * Each pause frame transmitted will ask for about 10M
218 * bit times before resume. If buffer space comes
219 * available before that time has expired, an XON
220 * pause frame (0 time) will be transmitted to restart
221 * the flow.
222 */
223 cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface),
224 20000);
225 cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL
226 (port, interface), 19000);
227
228 if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
229 cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
230 16);
231 cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
232 16);
233 } else {
234 cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
235 24);
236 cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
237 24);
238 }
239 }
240
241 __cvmx_helper_setup_gmx(interface, num_ports);
242
243 /* enable the ports now */
244 for (port = 0; port < num_ports; port++) {
245 union cvmx_gmxx_prtx_cfg gmx_cfg;
246 cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port
247 (interface, port));
248 gmx_cfg.u64 =
249 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
250 gmx_cfg.s.en = 1;
251 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface),
252 gmx_cfg.u64);
253 }
254 __cvmx_interrupt_asxx_enable(interface);
255 __cvmx_interrupt_gmxx_enable(interface);
256
257 return 0;
258}
259
260/**
261 * Return the link state of an IPD/PKO port as returned by
262 * auto negotiation. The result of this function may not match
263 * Octeon's link config if auto negotiation has changed since
264 * the last call to cvmx_helper_link_set().
265 *
266 * @ipd_port: IPD/PKO port to query
267 *
268 * Returns Link state
269 */
270cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
271{
272 int interface = cvmx_helper_get_interface_num(ipd_port);
273 int index = cvmx_helper_get_interface_index_num(ipd_port);
274 union cvmx_asxx_prt_loop asxx_prt_loop;
275
276 asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
277 if (asxx_prt_loop.s.int_loop & (1 << index)) {
278 /* Force 1Gbps full duplex on internal loopback */
279 cvmx_helper_link_info_t result;
280 result.u64 = 0;
281 result.s.full_duplex = 1;
282 result.s.link_up = 1;
283 result.s.speed = 1000;
284 return result;
285 } else
286 return __cvmx_helper_board_link_get(ipd_port);
287}
288
289/**
290 * Configure an IPD/PKO port for the specified link state. This
291 * function does not influence auto negotiation at the PHY level.
292 * The passed link state must always match the link state returned
293 * by cvmx_helper_link_get(). It is normally best to use
294 * cvmx_helper_link_autoconf() instead.
295 *
296 * @ipd_port: IPD/PKO port to configure
297 * @link_info: The new link state
298 *
299 * Returns Zero on success, negative on failure
300 */
301int __cvmx_helper_rgmii_link_set(int ipd_port,
302 cvmx_helper_link_info_t link_info)
303{
304 int result = 0;
305 int interface = cvmx_helper_get_interface_num(ipd_port);
306 int index = cvmx_helper_get_interface_index_num(ipd_port);
307 union cvmx_gmxx_prtx_cfg original_gmx_cfg;
308 union cvmx_gmxx_prtx_cfg new_gmx_cfg;
309 union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
310 union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
311 union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
312 union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
313 int i;
314
315 /* Ignore speed sets in the simulator */
316 if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
317 return 0;
318
319 /* Read the current settings so we know the current enable state */
320 original_gmx_cfg.u64 =
321 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
322 new_gmx_cfg = original_gmx_cfg;
323
324 /* Disable the lowest level RX */
325 cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
326 cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) &
327 ~(1 << index));
328
329 /* Disable all queues so that TX should become idle */
330 for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
331 int queue = cvmx_pko_get_base_queue(ipd_port) + i;
332 cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
333 pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
334 pko_mem_queue_qos.s.pid = ipd_port;
335 pko_mem_queue_qos.s.qid = queue;
336 pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
337 pko_mem_queue_qos.s.qos_mask = 0;
338 cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
339 }
340
341 /* Disable backpressure */
342 gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
343 gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
344 gmx_tx_ovr_bp.s.bp &= ~(1 << index);
345 gmx_tx_ovr_bp.s.en |= 1 << index;
346 cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
347 cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
348
349 /*
350 * Poll the GMX state machine waiting for it to become
351 * idle. Preferably we should only change speed when it is
352 * idle. If it doesn't become idle we will still do the speed
353 * change, but there is a slight chance that GMX will
354 * lockup.
355 */
356 cvmx_write_csr(CVMX_NPI_DBG_SELECT,
357 interface * 0x800 + index * 0x100 + 0x880);
358 CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 7,
359 ==, 0, 10000);
360 CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 0xf,
361 ==, 0, 10000);
362
363 /* Disable the port before we make any changes */
364 new_gmx_cfg.s.en = 0;
365 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
366 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
367
368 /* Set full/half duplex */
369 if (cvmx_octeon_is_pass1())
370 /* Half duplex is broken for 38XX Pass 1 */
371 new_gmx_cfg.s.duplex = 1;
372 else if (!link_info.s.link_up)
373 /* Force full duplex on down links */
374 new_gmx_cfg.s.duplex = 1;
375 else
376 new_gmx_cfg.s.duplex = link_info.s.full_duplex;
377
378 /* Set the link speed. Anything unknown is set to 1Gbps */
379 if (link_info.s.speed == 10) {
380 new_gmx_cfg.s.slottime = 0;
381 new_gmx_cfg.s.speed = 0;
382 } else if (link_info.s.speed == 100) {
383 new_gmx_cfg.s.slottime = 0;
384 new_gmx_cfg.s.speed = 0;
385 } else {
386 new_gmx_cfg.s.slottime = 1;
387 new_gmx_cfg.s.speed = 1;
388 }
389
390 /* Adjust the clocks */
391 if (link_info.s.speed == 10) {
392 cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
393 cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
394 cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
395 } else if (link_info.s.speed == 100) {
396 cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
397 cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
398 cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
399 } else {
400 cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
401 cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
402 cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
403 }
404
405 if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
406 if ((link_info.s.speed == 10) || (link_info.s.speed == 100)) {
407 union cvmx_gmxx_inf_mode mode;
408 mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
409
410 /*
411 * Port .en .type .p0mii Configuration
412 * ---- --- ----- ------ -----------------------------------------
413 * X 0 X X All links are disabled.
414 * 0 1 X 0 Port 0 is RGMII
415 * 0 1 X 1 Port 0 is MII
416 * 1 1 0 X Ports 1 and 2 are configured as RGMII ports.
417 * 1 1 1 X Port 1: GMII/MII; Port 2: disabled. GMII or
418 * MII port is selected by GMX_PRT1_CFG[SPEED].
419 */
420
421 /* In MII mode, CLK_CNT = 1. */
422 if (((index == 0) && (mode.s.p0mii == 1))
423 || ((index != 0) && (mode.s.type == 1))) {
424 cvmx_write_csr(CVMX_GMXX_TXX_CLK
425 (index, interface), 1);
426 }
427 }
428 }
429
430 /* Do a read to make sure all setup stuff is complete */
431 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
432
433 /* Save the new GMX setting without enabling the port */
434 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
435
436 /* Enable the lowest level RX */
437 cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
438 cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1 <<
439 index));
440
441 /* Re-enable the TX path */
442 for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
443 int queue = cvmx_pko_get_base_queue(ipd_port) + i;
444 cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
445 cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS,
446 pko_mem_queue_qos_save[i].u64);
447 }
448
449 /* Restore backpressure */
450 cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
451
452 /* Restore the GMX enable state. Port config is complete */
453 new_gmx_cfg.s.en = original_gmx_cfg.s.en;
454 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
455
456 return result;
457}
458
459/**
460 * Configure a port for internal and/or external loopback. Internal loopback
461 * causes packets sent by the port to be received by Octeon. External loopback
462 * causes packets received from the wire to sent out again.
463 *
464 * @ipd_port: IPD/PKO port to loopback.
465 * @enable_internal:
466 * Non zero if you want internal loopback
467 * @enable_external:
468 * Non zero if you want external loopback
469 *
470 * Returns Zero on success, negative on failure.
471 */
472int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
473 int enable_external)
474{
475 int interface = cvmx_helper_get_interface_num(ipd_port);
476 int index = cvmx_helper_get_interface_index_num(ipd_port);
477 int original_enable;
478 union cvmx_gmxx_prtx_cfg gmx_cfg;
479 union cvmx_asxx_prt_loop asxx_prt_loop;
480
481 /* Read the current enable state and save it */
482 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
483 original_enable = gmx_cfg.s.en;
484 /* Force port to be disabled */
485 gmx_cfg.s.en = 0;
486 if (enable_internal) {
487 /* Force speed if we're doing internal loopback */
488 gmx_cfg.s.duplex = 1;
489 gmx_cfg.s.slottime = 1;
490 gmx_cfg.s.speed = 1;
491 cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
492 cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
493 cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
494 }
495 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
496
497 /* Set the loopback bits */
498 asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
499 if (enable_internal)
500 asxx_prt_loop.s.int_loop |= 1 << index;
501 else
502 asxx_prt_loop.s.int_loop &= ~(1 << index);
503 if (enable_external)
504 asxx_prt_loop.s.ext_loop |= 1 << index;
505 else
506 asxx_prt_loop.s.ext_loop &= ~(1 << index);
507 cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);
508
509 /* Force enables in internal loopback */
510 if (enable_internal) {
511 uint64_t tmp;
512 tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
513 cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface),
514 (1 << index) | tmp);
515 tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
516 cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
517 (1 << index) | tmp);
518 original_enable = 1;
519 }
520
521 /* Restore the enable state */
522 gmx_cfg.s.en = original_enable;
523 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
524 return 0;
525}
diff --git a/drivers/staging/octeon/cvmx-helper-rgmii.h b/drivers/staging/octeon/cvmx-helper-rgmii.h
new file mode 100644
index 000000000000..ea2652604a57
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-rgmii.h
@@ -0,0 +1,110 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 * @file
30 *
31 * Functions for RGMII/GMII/MII initialization, configuration,
32 * and monitoring.
33 *
34 */
35#ifndef __CVMX_HELPER_RGMII_H__
36#define __CVMX_HELPER_RGMII_H__
37
38/**
39 * Probe RGMII ports and determine the number present
40 *
41 * @interface: Interface to probe
42 *
43 * Returns Number of RGMII/GMII/MII ports (0-4).
44 */
45extern int __cvmx_helper_rgmii_probe(int interface);
46
47/**
48 * Put an RGMII interface in loopback mode. Internal packets sent
49 * out will be received back again on the same port. Externally
50 * received packets will echo back out.
51 *
52 * @port: IPD port number to loop.
53 */
54extern void cvmx_helper_rgmii_internal_loopback(int port);
55
56/**
57 * Configure all of the ASX, GMX, and PKO regsiters required
58 * to get RGMII to function on the supplied interface.
59 *
60 * @interface: PKO Interface to configure (0 or 1)
61 *
62 * Returns Zero on success
63 */
64extern int __cvmx_helper_rgmii_enable(int interface);
65
66/**
67 * Return the link state of an IPD/PKO port as returned by
68 * auto negotiation. The result of this function may not match
69 * Octeon's link config if auto negotiation has changed since
70 * the last call to cvmx_helper_link_set().
71 *
72 * @ipd_port: IPD/PKO port to query
73 *
74 * Returns Link state
75 */
76extern cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port);
77
78/**
79 * Configure an IPD/PKO port for the specified link state. This
80 * function does not influence auto negotiation at the PHY level.
81 * The passed link state must always match the link state returned
82 * by cvmx_helper_link_get(). It is normally best to use
83 * cvmx_helper_link_autoconf() instead.
84 *
85 * @ipd_port: IPD/PKO port to configure
86 * @link_info: The new link state
87 *
88 * Returns Zero on success, negative on failure
89 */
90extern int __cvmx_helper_rgmii_link_set(int ipd_port,
91 cvmx_helper_link_info_t link_info);
92
93/**
94 * Configure a port for internal and/or external loopback. Internal loopback
95 * causes packets sent by the port to be received by Octeon. External loopback
96 * causes packets received from the wire to sent out again.
97 *
98 * @ipd_port: IPD/PKO port to loopback.
99 * @enable_internal:
100 * Non zero if you want internal loopback
101 * @enable_external:
102 * Non zero if you want external loopback
103 *
104 * Returns Zero on success, negative on failure.
105 */
106extern int __cvmx_helper_rgmii_configure_loopback(int ipd_port,
107 int enable_internal,
108 int enable_external);
109
110#endif
diff --git a/drivers/staging/octeon/cvmx-helper-sgmii.c b/drivers/staging/octeon/cvmx-helper-sgmii.c
new file mode 100644
index 000000000000..6214e3b6d975
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-sgmii.c
@@ -0,0 +1,550 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Functions for SGMII initialization, configuration,
30 * and monitoring.
31 */
32
33#include <asm/octeon/octeon.h>
34
35#include "cvmx-config.h"
36
37#include "cvmx-mdio.h"
38#include "cvmx-helper.h"
39#include "cvmx-helper-board.h"
40
41#include "cvmx-gmxx-defs.h"
42#include "cvmx-pcsx-defs.h"
43
44void __cvmx_interrupt_gmxx_enable(int interface);
45void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
46void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
47
/**
 * Perform initialization required only once for an SGMII port.
 *
 * Programs the PCS link timer and the auto-negotiation advertisement
 * register for the port. GMX is disabled on entry and left disabled;
 * enabling is done later by __cvmx_helper_sgmii_enable().
 *
 * @interface: Interface to init
 * @index: Index of port on the interface
 *
 * Returns Zero on success, negative on failure (currently always zero)
 */
static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
{
	const uint64_t clock_mhz = cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
	union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
	union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg;
	union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;

	/* Disable GMX so the port is quiescent while PCS is programmed */
	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmxx_prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);

	/*
	 * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
	 * appropriate value. 1000BASE-X specifies a 10ms
	 * interval. SGMII specifies a 1.6ms interval.
	 */
	pcs_misc_ctl_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
	pcsx_linkx_timer_count_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
	if (pcs_misc_ctl_reg.s.mode) {
		/* 1000BASE-X: 10ms = 10000us * cycles-per-us; the >> 10
		 * suggests COUNT ticks every 1024 core clocks — confirm
		 * against the OCTEON HRM. */
		pcsx_linkx_timer_count_reg.s.count =
		    (10000ull * clock_mhz) >> 10;
	} else {
		/* SGMII: 1.6ms, same scaling as above */
		pcsx_linkx_timer_count_reg.s.count =
		    (1600ull * clock_mhz) >> 10;
	}
	cvmx_write_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface),
		       pcsx_linkx_timer_count_reg.u64);

	/*
	 * Write the advertisement register to be used as the
	 * tx_Config_Reg<D15:D0> of the autonegotiation. In
	 * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
	 * In SGMII PHY mode, tx_Config_Reg<D15:D0> is
	 * PCS*_SGM*_AN_ADV_REG. In SGMII MAC mode,
	 * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
	 * step can be skipped.
	 */
	if (pcs_misc_ctl_reg.s.mode) {
		/* 1000BASE-X: advertise full/half duplex, symmetric and
		 * asymmetric pause, no remote fault */
		union cvmx_pcsx_anx_adv_reg pcsx_anx_adv_reg;
		pcsx_anx_adv_reg.u64 =
		    cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
		pcsx_anx_adv_reg.s.rem_flt = 0;
		pcsx_anx_adv_reg.s.pause = 3;
		pcsx_anx_adv_reg.s.hfd = 1;
		pcsx_anx_adv_reg.s.fd = 1;
		cvmx_write_csr(CVMX_PCSX_ANX_ADV_REG(index, interface),
			       pcsx_anx_adv_reg.u64);
	} else {
		union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
		pcsx_miscx_ctl_reg.u64 =
		    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
		if (pcsx_miscx_ctl_reg.s.mac_phy) {
			/* PHY Mode: advertise link up, full duplex, 1Gbps */
			union cvmx_pcsx_sgmx_an_adv_reg pcsx_sgmx_an_adv_reg;
			pcsx_sgmx_an_adv_reg.u64 =
			    cvmx_read_csr(CVMX_PCSX_SGMX_AN_ADV_REG
					  (index, interface));
			pcsx_sgmx_an_adv_reg.s.link = 1;
			pcsx_sgmx_an_adv_reg.s.dup = 1;
			pcsx_sgmx_an_adv_reg.s.speed = 2;
			cvmx_write_csr(CVMX_PCSX_SGMX_AN_ADV_REG
				       (index, interface),
				       pcsx_sgmx_an_adv_reg.u64);
		} else {
			/* MAC Mode - Nothing to do */
		}
	}
	return 0;
}
131
/**
 * Initialize the SERDES link for the first time or after a loss
 * of link.
 *
 * Resets the PCS block (skipped on the simulator), then restarts
 * auto-negotiation and waits for it to complete.
 *
 * @interface: Interface to init
 * @index: Index of port on the interface
 *
 * Returns Zero on success, negative on failure (reset or autoneg timeout)
 */
static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
{
	union cvmx_pcsx_mrx_control_reg control_reg;

	/*
	 * Take PCS through a reset sequence.
	 * PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
	 * Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the
	 * value of the other PCS*_MR*_CONTROL_REG bits). Read
	 * PCS*_MR*_CONTROL_REG[RESET] until it changes value to
	 * zero.
	 */
	control_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
		control_reg.s.reset = 1;
		cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
			       control_reg.u64);
		/* RESET is self-clearing; poll until the hardware clears it */
		if (CVMX_WAIT_FOR_FIELD64
		    (CVMX_PCSX_MRX_CONTROL_REG(index, interface),
		     union cvmx_pcsx_mrx_control_reg, reset, ==, 0, 10000)) {
			cvmx_dprintf("SGMII%d: Timeout waiting for port %d "
				     "to finish reset\n",
				     interface, index);
			return -1;
		}
	}

	/*
	 * Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh
	 * sgmii negotiation starts.
	 */
	control_reg.s.rst_an = 1;
	control_reg.s.an_en = 1;
	control_reg.s.pwr_dn = 0;
	cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
		       control_reg.u64);

	/*
	 * Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating
	 * that sgmii autonegotiation is complete. In MAC mode this
	 * isn't an ethernet link, but a link between Octeon and the
	 * PHY.
	 */
	if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
	    CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface),
				  union cvmx_pcsx_mrx_status_reg, an_cpt, ==, 1,
				  10000)) {
		/* Timeout is common when no link partner is present, so the
		 * diagnostic print stays disabled to avoid console noise. */
		/* cvmx_dprintf("SGMII%d: Port %d link timeout\n", interface, index); */
		return -1;
	}
	return 0;
}
194
/**
 * Configure an SGMII link to the specified speed after the SERDES
 * link is up.
 *
 * Sequence: disable GMX (remembering its enable state), wait for
 * RX/TX idle, reprogram speed/duplex/slot-time, then restore the
 * enable state. The order of these writes matters to the hardware.
 *
 * @interface: Interface to init
 * @index: Index of port on the interface
 * @link_info: Link state to configure
 *
 * Returns Zero on success, negative on failure (GMX idle timeout)
 */
static int __cvmx_helper_sgmii_hardware_init_link_speed(int interface,
							int index,
							cvmx_helper_link_info_t
							link_info)
{
	int is_enabled;
	union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
	union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;

	/* Disable GMX before we make any changes. Remember the enable state */
	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	is_enabled = gmxx_prtx_cfg.s.en;
	gmxx_prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);

	/* Wait for GMX to be idle */
	if (CVMX_WAIT_FOR_FIELD64
	    (CVMX_GMXX_PRTX_CFG(index, interface), union cvmx_gmxx_prtx_cfg,
	     rx_idle, ==, 1, 10000)
	    || CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
				     union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
				     10000)) {
		cvmx_dprintf
		    ("SGMII%d: Timeout waiting for port %d to be idle\n",
		     interface, index);
		return -1;
	}

	/* Read GMX CFG again to make sure the disable completed */
	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

	/*
	 * Get the misc control for PCS. We will need to set the
	 * duplication amount.
	 */
	pcsx_miscx_ctl_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));

	/*
	 * Use GMXENO to force the link down if the status we get says
	 * it should be down.
	 */
	pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;

	/* Only change the duplex setting if the link is up */
	if (link_info.s.link_up)
		gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;

	/* Do speed based setting for GMX: speed/speed_msb encode the rate,
	 * slottime/TXX_SLOT/TXX_BURST set half-duplex collision parameters,
	 * samp_pt sets the PCS sampling point for the rate. */
	switch (link_info.s.speed) {
	case 10:
		gmxx_prtx_cfg.s.speed = 0;
		gmxx_prtx_cfg.s.speed_msb = 1;
		gmxx_prtx_cfg.s.slottime = 0;
		/* Setting from GMX-603 */
		pcsx_miscx_ctl_reg.s.samp_pt = 25;
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
		break;
	case 100:
		gmxx_prtx_cfg.s.speed = 0;
		gmxx_prtx_cfg.s.speed_msb = 0;
		gmxx_prtx_cfg.s.slottime = 0;
		pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
		break;
	case 1000:
		gmxx_prtx_cfg.s.speed = 1;
		gmxx_prtx_cfg.s.speed_msb = 0;
		gmxx_prtx_cfg.s.slottime = 1;
		pcsx_miscx_ctl_reg.s.samp_pt = 1;
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 8192);
		break;
	default:
		/* Unknown speed (including link down): leave settings alone */
		break;
	}

	/* Write the new misc control for PCS */
	cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
		       pcsx_miscx_ctl_reg.u64);

	/* Write the new GMX settings with the port still disabled */
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config completed */
	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

	/* Restore the enabled / disabled state */
	gmxx_prtx_cfg.s.en = is_enabled;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);

	return 0;
}
300
301/**
302 * Bring up the SGMII interface to be ready for packet I/O but
303 * leave I/O disabled using the GMX override. This function
304 * follows the bringup documented in 10.6.3 of the manual.
305 *
306 * @interface: Interface to bringup
307 * @num_ports: Number of ports on the interface
308 *
309 * Returns Zero on success, negative on failure
310 */
311static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
312{
313 int index;
314
315 __cvmx_helper_setup_gmx(interface, num_ports);
316
317 for (index = 0; index < num_ports; index++) {
318 int ipd_port = cvmx_helper_get_ipd_port(interface, index);
319 __cvmx_helper_sgmii_hardware_init_one_time(interface, index);
320 __cvmx_helper_sgmii_link_set(ipd_port,
321 __cvmx_helper_sgmii_link_get
322 (ipd_port));
323
324 }
325
326 return 0;
327}
328
329/**
330 * Probe a SGMII interface and determine the number of ports
331 * connected to it. The SGMII interface should still be down after
332 * this call.
333 *
334 * @interface: Interface to probe
335 *
336 * Returns Number of ports on the interface. Zero to disable.
337 */
338int __cvmx_helper_sgmii_probe(int interface)
339{
340 union cvmx_gmxx_inf_mode mode;
341
342 /*
343 * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
344 * interface needs to be enabled before IPD otherwise per port
345 * backpressure may not work properly
346 */
347 mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
348 mode.s.en = 1;
349 cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
350 return 4;
351}
352
353/**
354 * Bringup and enable a SGMII interface. After this call packet
355 * I/O should be fully functional. This is called with IPD
356 * enabled but PKO disabled.
357 *
358 * @interface: Interface to bring up
359 *
360 * Returns Zero on success, negative on failure
361 */
362int __cvmx_helper_sgmii_enable(int interface)
363{
364 int num_ports = cvmx_helper_ports_on_interface(interface);
365 int index;
366
367 __cvmx_helper_sgmii_hardware_init(interface, num_ports);
368
369 for (index = 0; index < num_ports; index++) {
370 union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
371 gmxx_prtx_cfg.u64 =
372 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
373 gmxx_prtx_cfg.s.en = 1;
374 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
375 gmxx_prtx_cfg.u64);
376 __cvmx_interrupt_pcsx_intx_en_reg_enable(index, interface);
377 }
378 __cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
379 __cvmx_interrupt_gmxx_enable(interface);
380 return 0;
381}
382
/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @ipd_port: IPD/PKO port to query
 *
 * Returns Link state (all-zero result means link down)
 */
cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port)
{
	cvmx_helper_link_info_t result;
	union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;

	result.u64 = 0;

	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
		/* The simulator gives you a simulated 1Gbps full duplex link */
		result.s.link_up = 1;
		result.s.full_duplex = 1;
		result.s.speed = 1000;
		return result;
	}

	pcsx_mrx_control_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
	if (pcsx_mrx_control_reg.s.loopbck1) {
		/* Force 1Gbps full duplex link for internal loopback */
		result.s.link_up = 1;
		result.s.full_duplex = 1;
		result.s.speed = 1000;
		return result;
	}

	pcs_misc_ctl_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
	if (pcs_misc_ctl_reg.s.mode) {
		/* 1000BASE-X */
		/* FIXME: not implemented — result stays zero (link down) */
	} else {
		union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
		pcsx_miscx_ctl_reg.u64 =
		    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
		if (pcsx_miscx_ctl_reg.s.mac_phy) {
			/* PHY Mode: decode link state from the PCS
			 * auto-negotiation results */
			union cvmx_pcsx_mrx_status_reg pcsx_mrx_status_reg;
			union cvmx_pcsx_anx_results_reg pcsx_anx_results_reg;

			/*
			 * Don't bother continuing if the SERDES low
			 * level link is down
			 */
			pcsx_mrx_status_reg.u64 =
			    cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG
					  (index, interface));
			if (pcsx_mrx_status_reg.s.lnk_st == 0) {
				/* Try to restart the link; on failure
				 * report link down (result is zero) */
				if (__cvmx_helper_sgmii_hardware_init_link
				    (interface, index) != 0)
					return result;
			}

			/* Read the autoneg results */
			pcsx_anx_results_reg.u64 =
			    cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG
					  (index, interface));
			if (pcsx_anx_results_reg.s.an_cpt) {
				/*
				 * Auto negotiation is complete. Set
				 * status accordingly.
				 */
				result.s.full_duplex =
				    pcsx_anx_results_reg.s.dup;
				result.s.link_up =
				    pcsx_anx_results_reg.s.link_ok;
				switch (pcsx_anx_results_reg.s.spd) {
				case 0:
					result.s.speed = 10;
					break;
				case 1:
					result.s.speed = 100;
					break;
				case 2:
					result.s.speed = 1000;
					break;
				default:
					/* Illegal speed encoding */
					result.s.speed = 0;
					result.s.link_up = 0;
					break;
				}
			} else {
				/*
				 * Auto negotiation isn't
				 * complete. Return link down.
				 */
				result.s.speed = 0;
				result.s.link_up = 0;
			}
		} else {	/* MAC Mode: the board code knows the PHY */

			result = __cvmx_helper_board_link_get(ipd_port);
		}
	}
	return result;
}
491
492/**
493 * Configure an IPD/PKO port for the specified link state. This
494 * function does not influence auto negotiation at the PHY level.
495 * The passed link state must always match the link state returned
496 * by cvmx_helper_link_get(). It is normally best to use
497 * cvmx_helper_link_autoconf() instead.
498 *
499 * @ipd_port: IPD/PKO port to configure
500 * @link_info: The new link state
501 *
502 * Returns Zero on success, negative on failure
503 */
504int __cvmx_helper_sgmii_link_set(int ipd_port,
505 cvmx_helper_link_info_t link_info)
506{
507 int interface = cvmx_helper_get_interface_num(ipd_port);
508 int index = cvmx_helper_get_interface_index_num(ipd_port);
509 __cvmx_helper_sgmii_hardware_init_link(interface, index);
510 return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index,
511 link_info);
512}
513
514/**
515 * Configure a port for internal and/or external loopback. Internal
516 * loopback causes packets sent by the port to be received by
517 * Octeon. External loopback causes packets received from the wire to
518 * sent out again.
519 *
520 * @ipd_port: IPD/PKO port to loopback.
521 * @enable_internal:
522 * Non zero if you want internal loopback
523 * @enable_external:
524 * Non zero if you want external loopback
525 *
526 * Returns Zero on success, negative on failure.
527 */
528int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal,
529 int enable_external)
530{
531 int interface = cvmx_helper_get_interface_num(ipd_port);
532 int index = cvmx_helper_get_interface_index_num(ipd_port);
533 union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
534 union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
535
536 pcsx_mrx_control_reg.u64 =
537 cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
538 pcsx_mrx_control_reg.s.loopbck1 = enable_internal;
539 cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
540 pcsx_mrx_control_reg.u64);
541
542 pcsx_miscx_ctl_reg.u64 =
543 cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
544 pcsx_miscx_ctl_reg.s.loopbck2 = enable_external;
545 cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
546 pcsx_miscx_ctl_reg.u64);
547
548 __cvmx_helper_sgmii_hardware_init_link(interface, index);
549 return 0;
550}
diff --git a/drivers/staging/octeon/cvmx-helper-sgmii.h b/drivers/staging/octeon/cvmx-helper-sgmii.h
new file mode 100644
index 000000000000..19b48d60857f
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-sgmii.h
@@ -0,0 +1,104 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 * @file
30 *
31 * Functions for SGMII initialization, configuration,
32 * and monitoring.
33 *
34 */
35#ifndef __CVMX_HELPER_SGMII_H__
36#define __CVMX_HELPER_SGMII_H__
37
38/**
39 * Probe a SGMII interface and determine the number of ports
40 * connected to it. The SGMII interface should still be down after
41 * this call.
42 *
43 * @interface: Interface to probe
44 *
45 * Returns Number of ports on the interface. Zero to disable.
46 */
47extern int __cvmx_helper_sgmii_probe(int interface);
48
49/**
50 * Bringup and enable a SGMII interface. After this call packet
51 * I/O should be fully functional. This is called with IPD
52 * enabled but PKO disabled.
53 *
54 * @interface: Interface to bring up
55 *
56 * Returns Zero on success, negative on failure
57 */
58extern int __cvmx_helper_sgmii_enable(int interface);
59
60/**
61 * Return the link state of an IPD/PKO port as returned by
62 * auto negotiation. The result of this function may not match
63 * Octeon's link config if auto negotiation has changed since
64 * the last call to cvmx_helper_link_set().
65 *
66 * @ipd_port: IPD/PKO port to query
67 *
68 * Returns Link state
69 */
70extern cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port);
71
72/**
73 * Configure an IPD/PKO port for the specified link state. This
74 * function does not influence auto negotiation at the PHY level.
75 * The passed link state must always match the link state returned
76 * by cvmx_helper_link_get(). It is normally best to use
77 * cvmx_helper_link_autoconf() instead.
78 *
79 * @ipd_port: IPD/PKO port to configure
80 * @link_info: The new link state
81 *
82 * Returns Zero on success, negative on failure
83 */
84extern int __cvmx_helper_sgmii_link_set(int ipd_port,
85 cvmx_helper_link_info_t link_info);
86
87/**
88 * Configure a port for internal and/or external loopback. Internal loopback
89 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
91 *
92 * @ipd_port: IPD/PKO port to loopback.
93 * @enable_internal:
94 * Non zero if you want internal loopback
95 * @enable_external:
96 * Non zero if you want external loopback
97 *
98 * Returns Zero on success, negative on failure.
99 */
100extern int __cvmx_helper_sgmii_configure_loopback(int ipd_port,
101 int enable_internal,
102 int enable_external);
103
104#endif
diff --git a/drivers/staging/octeon/cvmx-helper-spi.c b/drivers/staging/octeon/cvmx-helper-spi.c
new file mode 100644
index 000000000000..8ba6c832471e
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-spi.c
@@ -0,0 +1,195 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28void __cvmx_interrupt_gmxx_enable(int interface);
29void __cvmx_interrupt_spxx_int_msk_enable(int index);
30void __cvmx_interrupt_stxx_int_msk_enable(int index);
31
32/*
33 * Functions for SPI initialization, configuration,
34 * and monitoring.
35 */
36#include <asm/octeon/octeon.h>
37
38#include "cvmx-config.h"
39#include "cvmx-spi.h"
40#include "cvmx-helper.h"
41
42#include "cvmx-pip-defs.h"
43#include "cvmx-pko-defs.h"
44
45/*
46 * CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
47 * initialization routines wait for SPI training. You can override the
48 * value using executive-config.h if necessary.
49 */
50#ifndef CVMX_HELPER_SPI_TIMEOUT
51#define CVMX_HELPER_SPI_TIMEOUT 10
52#endif
53
54/**
55 * Probe a SPI interface and determine the number of ports
56 * connected to it. The SPI interface should still be down after
57 * this call.
58 *
59 * @interface: Interface to probe
60 *
61 * Returns Number of ports on the interface. Zero to disable.
62 */
63int __cvmx_helper_spi_probe(int interface)
64{
65 int num_ports = 0;
66
67 if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
68 cvmx_spi4000_is_present(interface)) {
69 num_ports = 10;
70 } else {
71 union cvmx_pko_reg_crc_enable enable;
72 num_ports = 16;
73 /*
74 * Unlike the SPI4000, most SPI devices don't
75 * automatically put on the L2 CRC. For everything
76 * except for the SPI4000 have PKO append the L2 CRC
77 * to the packet.
78 */
79 enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
80 enable.s.enable |= 0xffff << (interface * 16);
81 cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
82 }
83 __cvmx_helper_setup_gmx(interface, num_ports);
84 return num_ports;
85}
86
87/**
88 * Bringup and enable a SPI interface. After this call packet I/O
89 * should be fully functional. This is called with IPD enabled but
90 * PKO disabled.
91 *
92 * @interface: Interface to bring up
93 *
94 * Returns Zero on success, negative on failure
95 */
96int __cvmx_helper_spi_enable(int interface)
97{
98 /*
99 * Normally the ethernet L2 CRC is checked and stripped in the
100 * GMX block. When you are using SPI, this isn' the case and
101 * IPD needs to check the L2 CRC.
102 */
103 int num_ports = cvmx_helper_ports_on_interface(interface);
104 int ipd_port;
105 for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports;
106 ipd_port++) {
107 union cvmx_pip_prt_cfgx port_config;
108 port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
109 port_config.s.crc_en = 1;
110 cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
111 }
112
113 if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
114 cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
115 CVMX_HELPER_SPI_TIMEOUT, num_ports);
116 if (cvmx_spi4000_is_present(interface))
117 cvmx_spi4000_initialize(interface);
118 }
119 __cvmx_interrupt_spxx_int_msk_enable(interface);
120 __cvmx_interrupt_stxx_int_msk_enable(interface);
121 __cvmx_interrupt_gmxx_enable(interface);
122 return 0;
123}
124
125/**
126 * Return the link state of an IPD/PKO port as returned by
127 * auto negotiation. The result of this function may not match
128 * Octeon's link config if auto negotiation has changed since
129 * the last call to cvmx_helper_link_set().
130 *
131 * @ipd_port: IPD/PKO port to query
132 *
133 * Returns Link state
134 */
135cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port)
136{
137 cvmx_helper_link_info_t result;
138 int interface = cvmx_helper_get_interface_num(ipd_port);
139 int index = cvmx_helper_get_interface_index_num(ipd_port);
140 result.u64 = 0;
141
142 if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
143 /* The simulator gives you a simulated full duplex link */
144 result.s.link_up = 1;
145 result.s.full_duplex = 1;
146 result.s.speed = 10000;
147 } else if (cvmx_spi4000_is_present(interface)) {
148 union cvmx_gmxx_rxx_rx_inbnd inband =
149 cvmx_spi4000_check_speed(interface, index);
150 result.s.link_up = inband.s.status;
151 result.s.full_duplex = inband.s.duplex;
152 switch (inband.s.speed) {
153 case 0: /* 10 Mbps */
154 result.s.speed = 10;
155 break;
156 case 1: /* 100 Mbps */
157 result.s.speed = 100;
158 break;
159 case 2: /* 1 Gbps */
160 result.s.speed = 1000;
161 break;
162 case 3: /* Illegal */
163 result.s.speed = 0;
164 result.s.link_up = 0;
165 break;
166 }
167 } else {
168 /* For generic SPI we can't determine the link, just return some
169 sane results */
170 result.s.link_up = 1;
171 result.s.full_duplex = 1;
172 result.s.speed = 10000;
173 }
174 return result;
175}
176
177/**
178 * Configure an IPD/PKO port for the specified link state. This
179 * function does not influence auto negotiation at the PHY level.
180 * The passed link state must always match the link state returned
181 * by cvmx_helper_link_get(). It is normally best to use
182 * cvmx_helper_link_autoconf() instead.
183 *
184 * @ipd_port: IPD/PKO port to configure
185 * @link_info: The new link state
186 *
187 * Returns Zero on success, negative on failure
188 */
189int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
190{
191 /* Nothing to do. If we have a SPI4000 then the setup was already performed
192 by cvmx_spi4000_check_speed(). If not then there isn't any link
193 info */
194 return 0;
195}
diff --git a/drivers/staging/octeon/cvmx-helper-spi.h b/drivers/staging/octeon/cvmx-helper-spi.h
new file mode 100644
index 000000000000..69bac036d10e
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-spi.h
@@ -0,0 +1,84 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Functions for SPI initialization, configuration,
30 * and monitoring.
31 */
32#ifndef __CVMX_HELPER_SPI_H__
33#define __CVMX_HELPER_SPI_H__
34
35/**
36 * Probe a SPI interface and determine the number of ports
37 * connected to it. The SPI interface should still be down after
38 * this call.
39 *
40 * @interface: Interface to probe
41 *
42 * Returns Number of ports on the interface. Zero to disable.
43 */
44extern int __cvmx_helper_spi_probe(int interface);
45
46/**
47 * Bringup and enable a SPI interface. After this call packet I/O
48 * should be fully functional. This is called with IPD enabled but
49 * PKO disabled.
50 *
51 * @interface: Interface to bring up
52 *
53 * Returns Zero on success, negative on failure
54 */
55extern int __cvmx_helper_spi_enable(int interface);
56
57/**
58 * Return the link state of an IPD/PKO port as returned by
59 * auto negotiation. The result of this function may not match
60 * Octeon's link config if auto negotiation has changed since
61 * the last call to cvmx_helper_link_set().
62 *
63 * @ipd_port: IPD/PKO port to query
64 *
65 * Returns Link state
66 */
67extern cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port);
68
69/**
70 * Configure an IPD/PKO port for the specified link state. This
71 * function does not influence auto negotiation at the PHY level.
72 * The passed link state must always match the link state returned
73 * by cvmx_helper_link_get(). It is normally best to use
74 * cvmx_helper_link_autoconf() instead.
75 *
76 * @ipd_port: IPD/PKO port to configure
77 * @link_info: The new link state
78 *
79 * Returns Zero on success, negative on failure
80 */
81extern int __cvmx_helper_spi_link_set(int ipd_port,
82 cvmx_helper_link_info_t link_info);
83
84#endif
diff --git a/drivers/staging/octeon/cvmx-helper-util.c b/drivers/staging/octeon/cvmx-helper-util.c
new file mode 100644
index 000000000000..41ef8a40bb03
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-util.c
@@ -0,0 +1,433 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Small helper utilities.
30 */
31#include <linux/kernel.h>
32
33#include <asm/octeon/octeon.h>
34
35#include "cvmx-config.h"
36
37#include "cvmx-fpa.h"
38#include "cvmx-pip.h"
39#include "cvmx-pko.h"
40#include "cvmx-ipd.h"
41#include "cvmx-spi.h"
42
43#include "cvmx-helper.h"
44#include "cvmx-helper-util.h"
45
46#include <asm/octeon/cvmx-ipd-defs.h>
47
48/**
49 * Convert a interface mode into a human readable string
50 *
51 * @mode: Mode to convert
52 *
53 * Returns String
54 */
55const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t
56 mode)
57{
58 switch (mode) {
59 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
60 return "DISABLED";
61 case CVMX_HELPER_INTERFACE_MODE_RGMII:
62 return "RGMII";
63 case CVMX_HELPER_INTERFACE_MODE_GMII:
64 return "GMII";
65 case CVMX_HELPER_INTERFACE_MODE_SPI:
66 return "SPI";
67 case CVMX_HELPER_INTERFACE_MODE_PCIE:
68 return "PCIE";
69 case CVMX_HELPER_INTERFACE_MODE_XAUI:
70 return "XAUI";
71 case CVMX_HELPER_INTERFACE_MODE_SGMII:
72 return "SGMII";
73 case CVMX_HELPER_INTERFACE_MODE_PICMG:
74 return "PICMG";
75 case CVMX_HELPER_INTERFACE_MODE_NPI:
76 return "NPI";
77 case CVMX_HELPER_INTERFACE_MODE_LOOP:
78 return "LOOP";
79 }
80 return "UNKNOWN";
81}
82
/**
 * Debug routine to dump the packet structure to the console
 *
 * @work: Work queue entry containing the packet to dump
 *
 * Returns Zero (the dump itself cannot fail)
 */
int cvmx_helper_dump_packet(cvmx_wqe_t *work)
{
	uint64_t count;
	uint64_t remaining_bytes;
	union cvmx_buf_ptr buffer_ptr;
	uint64_t start_of_buffer;
	uint8_t *data_address;
	uint8_t *end_of_data;

	cvmx_dprintf("Packet Length: %u\n", work->len);
	cvmx_dprintf(" Input Port: %u\n", work->ipprt);
	cvmx_dprintf(" QoS: %u\n", work->qos);
	cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs);

	if (work->word2.s.bufs == 0) {
		/*
		 * Dynamic short packet: no separate buffer, the data is
		 * stored inline in the WQE.  Synthesize a buffer pointer
		 * covering work->packet_data so the dump loop below can
		 * treat both cases uniformly.
		 */
		union cvmx_ipd_wqe_fpa_queue wqe_pool;
		wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE);
		buffer_ptr.u64 = 0;
		buffer_ptr.s.pool = wqe_pool.s.wqe_pool;
		buffer_ptr.s.size = 128;
		buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
		if (likely(!work->word2.s.not_IP)) {
			/*
			 * For IP packets the hardware stores the data at a
			 * PIP-configured offset; adjust the address so it
			 * points at the start of the packet data.
			 */
			union cvmx_pip_ip_offset pip_ip_offset;
			pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET);
			buffer_ptr.s.addr +=
			    (pip_ip_offset.s.offset << 3) -
			    work->word2.s.ip_offset;
			buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2;
		} else {
			/*
			 * WARNING: This code assumes that the packet
			 * is not RAW. If it was, we would use
			 * PIP_GBL_CFG[RAW_SHF] instead of
			 * PIP_GBL_CFG[NIP_SHF].
			 */
			union cvmx_pip_gbl_cfg pip_gbl_cfg;
			pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG);
			buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
		}
	} else
		buffer_ptr = work->packet_ptr;
	remaining_bytes = work->len;

	/* Walk the buffer chain, hex-dumping 8 bytes per output line. */
	while (remaining_bytes) {
		/*
		 * 'back' counts 128-byte cache lines from the data address
		 * back to the true start of the buffer.
		 */
		start_of_buffer =
		    ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
		cvmx_dprintf(" Buffer Start:%llx\n",
			     (unsigned long long)start_of_buffer);
		cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i);
		cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back);
		cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool);
		cvmx_dprintf(" Buffer Data: %llx\n",
			     (unsigned long long)buffer_ptr.s.addr);
		cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size);

		cvmx_dprintf("\t\t");
		data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr);
		end_of_data = data_address + buffer_ptr.s.size;
		count = 0;
		while (data_address < end_of_data) {
			if (remaining_bytes == 0)
				break;
			else
				remaining_bytes--;
			cvmx_dprintf("%02x", (unsigned int)*data_address);
			data_address++;
			/* Break the line after every 8 bytes dumped */
			if (remaining_bytes && (count == 7)) {
				cvmx_dprintf("\n\t\t");
				count = 0;
			} else
				count++;
		}
		cvmx_dprintf("\n");

		/*
		 * The pointer to the next buffer is stored in the 8 bytes
		 * immediately preceding the current buffer's data.
		 */
		if (remaining_bytes)
			buffer_ptr = *(union cvmx_buf_ptr *)
				cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
	}
	return 0;
}
169
170/**
171 * Setup Random Early Drop on a specific input queue
172 *
173 * @queue: Input queue to setup RED on (0-7)
174 * @pass_thresh:
175 * Packets will begin slowly dropping when there are less than
176 * this many packet buffers free in FPA 0.
177 * @drop_thresh:
178 * All incomming packets will be dropped when there are less
179 * than this many free packet buffers in FPA 0.
180 * Returns Zero on success. Negative on failure
181 */
182int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
183{
184 union cvmx_ipd_qosx_red_marks red_marks;
185 union cvmx_ipd_red_quex_param red_param;
186
187 /* Set RED to begin dropping packets when there are pass_thresh buffers
188 left. It will linearly drop more packets until reaching drop_thresh
189 buffers */
190 red_marks.u64 = 0;
191 red_marks.s.drop = drop_thresh;
192 red_marks.s.pass = pass_thresh;
193 cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
194
195 /* Use the actual queue 0 counter, not the average */
196 red_param.u64 = 0;
197 red_param.s.prb_con =
198 (255ul << 24) / (red_marks.s.pass - red_marks.s.drop);
199 red_param.s.avg_con = 1;
200 red_param.s.new_con = 255;
201 red_param.s.use_pcnt = 1;
202 cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
203 return 0;
204}
205
/**
 * Setup Random Early Drop to automatically begin dropping packets.
 *
 * @pass_thresh:
 *               Packets will begin slowly dropping when there are less than
 *               this many packet buffers free in FPA 0.
 * @drop_thresh:
 *               All incoming packets will be dropped when there are less
 *               than this many free packet buffers in FPA 0.
 * Returns Zero on success. Negative on failure
 */
int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
{
	union cvmx_ipd_portx_bp_page_cnt page_cnt;
	union cvmx_ipd_bp_prt_red_end ipd_bp_prt_red_end;
	union cvmx_ipd_red_port_enable red_port_enable;
	int queue;
	int interface;
	int port;

	/* Disable backpressure based on queued buffers. It needs SW support */
	page_cnt.u64 = 0;
	page_cnt.s.bp_enb = 0;
	page_cnt.s.page_cnt = 100;
	for (interface = 0; interface < 2; interface++) {
		/*
		 * NOTE(review): the bound uses '<' but
		 * cvmx_helper_get_last_ipd_port() returns the inclusive
		 * last port, so the final port of each interface is never
		 * written here — confirm whether that is intentional.
		 */
		for (port = cvmx_helper_get_first_ipd_port(interface);
		     port < cvmx_helper_get_last_ipd_port(interface); port++)
			cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port),
				       page_cnt.u64);
	}

	/* Apply the same RED thresholds to all eight input queues */
	for (queue = 0; queue < 8; queue++)
		cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh);

	/* Shutoff the dropping based on the per port page count. SW isn't
	   decrementing it right now */
	ipd_bp_prt_red_end.u64 = 0;
	ipd_bp_prt_red_end.s.prt_enb = 0;
	cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, ipd_bp_prt_red_end.u64);

	/* Enable RED on every port (0xfffffffff covers 36 port bits) */
	red_port_enable.u64 = 0;
	red_port_enable.s.prt_enb = 0xfffffffffull;
	red_port_enable.s.avg_dly = 10000;
	red_port_enable.s.prb_dly = 10000;
	cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);

	return 0;
}
254
255/**
256 * Setup the common GMX settings that determine the number of
257 * ports. These setting apply to almost all configurations of all
258 * chips.
259 *
260 * @interface: Interface to configure
261 * @num_ports: Number of ports on the interface
262 *
263 * Returns Zero on success, negative on failure
264 */
265int __cvmx_helper_setup_gmx(int interface, int num_ports)
266{
267 union cvmx_gmxx_tx_prts gmx_tx_prts;
268 union cvmx_gmxx_rx_prts gmx_rx_prts;
269 union cvmx_pko_reg_gmx_port_mode pko_mode;
270 union cvmx_gmxx_txx_thresh gmx_tx_thresh;
271 int index;
272
273 /* Tell GMX the number of TX ports on this interface */
274 gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface));
275 gmx_tx_prts.s.prts = num_ports;
276 cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64);
277
278 /* Tell GMX the number of RX ports on this interface. This only
279 ** applies to *GMII and XAUI ports */
280 if (cvmx_helper_interface_get_mode(interface) ==
281 CVMX_HELPER_INTERFACE_MODE_RGMII
282 || cvmx_helper_interface_get_mode(interface) ==
283 CVMX_HELPER_INTERFACE_MODE_SGMII
284 || cvmx_helper_interface_get_mode(interface) ==
285 CVMX_HELPER_INTERFACE_MODE_GMII
286 || cvmx_helper_interface_get_mode(interface) ==
287 CVMX_HELPER_INTERFACE_MODE_XAUI) {
288 if (num_ports > 4) {
289 cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal "
290 "num_ports\n");
291 return -1;
292 }
293
294 gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface));
295 gmx_rx_prts.s.prts = num_ports;
296 cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64);
297 }
298
299 /* Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, and 50XX */
300 if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX)
301 && !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
302 /* Tell PKO the number of ports on this interface */
303 pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
304 if (interface == 0) {
305 if (num_ports == 1)
306 pko_mode.s.mode0 = 4;
307 else if (num_ports == 2)
308 pko_mode.s.mode0 = 3;
309 else if (num_ports <= 4)
310 pko_mode.s.mode0 = 2;
311 else if (num_ports <= 8)
312 pko_mode.s.mode0 = 1;
313 else
314 pko_mode.s.mode0 = 0;
315 } else {
316 if (num_ports == 1)
317 pko_mode.s.mode1 = 4;
318 else if (num_ports == 2)
319 pko_mode.s.mode1 = 3;
320 else if (num_ports <= 4)
321 pko_mode.s.mode1 = 2;
322 else if (num_ports <= 8)
323 pko_mode.s.mode1 = 1;
324 else
325 pko_mode.s.mode1 = 0;
326 }
327 cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
328 }
329
330 /*
331 * Set GMX to buffer as much data as possible before starting
332 * transmit. This reduces the chances that we have a TX under
333 * run due to memory contention. Any packet that fits entirely
334 * in the GMX FIFO can never have an under run regardless of
335 * memory load.
336 */
337 gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface));
338 if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)
339 || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
340 /* These chips have a fixed max threshold of 0x40 */
341 gmx_tx_thresh.s.cnt = 0x40;
342 } else {
343 /* Choose the max value for the number of ports */
344 if (num_ports <= 1)
345 gmx_tx_thresh.s.cnt = 0x100 / 1;
346 else if (num_ports == 2)
347 gmx_tx_thresh.s.cnt = 0x100 / 2;
348 else
349 gmx_tx_thresh.s.cnt = 0x100 / 4;
350 }
351 /*
352 * SPI and XAUI can have lots of ports but the GMX hardware
353 * only ever has a max of 4.
354 */
355 if (num_ports > 4)
356 num_ports = 4;
357 for (index = 0; index < num_ports; index++)
358 cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface),
359 gmx_tx_thresh.u64);
360
361 return 0;
362}
363
/**
 * Returns the IPD/PKO port number for a port on the given
 * interface.
 *
 * @interface: Interface to use (0-3)
 * @port: Port on the interface
 *
 * Returns IPD/PKO port number, or -1 for an unknown interface
 */
int cvmx_helper_get_ipd_port(int interface, int port)
{
	/* Base IPD port for interfaces 0-3: 0, 16, 32, 36 */
	static const int ipd_port_base[4] = { 0, 16, 32, 36 };

	if (interface >= 0 && interface < 4)
		return ipd_port_base[interface] + port;

	return -1;
}
387
/**
 * Returns the interface number for an IPD/PKO port number.
 *
 * @ipd_port: IPD/PKO port number
 *
 * Returns Interface number, or -1 for a port outside 0-39
 */
int cvmx_helper_get_interface_num(int ipd_port)
{
	/* Interface boundaries: [0,16) -> 0, [16,32) -> 1,
	   [32,36) -> 2, [36,40) -> 3 */
	if (ipd_port < 16)
		return 0;
	if (ipd_port < 32)
		return 1;
	if (ipd_port < 36)
		return 2;
	if (ipd_port < 40)
		return 3;

	cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD "
		     "port number\n");
	return -1;
}
411
/**
 * Returns the interface index number for an IPD/PKO port
 * number.
 *
 * @ipd_port: IPD/PKO port number
 *
 * Returns Interface index number, or -1 for a port outside 0-39
 */
int cvmx_helper_get_interface_index_num(int ipd_port)
{
	/* Interfaces 0/1 hold 16 ports each; interfaces 2/3 hold 4 each,
	   so the index is the port number modulo the interface width. */
	if (ipd_port < 32)
		return ipd_port & 15;
	if (ipd_port < 40)
		return ipd_port & 3;

	cvmx_dprintf("cvmx_helper_get_interface_index_num: "
		     "Illegal IPD port number\n");
	return -1;
}
diff --git a/drivers/staging/octeon/cvmx-helper-util.h b/drivers/staging/octeon/cvmx-helper-util.h
new file mode 100644
index 000000000000..6a6e52fc22c1
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-util.h
@@ -0,0 +1,215 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 *
30 * Small helper utilities.
31 *
32 */
33
34#ifndef __CVMX_HELPER_UTIL_H__
35#define __CVMX_HELPER_UTIL_H__
36
37/**
 * Convert an interface mode into a human readable string
39 *
40 * @mode: Mode to convert
41 *
42 * Returns String
43 */
44extern const char
45 *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode);
46
47/**
48 * Debug routine to dump the packet structure to the console
49 *
50 * @work: Work queue entry containing the packet to dump
51 * Returns
52 */
53extern int cvmx_helper_dump_packet(cvmx_wqe_t *work);
54
55/**
56 * Setup Random Early Drop on a specific input queue
57 *
58 * @queue: Input queue to setup RED on (0-7)
59 * @pass_thresh:
60 * Packets will begin slowly dropping when there are less than
61 * this many packet buffers free in FPA 0.
62 * @drop_thresh:
 * All incoming packets will be dropped when there are less
64 * than this many free packet buffers in FPA 0.
65 * Returns Zero on success. Negative on failure
66 */
67extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
68 int drop_thresh);
69
70/**
71 * Setup Random Early Drop to automatically begin dropping packets.
72 *
73 * @pass_thresh:
74 * Packets will begin slowly dropping when there are less than
75 * this many packet buffers free in FPA 0.
76 * @drop_thresh:
 * All incoming packets will be dropped when there are less
78 * than this many free packet buffers in FPA 0.
79 * Returns Zero on success. Negative on failure
80 */
81extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
82
83/**
84 * Get the version of the CVMX libraries.
85 *
86 * Returns Version string. Note this buffer is allocated statically
87 * and will be shared by all callers.
88 */
89extern const char *cvmx_helper_get_version(void);
90
91/**
92 * Setup the common GMX settings that determine the number of
93 * ports. These setting apply to almost all configurations of all
94 * chips.
95 *
96 * @interface: Interface to configure
97 * @num_ports: Number of ports on the interface
98 *
99 * Returns Zero on success, negative on failure
100 */
101extern int __cvmx_helper_setup_gmx(int interface, int num_ports);
102
103/**
104 * Returns the IPD/PKO port number for a port on the given
105 * interface.
106 *
107 * @interface: Interface to use
108 * @port: Port on the interface
109 *
110 * Returns IPD/PKO port number
111 */
112extern int cvmx_helper_get_ipd_port(int interface, int port);
113
/**
 * Returns the IPD/PKO port number for the first port on the given
 * interface.
 *
 * @interface: Interface to use
 *
 * Returns IPD/PKO port number
 */
static inline int cvmx_helper_get_first_ipd_port(int interface)
{
	/* Port 0 of an interface is by definition its first IPD port */
	return cvmx_helper_get_ipd_port(interface, 0);
}
126
/**
 * Returns the IPD/PKO port number for the last port on the given
 * interface.
 *
 * @interface: Interface to use
 *
 * Returns IPD/PKO port number (inclusive: this port belongs to the
 * interface)
 */
static inline int cvmx_helper_get_last_ipd_port(int interface)
{
	/* Declared locally to avoid pulling in the full helper header */
	extern int cvmx_helper_ports_on_interface(int interface);

	return cvmx_helper_get_first_ipd_port(interface) +
	       cvmx_helper_ports_on_interface(interface) - 1;
}
142
/**
 * Free the packet buffers contained in a work queue entry.
 * The work queue entry is not freed.
 *
 * @work: Work queue entry with packet to free
 */
static inline void cvmx_helper_free_packet_data(cvmx_wqe_t *work)
{
	uint64_t number_buffers;
	union cvmx_buf_ptr buffer_ptr;
	union cvmx_buf_ptr next_buffer_ptr;
	uint64_t start_of_buffer;

	/* Zero buffers means a dynamic short packet: data is inline in
	   the WQE, so there is nothing separate to free. */
	number_buffers = work->word2.s.bufs;
	if (number_buffers == 0)
		return;
	buffer_ptr = work->packet_ptr;

	/*
	 * Since the number of buffers is not zero, we know this is
	 * not a dynamic short packet. We need to check if it is a
	 * packet received with IPD_CTL_STATUS[NO_WPTR]. If this is
	 * true, we need to free all buffers except for the first
	 * one. The caller doesn't expect their WQE pointer to be
	 * freed
	 */
	start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
	if (cvmx_ptr_to_phys(work) == start_of_buffer) {
		/* First buffer IS the WQE: skip it and free the rest */
		next_buffer_ptr =
		    *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
		buffer_ptr = next_buffer_ptr;
		number_buffers--;
	}

	while (number_buffers--) {
		/*
		 * Remember the back pointer is in cache lines, not
		 * 64bit words
		 */
		start_of_buffer =
		    ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
		/*
		 * Read pointer to next buffer before we free the
		 * current buffer.
		 */
		next_buffer_ptr =
		    *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
		cvmx_fpa_free(cvmx_phys_to_ptr(start_of_buffer),
			      buffer_ptr.s.pool, 0);
		buffer_ptr = next_buffer_ptr;
	}
}
195
196/**
197 * Returns the interface number for an IPD/PKO port number.
198 *
199 * @ipd_port: IPD/PKO port number
200 *
201 * Returns Interface number
202 */
203extern int cvmx_helper_get_interface_num(int ipd_port);
204
205/**
206 * Returns the interface index number for an IPD/PKO port
207 * number.
208 *
209 * @ipd_port: IPD/PKO port number
210 *
211 * Returns Interface index number
212 */
213extern int cvmx_helper_get_interface_index_num(int ipd_port);
214
#endif /* __CVMX_HELPER_UTIL_H__ */
diff --git a/drivers/staging/octeon/cvmx-helper-xaui.c b/drivers/staging/octeon/cvmx-helper-xaui.c
new file mode 100644
index 000000000000..a11e6769e234
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-xaui.c
@@ -0,0 +1,348 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Functions for XAUI initialization, configuration,
30 * and monitoring.
31 *
32 */
33
34#include <asm/octeon/octeon.h>
35
36#include "cvmx-config.h"
37
38#include "cvmx-helper.h"
39
40#include "cvmx-pko-defs.h"
41#include "cvmx-gmxx-defs.h"
42#include "cvmx-pcsxx-defs.h"
43
44void __cvmx_interrupt_gmxx_enable(int interface);
45void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
46void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
/**
 * Probe a XAUI interface and determine the number of ports
 * connected to it. The XAUI interface should still be down
 * after this call.
 *
 * @interface: Interface to probe
 *
 * Returns Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_xaui_probe(int interface)
{
	int i;
	union cvmx_gmxx_hg2_control gmx_hg2_control;
	union cvmx_gmxx_inf_mode mode;

	/*
	 * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
	 * interface needs to be enabled before IPD otherwise per port
	 * backpressure may not work properly.
	 */
	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
	mode.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);

	/* XAUI presents a single GMX port regardless of HiGig2 */
	__cvmx_helper_setup_gmx(interface, 1);

	/*
	 * Setup PKO to support 16 ports for HiGig2 virtual
	 * ports. We're pointing all of the PKO packet ports for this
	 * interface to the XAUI. This allows us to use HiGig2
	 * backpressure per port.
	 */
	for (i = 0; i < 16; i++) {
		union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
		pko_mem_port_ptrs.u64 = 0;
		/*
		 * We set each PKO port to have equal priority in a
		 * round robin fashion.
		 */
		pko_mem_port_ptrs.s.static_p = 0;
		pko_mem_port_ptrs.s.qos_mask = 0xff;
		/* All PKO ports map to the same XAUI hardware port */
		pko_mem_port_ptrs.s.eid = interface * 4;
		pko_mem_port_ptrs.s.pid = interface * 16 + i;
		cvmx_write_csr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
	}

	/* If HiGig2 is enabled return 16 ports, otherwise return 1 port */
	gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
	if (gmx_hg2_control.s.hg2tx_en)
		return 16;
	else
		return 1;
}
101
/**
 * Bringup and enable a XAUI interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * The bring-up follows a fixed numbered hardware sequence; do not
 * reorder the steps.
 *
 * @interface: Interface to bring up
 *
 * Returns Zero on success, negative on failure (a timeout while
 * waiting for a status field)
 */
int __cvmx_helper_xaui_enable(int interface)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	union cvmx_pcsxx_control1_reg xauiCtl;
	union cvmx_pcsxx_misc_ctl_reg xauiMiscCtl;
	union cvmx_gmxx_tx_xaui_ctl gmxXauiTxCtl;
	union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
	union cvmx_gmxx_tx_int_en gmx_tx_int_en;
	union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;

	/* (1) Interface has already been enabled. */

	/* (2) Disable GMX. */
	xauiMiscCtl.u64 = cvmx_read_csr(CVMX_PCSXX_MISC_CTL_REG(interface));
	xauiMiscCtl.s.gmxeno = 1;
	cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);

	/* (3) Disable GMX and PCSX interrupts.  The current enables are
	   saved so they can be restored once the link is up. */
	gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(0, interface));
	cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
	gmx_tx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_TX_INT_EN(interface));
	cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
	pcsx_int_en_reg.u64 = cvmx_read_csr(CVMX_PCSXX_INT_EN_REG(interface));
	cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);

	/* (4) Bring up the PCSX and GMX reconciliation layer. */
	/* (4)a Set polarity and lane swapping. */
	/* (4)b */
	gmxXauiTxCtl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
	/* Enable better IFG packing and improves performance */
	gmxXauiTxCtl.s.dic_en = 1;
	gmxXauiTxCtl.s.uni_en = 0;
	cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), gmxXauiTxCtl.u64);

	/* (4)c Apply reset sequence */
	xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
	xauiCtl.s.lo_pwr = 0;
	xauiCtl.s.reset = 1;
	cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);

	/* Wait for PCS to come out of reset */
	if (CVMX_WAIT_FOR_FIELD64
	    (CVMX_PCSXX_CONTROL1_REG(interface), union cvmx_pcsxx_control1_reg,
	     reset, ==, 0, 10000))
		return -1;
	/* Wait for PCS to be aligned */
	if (CVMX_WAIT_FOR_FIELD64
	    (CVMX_PCSXX_10GBX_STATUS_REG(interface),
	     union cvmx_pcsxx_10gbx_status_reg, alignd, ==, 1, 10000))
		return -1;
	/* Wait for RX to be ready */
	if (CVMX_WAIT_FOR_FIELD64
	    (CVMX_GMXX_RX_XAUI_CTL(interface), union cvmx_gmxx_rx_xaui_ctl,
	     status, ==, 0, 10000))
		return -1;

	/* (6) Configure GMX */
	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
	gmx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);

	/* Wait for GMX RX to be idle */
	if (CVMX_WAIT_FOR_FIELD64
	    (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
	     rx_idle, ==, 1, 10000))
		return -1;
	/* Wait for GMX TX to be idle */
	if (CVMX_WAIT_FOR_FIELD64
	    (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
	     tx_idle, ==, 1, 10000))
		return -1;

	/* GMX configure */
	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
	gmx_cfg.s.speed = 1;
	gmx_cfg.s.speed_msb = 0;
	gmx_cfg.s.slottime = 1;
	cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), 1);
	cvmx_write_csr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
	cvmx_write_csr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);

	/* (7) Clear out any error state.  Each INT_REG is read and
	   written back to itself (presumably write-1-to-clear semantics
	   — confirm against the register descriptions). */
	cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(0, interface),
		       cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(0, interface)));
	cvmx_write_csr(CVMX_GMXX_TX_INT_REG(interface),
		       cvmx_read_csr(CVMX_GMXX_TX_INT_REG(interface)));
	cvmx_write_csr(CVMX_PCSXX_INT_REG(interface),
		       cvmx_read_csr(CVMX_PCSXX_INT_REG(interface)));

	/* Wait for receive link */
	if (CVMX_WAIT_FOR_FIELD64
	    (CVMX_PCSXX_STATUS1_REG(interface), union cvmx_pcsxx_status1_reg,
	     rcv_lnk, ==, 1, 10000))
		return -1;
	if (CVMX_WAIT_FOR_FIELD64
	    (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
	     xmtflt, ==, 0, 10000))
		return -1;
	if (CVMX_WAIT_FOR_FIELD64
	    (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
	     rcvflt, ==, 0, 10000))
		return -1;

	/* Restore the interrupt enables saved in step (3) */
	cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), gmx_rx_int_en.u64);
	cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
	cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), pcsx_int_en_reg.u64);

	cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port(interface, 0));

	/* (8) Enable packet reception */
	xauiMiscCtl.s.gmxeno = 0;
	cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);

	__cvmx_interrupt_pcsx_intx_en_reg_enable(0, interface);
	__cvmx_interrupt_pcsx_intx_en_reg_enable(1, interface);
	__cvmx_interrupt_pcsx_intx_en_reg_enable(2, interface);
	__cvmx_interrupt_pcsx_intx_en_reg_enable(3, interface);
	__cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
	__cvmx_interrupt_gmxx_enable(interface);

	return 0;
}
238
/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @ipd_port: IPD/PKO port to query
 *
 * Returns Link state
 */
cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
	union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
	union cvmx_pcsxx_status1_reg pcsxx_status1_reg;
	cvmx_helper_link_info_t result;

	gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
	gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
	pcsxx_status1_reg.u64 =
	    cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(interface));
	result.u64 = 0;

	/* Only return a link if both RX and TX are happy */
	if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0) &&
	    (pcsxx_status1_reg.s.rcv_lnk == 1)) {
		/* XAUI is reported as 10Gbps full duplex when up */
		result.s.link_up = 1;
		result.s.full_duplex = 1;
		result.s.speed = 10000;
	} else {
		/* Disable GMX and PCSX interrupts while the link is down;
		   __cvmx_helper_xaui_enable() re-enables them on bring-up */
		cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
		cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
		cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
	}
	return result;
}
277
/**
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @ipd_port: IPD/PKO port to configure
 * @link_info: The new link state
 *
 * Returns Zero on success, negative on failure
 */
int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
	union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;

	/* NOTE(review): these CSRs are read even when link_info says the
	   link should stay down; presumably these reads have no side
	   effects — confirm before reordering. */
	gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
	gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));

	/* If the link shouldn't be up, then just return */
	if (!link_info.s.link_up)
		return 0;

	/* Do nothing if both RX and TX are happy */
	if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0))
		return 0;

	/* Bring the link up */
	return __cvmx_helper_xaui_enable(interface);
}
310
311/**
312 * Configure a port for internal and/or external loopback. Internal loopback
313 * causes packets sent by the port to be received by Octeon. External loopback
314 * causes packets received from the wire to sent out again.
315 *
316 * @ipd_port: IPD/PKO port to loopback.
317 * @enable_internal:
318 * Non zero if you want internal loopback
319 * @enable_external:
320 * Non zero if you want external loopback
321 *
322 * Returns Zero on success, negative on failure.
323 */
324extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
325 int enable_internal,
326 int enable_external)
327{
328 int interface = cvmx_helper_get_interface_num(ipd_port);
329 union cvmx_pcsxx_control1_reg pcsxx_control1_reg;
330 union cvmx_gmxx_xaui_ext_loopback gmxx_xaui_ext_loopback;
331
332 /* Set the internal loop */
333 pcsxx_control1_reg.u64 =
334 cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
335 pcsxx_control1_reg.s.loopbck1 = enable_internal;
336 cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface),
337 pcsxx_control1_reg.u64);
338
339 /* Set the external loop */
340 gmxx_xaui_ext_loopback.u64 =
341 cvmx_read_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface));
342 gmxx_xaui_ext_loopback.s.en = enable_external;
343 cvmx_write_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface),
344 gmxx_xaui_ext_loopback.u64);
345
346 /* Take the link through a reset */
347 return __cvmx_helper_xaui_enable(interface);
348}
diff --git a/drivers/staging/octeon/cvmx-helper-xaui.h b/drivers/staging/octeon/cvmx-helper-xaui.h
new file mode 100644
index 000000000000..4b4db2f93cd4
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper-xaui.h
@@ -0,0 +1,103 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 * @file
30 *
31 * Functions for XAUI initialization, configuration,
32 * and monitoring.
33 *
34 */
35#ifndef __CVMX_HELPER_XAUI_H__
36#define __CVMX_HELPER_XAUI_H__
37
38/**
39 * Probe a XAUI interface and determine the number of ports
40 * connected to it. The XAUI interface should still be down
41 * after this call.
42 *
43 * @interface: Interface to probe
44 *
45 * Returns Number of ports on the interface. Zero to disable.
46 */
47extern int __cvmx_helper_xaui_probe(int interface);
48
49/**
50 * Bringup and enable a XAUI interface. After this call packet
51 * I/O should be fully functional. This is called with IPD
52 * enabled but PKO disabled.
53 *
54 * @interface: Interface to bring up
55 *
56 * Returns Zero on success, negative on failure
57 */
58extern int __cvmx_helper_xaui_enable(int interface);
59
60/**
61 * Return the link state of an IPD/PKO port as returned by
62 * auto negotiation. The result of this function may not match
63 * Octeon's link config if auto negotiation has changed since
64 * the last call to cvmx_helper_link_set().
65 *
66 * @ipd_port: IPD/PKO port to query
67 *
68 * Returns Link state
69 */
70extern cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port);
71
72/**
73 * Configure an IPD/PKO port for the specified link state. This
74 * function does not influence auto negotiation at the PHY level.
75 * The passed link state must always match the link state returned
76 * by cvmx_helper_link_get(). It is normally best to use
77 * cvmx_helper_link_autoconf() instead.
78 *
79 * @ipd_port: IPD/PKO port to configure
80 * @link_info: The new link state
81 *
82 * Returns Zero on success, negative on failure
83 */
84extern int __cvmx_helper_xaui_link_set(int ipd_port,
85 cvmx_helper_link_info_t link_info);
86
87/**
88 * Configure a port for internal and/or external loopback. Internal loopback
89 * causes packets sent by the port to be received by Octeon. External loopback
 90 * causes packets received from the wire to be sent out again.
91 *
92 * @ipd_port: IPD/PKO port to loopback.
93 * @enable_internal:
94 * Non zero if you want internal loopback
95 * @enable_external:
96 * Non zero if you want external loopback
97 *
98 * Returns Zero on success, negative on failure.
99 */
100extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
101 int enable_internal,
102 int enable_external);
103#endif
diff --git a/drivers/staging/octeon/cvmx-helper.c b/drivers/staging/octeon/cvmx-helper.c
new file mode 100644
index 000000000000..591506643d02
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper.c
@@ -0,0 +1,1058 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 *
30 * Helper functions for common, but complicated tasks.
31 *
32 */
33#include <asm/octeon/octeon.h>
34
35#include "cvmx-config.h"
36
37#include "cvmx-fpa.h"
38#include "cvmx-pip.h"
39#include "cvmx-pko.h"
40#include "cvmx-ipd.h"
41#include "cvmx-spi.h"
42#include "cvmx-helper.h"
43#include "cvmx-helper-board.h"
44
45#include "cvmx-pip-defs.h"
46#include "cvmx-smix-defs.h"
47#include "cvmx-asxx-defs.h"
48
/**
 * cvmx_override_pko_queue_priority(int pko_port, uint64_t
 * priorities[16]) is a function pointer. It is meant to allow
 * customization of the PKO queue priorities based on the port
 * number. Users should set this pointer to a function before
 * calling any cvmx-helper operations.
 */
void (*cvmx_override_pko_queue_priority) (int pko_port,
					  uint64_t priorities[16]);

/**
 * cvmx_override_ipd_port_setup(int ipd_port) is a function
 * pointer. It is meant to allow customization of the IPD port
 * setup before packet input/output comes online. It is called
 * after cvmx-helper does the default IPD configuration, but
 * before IPD is enabled. Users should set this pointer to a
 * function before calling any cvmx-helper operations.
 */
void (*cvmx_override_ipd_port_setup) (int ipd_port);

/* Port count per interface */
static int interface_port_count[4] = { 0, 0, 0, 0 };

/* Port last configured link info indexed by IPD/PKO port */
static cvmx_helper_link_info_t
    port_link_info[CVMX_PIP_NUM_INPUT_PORTS];
75
76/**
77 * Return the number of interfaces the chip has. Each interface
78 * may have multiple ports. Most chips support two interfaces,
79 * but the CNX0XX and CNX1XX are exceptions. These only support
80 * one interface.
81 *
82 * Returns Number of interfaces on chip
83 */
84int cvmx_helper_get_number_of_interfaces(void)
85{
86 if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
87 return 4;
88 else
89 return 3;
90}
91
92/**
93 * Return the number of ports on an interface. Depending on the
94 * chip and configuration, this can be 1-16. A value of 0
95 * specifies that the interface doesn't exist or isn't usable.
96 *
97 * @interface: Interface to get the port count for
98 *
99 * Returns Number of ports on interface. Can be Zero.
100 */
101int cvmx_helper_ports_on_interface(int interface)
102{
103 return interface_port_count[interface];
104}
105
/**
 * Get the operating mode of an interface. Depending on the Octeon
 * chip and configuration, this function returns an enumeration
 * of the type of packet I/O supported by an interface.
 *
 * @interface: Interface to probe
 *
 * Returns Mode of the interface. Unknown or unsupported interfaces return
 *         DISABLED.
 */
cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
{
	union cvmx_gmxx_inf_mode mode;

	/* Interface 2 is always the PCI target (NPI) interface */
	if (interface == 2)
		return CVMX_HELPER_INTERFACE_MODE_NPI;

	/* Interface 3 is the loopback interface, CN56XX/CN52XX only */
	if (interface == 3) {
		if (OCTEON_IS_MODEL(OCTEON_CN56XX)
		    || OCTEON_IS_MODEL(OCTEON_CN52XX))
			return CVMX_HELPER_INTERFACE_MODE_LOOP;
		else
			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
	}

	if (interface == 0
	    && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5
	    && cvmx_sysinfo_get()->board_rev_major == 1) {
		/*
		 * Lie about interface type of CN3005 board. This
		 * board has a switch on port 1 like the other
		 * evaluation boards, but it is connected over RGMII
		 * instead of GMII. Report GMII mode so that the
		 * speed is forced to 1 Gbit full duplex. Other than
		 * some initial configuration (which does not use the
		 * output of this function) there is no difference in
		 * setup between GMII and RGMII modes.
		 */
		return CVMX_HELPER_INTERFACE_MODE_GMII;
	}

	/* Interface 1 is always disabled on CN31XX, CN30XX, CN50XX and CN52XX */
	if ((interface == 1)
	    && (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)
		|| OCTEON_IS_MODEL(OCTEON_CN50XX)
		|| OCTEON_IS_MODEL(OCTEON_CN52XX)))
		return CVMX_HELPER_INTERFACE_MODE_DISABLED;

	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
		/* CN56XX/CN52XX encode the interface type in a mode field */
		switch (mode.cn56xx.mode) {
		case 0:
			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
		case 1:
			return CVMX_HELPER_INTERFACE_MODE_XAUI;
		case 2:
			return CVMX_HELPER_INTERFACE_MODE_SGMII;
		case 3:
			return CVMX_HELPER_INTERFACE_MODE_PICMG;
		default:
			return CVMX_HELPER_INTERFACE_MODE_DISABLED;
		}
	} else {
		if (!mode.s.en)
			return CVMX_HELPER_INTERFACE_MODE_DISABLED;

		if (mode.s.type) {
			/* The type bit selects SPI on CN38XX/CN58XX */
			if (OCTEON_IS_MODEL(OCTEON_CN38XX)
			    || OCTEON_IS_MODEL(OCTEON_CN58XX))
				return CVMX_HELPER_INTERFACE_MODE_SPI;
			else
				return CVMX_HELPER_INTERFACE_MODE_GMII;
		} else
			return CVMX_HELPER_INTERFACE_MODE_RGMII;
	}
}
182
/**
 * Configure the IPD/PIP tagging and QoS options for a specific
 * port. This function determines the POW work queue entry
 * contents for a port. The setup performed here is controlled by
 * the defines in executive-config.h.
 *
 * @ipd_port: Port to configure. This follows the IPD numbering, not the
 *            per interface numbering
 *
 * Returns Zero on success, negative on failure
 */
static int __cvmx_helper_port_setup_ipd(int ipd_port)
{
	union cvmx_pip_prt_cfgx port_config;
	union cvmx_pip_prt_tagx tag_config;

	/* Read-modify-write the port config and tag config registers */
	port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
	tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));

	/* Have each port go to a different POW queue */
	port_config.s.qos = ipd_port & 0x7;

	/* Process the headers and place the IP header in the work queue */
	port_config.s.mode = CVMX_HELPER_INPUT_PORT_SKIP_MODE;

	/*
	 * Select which packet header fields participate in tag
	 * generation; the CVMX_HELPER_INPUT_TAG_* values come from
	 * executive-config.h.
	 */
	tag_config.s.ip6_src_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP;
	tag_config.s.ip6_dst_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_IP;
	tag_config.s.ip6_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT;
	tag_config.s.ip6_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT;
	tag_config.s.ip6_nxth_flag = CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER;
	tag_config.s.ip4_src_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP;
	tag_config.s.ip4_dst_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_IP;
	tag_config.s.ip4_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT;
	tag_config.s.ip4_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT;
	tag_config.s.ip4_pctl_flag = CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL;
	tag_config.s.inc_prt_flag = CVMX_HELPER_INPUT_TAG_INPUT_PORT;
	tag_config.s.tcp6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	tag_config.s.tcp4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	tag_config.s.ip6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	tag_config.s.ip4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	tag_config.s.non_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	/* Put all packets in group 0. Other groups can be used by the app */
	tag_config.s.grp = 0;

	cvmx_pip_config_port(ipd_port, port_config, tag_config);

	/* Give the user a chance to override our setting for each port */
	if (cvmx_override_ipd_port_setup)
		cvmx_override_ipd_port_setup(ipd_port);

	return 0;
}
235
/**
 * This function probes an interface to determine the actual
 * number of hardware ports connected to it. It doesn't setup the
 * ports or enable them. The main goal here is to set the global
 * interface_port_count[interface] correctly. Hardware setup of the
 * ports will be performed later.
 *
 * @interface: Interface to probe
 *
 * Returns Zero on success, negative on failure
 */
int cvmx_helper_interface_probe(int interface)
{
	/* At this stage in the game we don't want packets to be moving yet.
	   The following probe calls should perform hardware setup
	   needed to determine port counts. Receive must still be disabled */
	switch (cvmx_helper_interface_get_mode(interface)) {
		/* These types don't support ports to IPD/PKO */
	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
	case CVMX_HELPER_INTERFACE_MODE_PCIE:
		interface_port_count[interface] = 0;
		break;
		/* XAUI is a single high speed port */
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
		interface_port_count[interface] =
		    __cvmx_helper_xaui_probe(interface);
		break;
		/*
		 * RGMII/GMII/MII are all treated about the same. Most
		 * functions refer to these ports as RGMII.
		 */
	case CVMX_HELPER_INTERFACE_MODE_RGMII:
	case CVMX_HELPER_INTERFACE_MODE_GMII:
		interface_port_count[interface] =
		    __cvmx_helper_rgmii_probe(interface);
		break;
		/*
		 * SPI4 can have 1-16 ports depending on the device at
		 * the other end.
		 */
	case CVMX_HELPER_INTERFACE_MODE_SPI:
		interface_port_count[interface] =
		    __cvmx_helper_spi_probe(interface);
		break;
		/*
		 * SGMII can have 1-4 ports depending on how many are
		 * hooked up.
		 */
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
	case CVMX_HELPER_INTERFACE_MODE_PICMG:
		interface_port_count[interface] =
		    __cvmx_helper_sgmii_probe(interface);
		break;
		/* PCI target Network Packet Interface */
	case CVMX_HELPER_INTERFACE_MODE_NPI:
		interface_port_count[interface] =
		    __cvmx_helper_npi_probe(interface);
		break;
		/*
		 * Special loopback only ports. These are not the same
		 * as other ports in loopback mode.
		 */
	case CVMX_HELPER_INTERFACE_MODE_LOOP:
		interface_port_count[interface] =
		    __cvmx_helper_loop_probe(interface);
		break;
	}

	/* Give the board-specific code a chance to adjust the probed count */
	interface_port_count[interface] =
	    __cvmx_helper_board_interface_probe(interface,
						interface_port_count
						[interface]);

	/* Make sure all global variables propagate to other cores */
	CVMX_SYNCWS;

	return 0;
}
314
315/**
316 * Setup the IPD/PIP for the ports on an interface. Packet
317 * classification and tagging are set for every port on the
318 * interface. The number of ports on the interface must already
319 * have been probed.
320 *
321 * @interface: Interface to setup IPD/PIP for
322 *
323 * Returns Zero on success, negative on failure
324 */
325static int __cvmx_helper_interface_setup_ipd(int interface)
326{
327 int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
328 int num_ports = interface_port_count[interface];
329
330 while (num_ports--) {
331 __cvmx_helper_port_setup_ipd(ipd_port);
332 ipd_port++;
333 }
334 return 0;
335}
336
/**
 * Setup global setting for IPD/PIP not related to a specific
 * interface or port. This must be called before IPD is enabled.
 *
 * Returns Zero on success, negative on failure.
 */
static int __cvmx_helper_global_setup_ipd(void)
{
	/*
	 * Setup the global packet input options. NOTE(review): the
	 * divisors (/8, /128) presumably convert byte counts into the
	 * units cvmx_ipd_config() expects — confirm against the
	 * cvmx-ipd API.
	 */
	cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE / 8,
			CVMX_HELPER_FIRST_MBUFF_SKIP / 8,
			CVMX_HELPER_NOT_FIRST_MBUFF_SKIP / 8,
			/* The +8 is to account for the next ptr */
			(CVMX_HELPER_FIRST_MBUFF_SKIP + 8) / 128,
			/* The +8 is to account for the next ptr */
			(CVMX_HELPER_NOT_FIRST_MBUFF_SKIP + 8) / 128,
			CVMX_FPA_WQE_POOL,
			CVMX_IPD_OPC_MODE_STT,
			CVMX_HELPER_ENABLE_BACK_PRESSURE);
	return 0;
}
358
359/**
360 * Setup the PKO for the ports on an interface. The number of
361 * queues per port and the priority of each PKO output queue
362 * is set here. PKO must be disabled when this function is called.
363 *
364 * @interface: Interface to setup PKO for
365 *
366 * Returns Zero on success, negative on failure
367 */
368static int __cvmx_helper_interface_setup_pko(int interface)
369{
370 /*
371 * Each packet output queue has an associated priority. The
372 * higher the priority, the more often it can send a packet. A
373 * priority of 8 means it can send in all 8 rounds of
374 * contention. We're going to make each queue one less than
375 * the last. The vector of priorities has been extended to
376 * support CN5xxx CPUs, where up to 16 queues can be
377 * associated to a port. To keep backward compatibility we
378 * don't change the initial 8 priorities and replicate them in
379 * the second half. With per-core PKO queues (PKO lockless
380 * operation) all queues have the same priority.
381 */
382 uint64_t priorities[16] =
383 { 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 };
384
385 /*
386 * Setup the IPD/PIP and PKO for the ports discovered
387 * above. Here packet classification, tagging and output
388 * priorities are set.
389 */
390 int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
391 int num_ports = interface_port_count[interface];
392 while (num_ports--) {
393 /*
394 * Give the user a chance to override the per queue
395 * priorities.
396 */
397 if (cvmx_override_pko_queue_priority)
398 cvmx_override_pko_queue_priority(ipd_port, priorities);
399
400 cvmx_pko_config_port(ipd_port,
401 cvmx_pko_get_base_queue_per_core(ipd_port,
402 0),
403 cvmx_pko_get_num_queues(ipd_port),
404 priorities);
405 ipd_port++;
406 }
407 return 0;
408}
409
410/**
411 * Setup global setting for PKO not related to a specific
412 * interface or port. This must be called before PKO is enabled.
413 *
414 * Returns Zero on success, negative on failure.
415 */
416static int __cvmx_helper_global_setup_pko(void)
417{
418 /*
419 * Disable tagwait FAU timeout. This needs to be done before
420 * anyone might start packet output using tags.
421 */
422 union cvmx_iob_fau_timeout fau_to;
423 fau_to.u64 = 0;
424 fau_to.s.tout_val = 0xfff;
425 fau_to.s.tout_enb = 0;
426 cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
427 return 0;
428}
429
/**
 * Setup global backpressure setting.
 *
 * Returns Zero on success, negative on failure
 */
static int __cvmx_helper_global_setup_backpressure(void)
{
#if CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
	/* Disable backpressure (pause frame) generation when configured */
	int iface;
	int iface_count = cvmx_helper_get_number_of_interfaces();

	for (iface = 0; iface < iface_count; iface++) {
		switch (cvmx_helper_interface_get_mode(iface)) {
		case CVMX_HELPER_INTERFACE_MODE_RGMII:
		case CVMX_HELPER_INTERFACE_MODE_GMII:
		case CVMX_HELPER_INTERFACE_MODE_SPI:
		case CVMX_HELPER_INTERFACE_MODE_SGMII:
		case CVMX_HELPER_INTERFACE_MODE_PICMG:
			/* Override backpressure with mask 0xf */
			cvmx_gmx_set_backpressure_override(iface, 0xf);
			break;
		default:
			/* Other interface types have no GMX backpressure */
			break;
		}
	}
#endif

	return 0;
}
463
/**
 * Enable packet input/output from the hardware. This function is
 * called after all internal setup is complete and IPD is enabled.
 * After this function completes, packets will be accepted from the
 * hardware ports. PKO should still be disabled to make sure packets
 * aren't sent out partially setup hardware.
 *
 * @interface: Interface to enable
 *
 * Returns Zero on success, negative on failure
 */
static int __cvmx_helper_packet_hardware_enable(int interface)
{
	int result = 0;
	switch (cvmx_helper_interface_get_mode(interface)) {
		/* These types don't support ports to IPD/PKO */
	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
	case CVMX_HELPER_INTERFACE_MODE_PCIE:
		/* Nothing to do */
		break;
		/* XAUI is a single high speed port */
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
		result = __cvmx_helper_xaui_enable(interface);
		break;
		/*
		 * RGMII/GMII/MII are all treated about the same. Most
		 * functions refer to these ports as RGMII
		 */
	case CVMX_HELPER_INTERFACE_MODE_RGMII:
	case CVMX_HELPER_INTERFACE_MODE_GMII:
		result = __cvmx_helper_rgmii_enable(interface);
		break;
		/*
		 * SPI4 can have 1-16 ports depending on the device at
		 * the other end
		 */
	case CVMX_HELPER_INTERFACE_MODE_SPI:
		result = __cvmx_helper_spi_enable(interface);
		break;
		/*
		 * SGMII can have 1-4 ports depending on how many are
		 * hooked up
		 */
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
	case CVMX_HELPER_INTERFACE_MODE_PICMG:
		result = __cvmx_helper_sgmii_enable(interface);
		break;
		/* PCI target Network Packet Interface */
	case CVMX_HELPER_INTERFACE_MODE_NPI:
		result = __cvmx_helper_npi_enable(interface);
		break;
		/*
		 * Special loopback only ports. These are not the same
		 * as other ports in loopback mode
		 */
	case CVMX_HELPER_INTERFACE_MODE_LOOP:
		result = __cvmx_helper_loop_enable(interface);
		break;
	}
	/* The board-specific enable hook runs for every interface type */
	result |= __cvmx_helper_board_hardware_enable(interface);
	return result;
}
526
/**
 * Function to adjust internal IPD pointer alignments
 *
 * Errata workaround: repeatedly sends gather-mode packets to port 0
 * over an internal RGMII loopback until the IPD WQE/packet pointer
 * counters (read from CVMX_IPD_PTR_COUNT) line up, then restores
 * every CSR it touched and re-runs link autoconfiguration.
 *
 * Returns 0 on success
 *         !0 on failure
 */
int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
{
#define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES \
     (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP)
#define FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES \
	(CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_NOT_FIRST_MBUFF_SKIP)
#define FIX_IPD_OUTPORT 0
	/* Ports 0-15 are interface 0, 16-31 are interface 1 */
#define INTERFACE(port) (port >> 4)
#define INDEX(port) (port & 0xf)
	uint64_t *p64;
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr g_buffer, pkt_buffer;
	cvmx_wqe_t *work;
	int size, num_segs = 0, wqe_pcnt, pkt_pcnt;
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int retry_cnt;
	int retry_loop_cnt;
	int mtu;
	int i;
	cvmx_helper_link_info_t link_info;

	/* Save values for restore at end */
	uint64_t prtx_cfg =
	    cvmx_read_csr(CVMX_GMXX_PRTX_CFG
			  (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
	uint64_t tx_ptr_en =
	    cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
	uint64_t rx_ptr_en =
	    cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
	uint64_t rxx_jabber =
	    cvmx_read_csr(CVMX_GMXX_RXX_JABBER
			  (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
	uint64_t frame_max =
	    cvmx_read_csr(CVMX_GMXX_RXX_FRM_MAX
			  (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));

	/* Configure port to gig FDX as required for loopback mode */
	cvmx_helper_rgmii_internal_loopback(FIX_IPD_OUTPORT);

	/*
	 * Disable reception on all ports so if traffic is present it
	 * will not interfere.
	 */
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);

	cvmx_wait(100000000ull);

	for (retry_loop_cnt = 0; retry_loop_cnt < 10; retry_loop_cnt++) {
		retry_cnt = 100000;
		/* Unpack the WQE and packet pointer counts from one CSR */
		wqe_pcnt = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
		pkt_pcnt = (wqe_pcnt >> 7) & 0x7f;
		wqe_pcnt &= 0x7f;

		num_segs = (2 + pkt_pcnt - wqe_pcnt) & 3;

		/* Pointers are already aligned; nothing to fix */
		if (num_segs == 0)
			goto fix_ipd_exit;

		num_segs += 1;

		size =
		    FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES +
		    ((num_segs - 1) * FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES) -
		    (FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES / 2);

		cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)),
			       1 << INDEX(FIX_IPD_OUTPORT));
		CVMX_SYNC;

		/* Build a gather list of num_segs packet buffers */
		g_buffer.u64 = 0;
		g_buffer.s.addr =
		    cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_WQE_POOL));
		if (g_buffer.s.addr == 0) {
			cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
				     "buffer allocation failure.\n");
			goto fix_ipd_exit;
		}

		g_buffer.s.pool = CVMX_FPA_WQE_POOL;
		g_buffer.s.size = num_segs;

		pkt_buffer.u64 = 0;
		pkt_buffer.s.addr =
		    cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL));
		if (pkt_buffer.s.addr == 0) {
			cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
				     "buffer allocation failure.\n");
			goto fix_ipd_exit;
		}
		pkt_buffer.s.i = 1;
		pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL;
		pkt_buffer.s.size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES;

		/* Write a minimal Ethernet/IP header into the test packet */
		p64 = (uint64_t *) cvmx_phys_to_ptr(pkt_buffer.s.addr);
		p64[0] = 0xffffffffffff0000ull;
		p64[1] = 0x08004510ull;
		p64[2] = ((uint64_t) (size - 14) << 48) | 0x5ae740004000ull;
		p64[3] = 0x3a5fc0a81073c0a8ull;

		for (i = 0; i < num_segs; i++) {
			if (i > 0)
				pkt_buffer.s.size =
				    FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES;

			/* Last segment: clear the "invert free" bit */
			if (i == (num_segs - 1))
				pkt_buffer.s.i = 0;

			*(uint64_t *) cvmx_phys_to_ptr(g_buffer.s.addr +
						       8 * i) = pkt_buffer.u64;
		}

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = num_segs;
		pko_command.s.total_bytes = size;
		pko_command.s.dontfree = 0;
		pko_command.s.gather = 1;

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG
				  (INDEX(FIX_IPD_OUTPORT),
				   INTERFACE(FIX_IPD_OUTPORT)));
		gmx_cfg.s.en = 1;
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG
			       (INDEX(FIX_IPD_OUTPORT),
				INTERFACE(FIX_IPD_OUTPORT)), gmx_cfg.u64);
		cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
			       1 << INDEX(FIX_IPD_OUTPORT));
		cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
			       1 << INDEX(FIX_IPD_OUTPORT));

		/* NOTE(review): mtu is assigned here but never used */
		mtu =
		    cvmx_read_csr(CVMX_GMXX_RXX_JABBER
				  (INDEX(FIX_IPD_OUTPORT),
				   INTERFACE(FIX_IPD_OUTPORT)));
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER
			       (INDEX(FIX_IPD_OUTPORT),
				INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
		cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
			       (INDEX(FIX_IPD_OUTPORT),
				INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);

		/* Send the test packet out through PKO */
		cvmx_pko_send_packet_prepare(FIX_IPD_OUTPORT,
					     cvmx_pko_get_base_queue
					     (FIX_IPD_OUTPORT),
					     CVMX_PKO_LOCK_CMD_QUEUE);
		cvmx_pko_send_packet_finish(FIX_IPD_OUTPORT,
					    cvmx_pko_get_base_queue
					    (FIX_IPD_OUTPORT), pko_command,
					    g_buffer, CVMX_PKO_LOCK_CMD_QUEUE);

		CVMX_SYNC;

		/* Wait for the looped-back packet to arrive as POW work */
		do {
			work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
			retry_cnt--;
		} while ((work == NULL) && (retry_cnt > 0));

		if (!retry_cnt)
			cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
				     "get_work() timeout occured.\n");

		/* Free packet */
		if (work)
			cvmx_helper_free_packet_data(work);
	}

fix_ipd_exit:

	/* Return CSR configs to saved values */
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG
		       (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
		       prtx_cfg);
	cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
		       tx_ptr_en);
	cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
		       rx_ptr_en);
	cvmx_write_csr(CVMX_GMXX_RXX_JABBER
		       (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
		       rxx_jabber);
	cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
		       (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
		       frame_max);
	cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0);
	/* Set link to down so autonegotiation will set it up again */
	link_info.u64 = 0;
	cvmx_helper_link_set(FIX_IPD_OUTPORT, link_info);

	/*
	 * Bring the link back up as autonegotiation is not done in
	 * user applications.
	 */
	cvmx_helper_link_autoconf(FIX_IPD_OUTPORT);

	CVMX_SYNC;
	/* num_segs != 0 means the alignment was never corrected */
	if (num_segs)
		cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT failed.\n");

	return !!num_segs;

}
735
736/**
737 * Called after all internal packet IO paths are setup. This
738 * function enables IPD/PIP and begins packet input and output.
739 *
740 * Returns Zero on success, negative on failure
741 */
742int cvmx_helper_ipd_and_packet_input_enable(void)
743{
744 int num_interfaces;
745 int interface;
746
747 /* Enable IPD */
748 cvmx_ipd_enable();
749
750 /*
751 * Time to enable hardware ports packet input and output. Note
752 * that at this point IPD/PIP must be fully functional and PKO
753 * must be disabled
754 */
755 num_interfaces = cvmx_helper_get_number_of_interfaces();
756 for (interface = 0; interface < num_interfaces; interface++) {
757 if (cvmx_helper_ports_on_interface(interface) > 0)
758 __cvmx_helper_packet_hardware_enable(interface);
759 }
760
761 /* Finally enable PKO now that the entire path is up and running */
762 cvmx_pko_enable();
763
764 if ((OCTEON_IS_MODEL(OCTEON_CN31XX_PASS1)
765 || OCTEON_IS_MODEL(OCTEON_CN30XX_PASS1))
766 && (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM))
767 __cvmx_helper_errata_fix_ipd_ptr_alignment();
768 return 0;
769}
770
/**
 * Initialize the PIP, IPD, and PKO hardware to support
 * simple priority based queues for the ethernet ports. Each
 * port is configured with a number of priority queues based
 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
 * priority than the previous.
 *
 * Returns Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_global(void)
{
	int result = 0;
	int interface;
	union cvmx_l2c_cfg l2c_cfg;
	union cvmx_smix_en smix_en;
	const int num_interfaces = cvmx_helper_get_number_of_interfaces();

	/*
	 * CN52XX pass 1: Due to a bug in 2nd order CDR, it needs to
	 * be disabled.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
		__cvmx_helper_errata_qlm_disable_2nd_order_cdr(1);

	/*
	 * Tell L2 to give the IOB statically higher priority compared
	 * to the cores. This avoids conditions where IO blocks might
	 * be starved under very high L2 loads.
	 */
	l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
	l2c_cfg.s.lrf_arb_mode = 0;
	l2c_cfg.s.rfb_arb_mode = 0;
	cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);

	/* Make sure SMI/MDIO is enabled so we can query PHYs */
	smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(0));
	if (!smix_en.s.en) {
		smix_en.s.en = 1;
		cvmx_write_csr(CVMX_SMIX_EN(0), smix_en.u64);
	}

	/* Newer chips actually have two SMI/MDIO interfaces */
	if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
	    !OCTEON_IS_MODEL(OCTEON_CN58XX) &&
	    !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(1));
		if (!smix_en.s.en) {
			smix_en.s.en = 1;
			cvmx_write_csr(CVMX_SMIX_EN(1), smix_en.u64);
		}
	}

	/* Probe each interface, then configure its IPD/PIP and PKO state */
	cvmx_pko_initialize_global();
	for (interface = 0; interface < num_interfaces; interface++) {
		result |= cvmx_helper_interface_probe(interface);
		if (cvmx_helper_ports_on_interface(interface) > 0)
			cvmx_dprintf("Interface %d has %d ports (%s)\n",
				     interface,
				     cvmx_helper_ports_on_interface(interface),
				     cvmx_helper_interface_mode_to_string
				     (cvmx_helper_interface_get_mode
				      (interface)));
		result |= __cvmx_helper_interface_setup_ipd(interface);
		result |= __cvmx_helper_interface_setup_pko(interface);
	}

	/* Global (non per-interface) IPD and PKO configuration */
	result |= __cvmx_helper_global_setup_ipd();
	result |= __cvmx_helper_global_setup_pko();

	/* Enable any flow control and backpressure */
	result |= __cvmx_helper_global_setup_backpressure();

#if CVMX_HELPER_ENABLE_IPD
	result |= cvmx_helper_ipd_and_packet_input_enable();
#endif
	return result;
}
848
/**
 * Does core local initialization for packet io
 *
 * Returns Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_local(void)
{
	/* PKO is the only block with per-core setup */
	int status = cvmx_pko_initialize_local();

	return status;
}
858
859/**
860 * Auto configure an IPD/PKO port link state and speed. This
861 * function basically does the equivalent of:
862 * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
863 *
864 * @ipd_port: IPD/PKO port to auto configure
865 *
866 * Returns Link state after configure
867 */
868cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port)
869{
870 cvmx_helper_link_info_t link_info;
871 int interface = cvmx_helper_get_interface_num(ipd_port);
872 int index = cvmx_helper_get_interface_index_num(ipd_port);
873
874 if (index >= cvmx_helper_ports_on_interface(interface)) {
875 link_info.u64 = 0;
876 return link_info;
877 }
878
879 link_info = cvmx_helper_link_get(ipd_port);
880 if (link_info.u64 == port_link_info[ipd_port].u64)
881 return link_info;
882
883 /* If we fail to set the link speed, port_link_info will not change */
884 cvmx_helper_link_set(ipd_port, link_info);
885
886 /*
887 * port_link_info should be the current value, which will be
888 * different than expect if cvmx_helper_link_set() failed.
889 */
890 return port_link_info[ipd_port];
891}
892
893/**
894 * Return the link state of an IPD/PKO port as returned by
895 * auto negotiation. The result of this function may not match
896 * Octeon's link config if auto negotiation has changed since
897 * the last call to cvmx_helper_link_set().
898 *
899 * @ipd_port: IPD/PKO port to query
900 *
901 * Returns Link state
902 */
903cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
904{
905 cvmx_helper_link_info_t result;
906 int interface = cvmx_helper_get_interface_num(ipd_port);
907 int index = cvmx_helper_get_interface_index_num(ipd_port);
908
909 /* The default result will be a down link unless the code below
910 changes it */
911 result.u64 = 0;
912
913 if (index >= cvmx_helper_ports_on_interface(interface))
914 return result;
915
916 switch (cvmx_helper_interface_get_mode(interface)) {
917 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
918 case CVMX_HELPER_INTERFACE_MODE_PCIE:
919 /* Network links are not supported */
920 break;
921 case CVMX_HELPER_INTERFACE_MODE_XAUI:
922 result = __cvmx_helper_xaui_link_get(ipd_port);
923 break;
924 case CVMX_HELPER_INTERFACE_MODE_GMII:
925 if (index == 0)
926 result = __cvmx_helper_rgmii_link_get(ipd_port);
927 else {
928 result.s.full_duplex = 1;
929 result.s.link_up = 1;
930 result.s.speed = 1000;
931 }
932 break;
933 case CVMX_HELPER_INTERFACE_MODE_RGMII:
934 result = __cvmx_helper_rgmii_link_get(ipd_port);
935 break;
936 case CVMX_HELPER_INTERFACE_MODE_SPI:
937 result = __cvmx_helper_spi_link_get(ipd_port);
938 break;
939 case CVMX_HELPER_INTERFACE_MODE_SGMII:
940 case CVMX_HELPER_INTERFACE_MODE_PICMG:
941 result = __cvmx_helper_sgmii_link_get(ipd_port);
942 break;
943 case CVMX_HELPER_INTERFACE_MODE_NPI:
944 case CVMX_HELPER_INTERFACE_MODE_LOOP:
945 /* Network links are not supported */
946 break;
947 }
948 return result;
949}
950
951/**
952 * Configure an IPD/PKO port for the specified link state. This
953 * function does not influence auto negotiation at the PHY level.
954 * The passed link state must always match the link state returned
955 * by cvmx_helper_link_get(). It is normally best to use
956 * cvmx_helper_link_autoconf() instead.
957 *
958 * @ipd_port: IPD/PKO port to configure
959 * @link_info: The new link state
960 *
961 * Returns Zero on success, negative on failure
962 */
963int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
964{
965 int result = -1;
966 int interface = cvmx_helper_get_interface_num(ipd_port);
967 int index = cvmx_helper_get_interface_index_num(ipd_port);
968
969 if (index >= cvmx_helper_ports_on_interface(interface))
970 return -1;
971
972 switch (cvmx_helper_interface_get_mode(interface)) {
973 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
974 case CVMX_HELPER_INTERFACE_MODE_PCIE:
975 break;
976 case CVMX_HELPER_INTERFACE_MODE_XAUI:
977 result = __cvmx_helper_xaui_link_set(ipd_port, link_info);
978 break;
979 /*
980 * RGMII/GMII/MII are all treated about the same. Most
981 * functions refer to these ports as RGMII.
982 */
983 case CVMX_HELPER_INTERFACE_MODE_RGMII:
984 case CVMX_HELPER_INTERFACE_MODE_GMII:
985 result = __cvmx_helper_rgmii_link_set(ipd_port, link_info);
986 break;
987 case CVMX_HELPER_INTERFACE_MODE_SPI:
988 result = __cvmx_helper_spi_link_set(ipd_port, link_info);
989 break;
990 case CVMX_HELPER_INTERFACE_MODE_SGMII:
991 case CVMX_HELPER_INTERFACE_MODE_PICMG:
992 result = __cvmx_helper_sgmii_link_set(ipd_port, link_info);
993 break;
994 case CVMX_HELPER_INTERFACE_MODE_NPI:
995 case CVMX_HELPER_INTERFACE_MODE_LOOP:
996 break;
997 }
998 /* Set the port_link_info here so that the link status is updated
999 no matter how cvmx_helper_link_set is called. We don't change
1000 the value if link_set failed */
1001 if (result == 0)
1002 port_link_info[ipd_port].u64 = link_info.u64;
1003 return result;
1004}
1005
1006/**
1007 * Configure a port for internal and/or external loopback. Internal loopback
1008 * causes packets sent by the port to be received by Octeon. External loopback
1009 * causes packets received from the wire to sent out again.
1010 *
1011 * @ipd_port: IPD/PKO port to loopback.
1012 * @enable_internal:
1013 * Non zero if you want internal loopback
1014 * @enable_external:
1015 * Non zero if you want external loopback
1016 *
1017 * Returns Zero on success, negative on failure.
1018 */
1019int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
1020 int enable_external)
1021{
1022 int result = -1;
1023 int interface = cvmx_helper_get_interface_num(ipd_port);
1024 int index = cvmx_helper_get_interface_index_num(ipd_port);
1025
1026 if (index >= cvmx_helper_ports_on_interface(interface))
1027 return -1;
1028
1029 switch (cvmx_helper_interface_get_mode(interface)) {
1030 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
1031 case CVMX_HELPER_INTERFACE_MODE_PCIE:
1032 case CVMX_HELPER_INTERFACE_MODE_SPI:
1033 case CVMX_HELPER_INTERFACE_MODE_NPI:
1034 case CVMX_HELPER_INTERFACE_MODE_LOOP:
1035 break;
1036 case CVMX_HELPER_INTERFACE_MODE_XAUI:
1037 result =
1038 __cvmx_helper_xaui_configure_loopback(ipd_port,
1039 enable_internal,
1040 enable_external);
1041 break;
1042 case CVMX_HELPER_INTERFACE_MODE_RGMII:
1043 case CVMX_HELPER_INTERFACE_MODE_GMII:
1044 result =
1045 __cvmx_helper_rgmii_configure_loopback(ipd_port,
1046 enable_internal,
1047 enable_external);
1048 break;
1049 case CVMX_HELPER_INTERFACE_MODE_SGMII:
1050 case CVMX_HELPER_INTERFACE_MODE_PICMG:
1051 result =
1052 __cvmx_helper_sgmii_configure_loopback(ipd_port,
1053 enable_internal,
1054 enable_external);
1055 break;
1056 }
1057 return result;
1058}
diff --git a/drivers/staging/octeon/cvmx-helper.h b/drivers/staging/octeon/cvmx-helper.h
new file mode 100644
index 000000000000..51916f3cc40c
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-helper.h
@@ -0,0 +1,227 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 *
30 * Helper functions for common, but complicated tasks.
31 *
32 */
33
34#ifndef __CVMX_HELPER_H__
35#define __CVMX_HELPER_H__
36
37#include "cvmx-config.h"
38#include "cvmx-fpa.h"
39#include "cvmx-wqe.h"
40
/*
 * Operating mode of an Octeon packet interface, as returned by
 * cvmx_helper_interface_get_mode().  Unknown or unsupported
 * interfaces report DISABLED.
 */
typedef enum {
	CVMX_HELPER_INTERFACE_MODE_DISABLED,
	CVMX_HELPER_INTERFACE_MODE_RGMII,
	CVMX_HELPER_INTERFACE_MODE_GMII,
	CVMX_HELPER_INTERFACE_MODE_SPI,
	CVMX_HELPER_INTERFACE_MODE_PCIE,
	CVMX_HELPER_INTERFACE_MODE_XAUI,
	CVMX_HELPER_INTERFACE_MODE_SGMII,
	CVMX_HELPER_INTERFACE_MODE_PICMG,
	CVMX_HELPER_INTERFACE_MODE_NPI,
	CVMX_HELPER_INTERFACE_MODE_LOOP,
} cvmx_helper_interface_mode_t;
53
/*
 * Link state of an IPD/PKO port, as used by cvmx_helper_link_get()
 * and cvmx_helper_link_set().  The overlaid u64 lets the whole state
 * be copied or compared as a single word.
 */
typedef union {
	uint64_t u64;
	struct {
		uint64_t reserved_20_63:44;
		uint64_t link_up:1;	    /**< Is the physical link up? */
		uint64_t full_duplex:1;	    /**< 1 if the link is full duplex */
		uint64_t speed:18;	    /**< Speed of the link in Mbps */
	} s;
} cvmx_helper_link_info_t;
63
64#include "cvmx-helper-fpa.h"
65
66#include <asm/octeon/cvmx-helper-errata.h>
67#include "cvmx-helper-loop.h"
68#include "cvmx-helper-npi.h"
69#include "cvmx-helper-rgmii.h"
70#include "cvmx-helper-sgmii.h"
71#include "cvmx-helper-spi.h"
72#include "cvmx-helper-util.h"
73#include "cvmx-helper-xaui.h"
74
75/**
76 * cvmx_override_pko_queue_priority(int ipd_port, uint64_t
77 * priorities[16]) is a function pointer. It is meant to allow
78 * customization of the PKO queue priorities based on the port
79 * number. Users should set this pointer to a function before
80 * calling any cvmx-helper operations.
81 */
82extern void (*cvmx_override_pko_queue_priority) (int pko_port,
83 uint64_t priorities[16]);
84
85/**
86 * cvmx_override_ipd_port_setup(int ipd_port) is a function
87 * pointer. It is meant to allow customization of the IPD port
88 * setup before packet input/output comes online. It is called
89 * after cvmx-helper does the default IPD configuration, but
90 * before IPD is enabled. Users should set this pointer to a
91 * function before calling any cvmx-helper operations.
92 */
93extern void (*cvmx_override_ipd_port_setup) (int ipd_port);
94
95/**
96 * This function enables the IPD and also enables the packet interfaces.
97 * The packet interfaces (RGMII and SPI) must be enabled after the
98 * IPD. This should be called by the user program after any additional
99 * IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
100 * is not set in the executive-config.h file.
101 *
102 * Returns 0 on success
103 * -1 on failure
104 */
105extern int cvmx_helper_ipd_and_packet_input_enable(void);
106
107/**
108 * Initialize the PIP, IPD, and PKO hardware to support
109 * simple priority based queues for the ethernet ports. Each
110 * port is configured with a number of priority queues based
111 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
112 * priority than the previous.
113 *
114 * Returns Zero on success, non-zero on failure
115 */
116extern int cvmx_helper_initialize_packet_io_global(void);
117
118/**
119 * Does core local initialization for packet io
120 *
121 * Returns Zero on success, non-zero on failure
122 */
123extern int cvmx_helper_initialize_packet_io_local(void);
124
125/**
126 * Returns the number of ports on the given interface.
127 * The interface must be initialized before the port count
128 * can be returned.
129 *
130 * @interface: Which interface to return port count for.
131 *
132 * Returns Port count for interface
133 * -1 for uninitialized interface
134 */
135extern int cvmx_helper_ports_on_interface(int interface);
136
137/**
138 * Return the number of interfaces the chip has. Each interface
139 * may have multiple ports. Most chips support two interfaces,
140 * but the CNX0XX and CNX1XX are exceptions. These only support
141 * one interface.
142 *
143 * Returns Number of interfaces on chip
144 */
145extern int cvmx_helper_get_number_of_interfaces(void);
146
147/**
148 * Get the operating mode of an interface. Depending on the Octeon
149 * chip and configuration, this function returns an enumeration
150 * of the type of packet I/O supported by an interface.
151 *
152 * @interface: Interface to probe
153 *
154 * Returns Mode of the interface. Unknown or unsupported interfaces return
155 * DISABLED.
156 */
157extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
158 interface);
159
160/**
161 * Auto configure an IPD/PKO port link state and speed. This
162 * function basically does the equivalent of:
163 * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
164 *
165 * @ipd_port: IPD/PKO port to auto configure
166 *
167 * Returns Link state after configure
168 */
169extern cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port);
170
171/**
172 * Return the link state of an IPD/PKO port as returned by
173 * auto negotiation. The result of this function may not match
174 * Octeon's link config if auto negotiation has changed since
175 * the last call to cvmx_helper_link_set().
176 *
177 * @ipd_port: IPD/PKO port to query
178 *
179 * Returns Link state
180 */
181extern cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port);
182
183/**
184 * Configure an IPD/PKO port for the specified link state. This
185 * function does not influence auto negotiation at the PHY level.
186 * The passed link state must always match the link state returned
187 * by cvmx_helper_link_get(). It is normally best to use
188 * cvmx_helper_link_autoconf() instead.
189 *
190 * @ipd_port: IPD/PKO port to configure
191 * @link_info: The new link state
192 *
193 * Returns Zero on success, negative on failure
194 */
195extern int cvmx_helper_link_set(int ipd_port,
196 cvmx_helper_link_info_t link_info);
197
198/**
199 * This function probes an interface to determine the actual
200 * number of hardware ports connected to it. It doesn't setup the
201 * ports or enable them. The main goal here is to set the global
202 * interface_port_count[interface] correctly. Hardware setup of the
203 * ports will be performed later.
204 *
205 * @interface: Interface to probe
206 *
207 * Returns Zero on success, negative on failure
208 */
209extern int cvmx_helper_interface_probe(int interface);
210
211/**
212 * Configure a port for internal and/or external loopback. Internal loopback
213 * causes packets sent by the port to be received by Octeon. External loopback
214 * causes packets received from the wire to be sent out again.
215 *
216 * @ipd_port: IPD/PKO port to loopback.
217 * @enable_internal:
218 * Non zero if you want internal loopback
219 * @enable_external:
220 * Non zero if you want external loopback
221 *
222 * Returns Zero on success, negative on failure.
223 */
224extern int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
225 int enable_external);
226
227#endif /* __CVMX_HELPER_H__ */
diff --git a/drivers/staging/octeon/cvmx-interrupt-decodes.c b/drivers/staging/octeon/cvmx-interrupt-decodes.c
new file mode 100644
index 000000000000..a3337e382ee9
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-interrupt-decodes.c
@@ -0,0 +1,371 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2009 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 *
30 * Automatically generated functions useful for enabling
31 * and decoding RSL_INT_BLOCKS interrupts.
32 *
33 */
34
35#include <asm/octeon/octeon.h>
36
37#include "cvmx-gmxx-defs.h"
38#include "cvmx-pcsx-defs.h"
39#include "cvmx-pcsxx-defs.h"
40#include "cvmx-spxx-defs.h"
41#include "cvmx-stxx-defs.h"
42
43#ifndef PRINT_ERROR
44#define PRINT_ERROR(format, ...)
45#endif
46
47
/**
 * __cvmx_interrupt_gmxx_rxx_int_en_enable enables all interrupt bits in cvmx_gmxx_rxx_int_en_t
 *
 * Pending conditions in GMX_RX(index)_INT_REG are acknowledged first
 * (written back to themselves) so stale events don't fire the moment
 * enables are set.  The enable mask is then built up per chip model;
 * bits left commented out are deliberately NOT enabled because they
 * fire during normal operation or are reported through other paths
 * (e.g. FCS/length errors arrive with the received work entry).
 *
 * @index: Port index within the GMX block
 * @block: GMX block (interface) number
 */
void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
{
	union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
	/* Acknowledge any already-latched RX interrupt conditions */
	cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, block),
		       cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, block)));
	gmx_rx_int_en.u64 = 0;
	if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
		/* Skipping gmx_rx_int_en.s.reserved_29_63 */
		gmx_rx_int_en.s.hg2cc = 1;
		gmx_rx_int_en.s.hg2fld = 1;
		gmx_rx_int_en.s.undat = 1;
		gmx_rx_int_en.s.uneop = 1;
		gmx_rx_int_en.s.unsop = 1;
		gmx_rx_int_en.s.bad_term = 1;
		gmx_rx_int_en.s.bad_seq = 1;
		gmx_rx_int_en.s.rem_fault = 1;
		gmx_rx_int_en.s.loc_fault = 1;
		gmx_rx_int_en.s.pause_drp = 1;
		/* Skipping gmx_rx_int_en.s.reserved_16_18 */
		/*gmx_rx_int_en.s.ifgerr = 1; */
		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
		gmx_rx_int_en.s.ovrerr = 1;
		/* Skipping gmx_rx_int_en.s.reserved_9_9 */
		gmx_rx_int_en.s.skperr = 1;
		gmx_rx_int_en.s.rcverr = 1;
		/* Skipping gmx_rx_int_en.s.reserved_5_6 */
		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
		gmx_rx_int_en.s.jabber = 1;
		/* Skipping gmx_rx_int_en.s.reserved_2_2 */
		gmx_rx_int_en.s.carext = 1;
		/* Skipping gmx_rx_int_en.s.reserved_0_0 */
	}
	if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
		/* Skipping gmx_rx_int_en.s.reserved_19_63 */
		/*gmx_rx_int_en.s.phy_dupx = 1; */
		/*gmx_rx_int_en.s.phy_spd = 1; */
		/*gmx_rx_int_en.s.phy_link = 1; */
		/*gmx_rx_int_en.s.ifgerr = 1; */
		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
		gmx_rx_int_en.s.ovrerr = 1;
		gmx_rx_int_en.s.niberr = 1;
		gmx_rx_int_en.s.skperr = 1;
		gmx_rx_int_en.s.rcverr = 1;
		/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
		gmx_rx_int_en.s.alnerr = 1;
		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
		gmx_rx_int_en.s.jabber = 1;
		gmx_rx_int_en.s.maxerr = 1;
		gmx_rx_int_en.s.carext = 1;
		gmx_rx_int_en.s.minerr = 1;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		/* Skipping gmx_rx_int_en.s.reserved_20_63 */
		gmx_rx_int_en.s.pause_drp = 1;
		/*gmx_rx_int_en.s.phy_dupx = 1; */
		/*gmx_rx_int_en.s.phy_spd = 1; */
		/*gmx_rx_int_en.s.phy_link = 1; */
		/*gmx_rx_int_en.s.ifgerr = 1; */
		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
		gmx_rx_int_en.s.ovrerr = 1;
		gmx_rx_int_en.s.niberr = 1;
		gmx_rx_int_en.s.skperr = 1;
		gmx_rx_int_en.s.rcverr = 1;
		/* Skipping gmx_rx_int_en.s.reserved_6_6 */
		gmx_rx_int_en.s.alnerr = 1;
		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
		gmx_rx_int_en.s.jabber = 1;
		/* Skipping gmx_rx_int_en.s.reserved_2_2 */
		gmx_rx_int_en.s.carext = 1;
		/* Skipping gmx_rx_int_en.s.reserved_0_0 */
	}
	if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
		/* Skipping gmx_rx_int_en.s.reserved_19_63 */
		/*gmx_rx_int_en.s.phy_dupx = 1; */
		/*gmx_rx_int_en.s.phy_spd = 1; */
		/*gmx_rx_int_en.s.phy_link = 1; */
		/*gmx_rx_int_en.s.ifgerr = 1; */
		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
		gmx_rx_int_en.s.ovrerr = 1;
		gmx_rx_int_en.s.niberr = 1;
		gmx_rx_int_en.s.skperr = 1;
		gmx_rx_int_en.s.rcverr = 1;
		/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
		gmx_rx_int_en.s.alnerr = 1;
		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
		gmx_rx_int_en.s.jabber = 1;
		gmx_rx_int_en.s.maxerr = 1;
		gmx_rx_int_en.s.carext = 1;
		gmx_rx_int_en.s.minerr = 1;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
		/* Skipping gmx_rx_int_en.s.reserved_19_63 */
		/*gmx_rx_int_en.s.phy_dupx = 1; */
		/*gmx_rx_int_en.s.phy_spd = 1; */
		/*gmx_rx_int_en.s.phy_link = 1; */
		/*gmx_rx_int_en.s.ifgerr = 1; */
		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
		gmx_rx_int_en.s.ovrerr = 1;
		gmx_rx_int_en.s.niberr = 1;
		gmx_rx_int_en.s.skperr = 1;
		gmx_rx_int_en.s.rcverr = 1;
		/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
		gmx_rx_int_en.s.alnerr = 1;
		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
		gmx_rx_int_en.s.jabber = 1;
		gmx_rx_int_en.s.maxerr = 1;
		gmx_rx_int_en.s.carext = 1;
		gmx_rx_int_en.s.minerr = 1;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
		/* Skipping gmx_rx_int_en.s.reserved_20_63 */
		gmx_rx_int_en.s.pause_drp = 1;
		/*gmx_rx_int_en.s.phy_dupx = 1; */
		/*gmx_rx_int_en.s.phy_spd = 1; */
		/*gmx_rx_int_en.s.phy_link = 1; */
		/*gmx_rx_int_en.s.ifgerr = 1; */
		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
		gmx_rx_int_en.s.ovrerr = 1;
		gmx_rx_int_en.s.niberr = 1;
		gmx_rx_int_en.s.skperr = 1;
		gmx_rx_int_en.s.rcverr = 1;
		/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
		gmx_rx_int_en.s.alnerr = 1;
		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
		gmx_rx_int_en.s.jabber = 1;
		gmx_rx_int_en.s.maxerr = 1;
		gmx_rx_int_en.s.carext = 1;
		gmx_rx_int_en.s.minerr = 1;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
		/* Skipping gmx_rx_int_en.s.reserved_29_63 */
		gmx_rx_int_en.s.hg2cc = 1;
		gmx_rx_int_en.s.hg2fld = 1;
		gmx_rx_int_en.s.undat = 1;
		gmx_rx_int_en.s.uneop = 1;
		gmx_rx_int_en.s.unsop = 1;
		gmx_rx_int_en.s.bad_term = 1;
		/*
		 * NOTE(review): unlike CN56XX, bad_seq and loc_fault are left
		 * disabled (0) on CN52XX.  Presumably intentional for this
		 * model; confirm against the SDK before "fixing".
		 */
		gmx_rx_int_en.s.bad_seq = 0;
		gmx_rx_int_en.s.rem_fault = 1;
		gmx_rx_int_en.s.loc_fault = 0;
		gmx_rx_int_en.s.pause_drp = 1;
		/* Skipping gmx_rx_int_en.s.reserved_16_18 */
		/*gmx_rx_int_en.s.ifgerr = 1; */
		/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
		/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
		/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
		/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
		gmx_rx_int_en.s.ovrerr = 1;
		/* Skipping gmx_rx_int_en.s.reserved_9_9 */
		gmx_rx_int_en.s.skperr = 1;
		gmx_rx_int_en.s.rcverr = 1;
		/* Skipping gmx_rx_int_en.s.reserved_5_6 */
		/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
		gmx_rx_int_en.s.jabber = 1;
		/* Skipping gmx_rx_int_en.s.reserved_2_2 */
		gmx_rx_int_en.s.carext = 1;
		/* Skipping gmx_rx_int_en.s.reserved_0_0 */
	}
	cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, block), gmx_rx_int_en.u64);
}
229/**
230 * __cvmx_interrupt_pcsx_intx_en_reg_enable enables all interrupt bits in cvmx_pcsx_intx_en_reg_t
231 */
232void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block)
233{
234 union cvmx_pcsx_intx_en_reg pcs_int_en_reg;
235 cvmx_write_csr(CVMX_PCSX_INTX_REG(index, block),
236 cvmx_read_csr(CVMX_PCSX_INTX_REG(index, block)));
237 pcs_int_en_reg.u64 = 0;
238 if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
239 /* Skipping pcs_int_en_reg.s.reserved_12_63 */
240 /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
241 pcs_int_en_reg.s.sync_bad_en = 1;
242 pcs_int_en_reg.s.an_bad_en = 1;
243 pcs_int_en_reg.s.rxlock_en = 1;
244 pcs_int_en_reg.s.rxbad_en = 1;
245 /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
246 pcs_int_en_reg.s.txbad_en = 1;
247 pcs_int_en_reg.s.txfifo_en = 1;
248 pcs_int_en_reg.s.txfifu_en = 1;
249 pcs_int_en_reg.s.an_err_en = 1;
250 /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
251 /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
252 }
253 if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
254 /* Skipping pcs_int_en_reg.s.reserved_12_63 */
255 /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
256 pcs_int_en_reg.s.sync_bad_en = 1;
257 pcs_int_en_reg.s.an_bad_en = 1;
258 pcs_int_en_reg.s.rxlock_en = 1;
259 pcs_int_en_reg.s.rxbad_en = 1;
260 /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
261 pcs_int_en_reg.s.txbad_en = 1;
262 pcs_int_en_reg.s.txfifo_en = 1;
263 pcs_int_en_reg.s.txfifu_en = 1;
264 pcs_int_en_reg.s.an_err_en = 1;
265 /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
266 /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
267 }
268 cvmx_write_csr(CVMX_PCSX_INTX_EN_REG(index, block), pcs_int_en_reg.u64);
269}
/**
 * __cvmx_interrupt_pcsxx_int_en_reg_enable enables all interrupt bits in cvmx_pcsxx_int_en_reg_t
 *
 * Pending conditions in PCSX(index)_INT_REG are acknowledged first,
 * then the per-model enable mask is programmed.  Note the CN52XX and
 * CN56XX branches differ only in bitlckls_en (see comment below).
 *
 * @index: XAUI interface number
 */
void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index)
{
	union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
	/* Acknowledge any already-latched interrupt conditions */
	cvmx_write_csr(CVMX_PCSXX_INT_REG(index),
		       cvmx_read_csr(CVMX_PCSXX_INT_REG(index)));
	pcsx_int_en_reg.u64 = 0;
	if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
		/* Skipping pcsx_int_en_reg.s.reserved_6_63 */
		pcsx_int_en_reg.s.algnlos_en = 1;
		pcsx_int_en_reg.s.synlos_en = 1;
		pcsx_int_en_reg.s.bitlckls_en = 1;
		pcsx_int_en_reg.s.rxsynbad_en = 1;
		pcsx_int_en_reg.s.rxbad_en = 1;
		pcsx_int_en_reg.s.txflt_en = 1;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
		/* Skipping pcsx_int_en_reg.s.reserved_6_63 */
		pcsx_int_en_reg.s.algnlos_en = 1;
		pcsx_int_en_reg.s.synlos_en = 1;
		pcsx_int_en_reg.s.bitlckls_en = 0;	/* Happens if XAUI module is not installed */
		pcsx_int_en_reg.s.rxsynbad_en = 1;
		pcsx_int_en_reg.s.rxbad_en = 1;
		pcsx_int_en_reg.s.txflt_en = 1;
	}
	cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(index), pcsx_int_en_reg.u64);
}
299
300/**
301 * __cvmx_interrupt_spxx_int_msk_enable enables all interrupt bits in cvmx_spxx_int_msk_t
302 */
303void __cvmx_interrupt_spxx_int_msk_enable(int index)
304{
305 union cvmx_spxx_int_msk spx_int_msk;
306 cvmx_write_csr(CVMX_SPXX_INT_REG(index),
307 cvmx_read_csr(CVMX_SPXX_INT_REG(index)));
308 spx_int_msk.u64 = 0;
309 if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
310 /* Skipping spx_int_msk.s.reserved_12_63 */
311 spx_int_msk.s.calerr = 1;
312 spx_int_msk.s.syncerr = 1;
313 spx_int_msk.s.diperr = 1;
314 spx_int_msk.s.tpaovr = 1;
315 spx_int_msk.s.rsverr = 1;
316 spx_int_msk.s.drwnng = 1;
317 spx_int_msk.s.clserr = 1;
318 spx_int_msk.s.spiovr = 1;
319 /* Skipping spx_int_msk.s.reserved_2_3 */
320 spx_int_msk.s.abnorm = 1;
321 spx_int_msk.s.prtnxa = 1;
322 }
323 if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
324 /* Skipping spx_int_msk.s.reserved_12_63 */
325 spx_int_msk.s.calerr = 1;
326 spx_int_msk.s.syncerr = 1;
327 spx_int_msk.s.diperr = 1;
328 spx_int_msk.s.tpaovr = 1;
329 spx_int_msk.s.rsverr = 1;
330 spx_int_msk.s.drwnng = 1;
331 spx_int_msk.s.clserr = 1;
332 spx_int_msk.s.spiovr = 1;
333 /* Skipping spx_int_msk.s.reserved_2_3 */
334 spx_int_msk.s.abnorm = 1;
335 spx_int_msk.s.prtnxa = 1;
336 }
337 cvmx_write_csr(CVMX_SPXX_INT_MSK(index), spx_int_msk.u64);
338}
339/**
340 * __cvmx_interrupt_stxx_int_msk_enable enables all interrupt bits in cvmx_stxx_int_msk_t
341 */
342void __cvmx_interrupt_stxx_int_msk_enable(int index)
343{
344 union cvmx_stxx_int_msk stx_int_msk;
345 cvmx_write_csr(CVMX_STXX_INT_REG(index),
346 cvmx_read_csr(CVMX_STXX_INT_REG(index)));
347 stx_int_msk.u64 = 0;
348 if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
349 /* Skipping stx_int_msk.s.reserved_8_63 */
350 stx_int_msk.s.frmerr = 1;
351 stx_int_msk.s.unxfrm = 1;
352 stx_int_msk.s.nosync = 1;
353 stx_int_msk.s.diperr = 1;
354 stx_int_msk.s.datovr = 1;
355 stx_int_msk.s.ovrbst = 1;
356 stx_int_msk.s.calpar1 = 1;
357 stx_int_msk.s.calpar0 = 1;
358 }
359 if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
360 /* Skipping stx_int_msk.s.reserved_8_63 */
361 stx_int_msk.s.frmerr = 1;
362 stx_int_msk.s.unxfrm = 1;
363 stx_int_msk.s.nosync = 1;
364 stx_int_msk.s.diperr = 1;
365 stx_int_msk.s.datovr = 1;
366 stx_int_msk.s.ovrbst = 1;
367 stx_int_msk.s.calpar1 = 1;
368 stx_int_msk.s.calpar0 = 1;
369 }
370 cvmx_write_csr(CVMX_STXX_INT_MSK(index), stx_int_msk.u64);
371}
diff --git a/drivers/staging/octeon/cvmx-interrupt-rsl.c b/drivers/staging/octeon/cvmx-interrupt-rsl.c
new file mode 100644
index 000000000000..df50048cfbc0
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-interrupt-rsl.c
@@ -0,0 +1,140 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Utility functions to decode Octeon's RSL_INT_BLOCKS
30 * interrupts into error messages.
31 */
32
33#include <asm/octeon/octeon.h>
34
35#include "cvmx-asxx-defs.h"
36#include "cvmx-gmxx-defs.h"
37
38#ifndef PRINT_ERROR
39#define PRINT_ERROR(format, ...)
40#endif
41
42void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block);
43
44/**
45 * Enable ASX error interrupts that exist on CN3XXX, CN50XX, and
46 * CN58XX.
47 *
48 * @block: Interface to enable 0-1
49 */
50void __cvmx_interrupt_asxx_enable(int block)
51{
52 int mask;
53 union cvmx_asxx_int_en csr;
54 /*
55 * CN38XX and CN58XX have two interfaces with 4 ports per
56 * interface. All other chips have a max of 3 ports on
57 * interface 0
58 */
59 if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
60 mask = 0xf; /* Set enables for 4 ports */
61 else
62 mask = 0x7; /* Set enables for 3 ports */
63
64 /* Enable interface interrupts */
65 csr.u64 = cvmx_read_csr(CVMX_ASXX_INT_EN(block));
66 csr.s.txpsh = mask;
67 csr.s.txpop = mask;
68 csr.s.ovrflw = mask;
69 cvmx_write_csr(CVMX_ASXX_INT_EN(block), csr.u64);
70}
71/**
72 * Enable GMX error reporting for the supplied interface
73 *
74 * @interface: Interface to enable
75 */
76void __cvmx_interrupt_gmxx_enable(int interface)
77{
78 union cvmx_gmxx_inf_mode mode;
79 union cvmx_gmxx_tx_int_en gmx_tx_int_en;
80 int num_ports;
81 int index;
82
83 mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
84
85 if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
86 if (mode.s.en) {
87 switch (mode.cn56xx.mode) {
88 case 1: /* XAUI */
89 num_ports = 1;
90 break;
91 case 2: /* SGMII */
92 case 3: /* PICMG */
93 num_ports = 4;
94 break;
95 default: /* Disabled */
96 num_ports = 0;
97 break;
98 }
99 } else
100 num_ports = 0;
101 } else {
102 if (mode.s.en) {
103 if (OCTEON_IS_MODEL(OCTEON_CN38XX)
104 || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
105 /*
106 * SPI on CN38XX and CN58XX report all
107 * errors through port 0. RGMII needs
108 * to check all 4 ports
109 */
110 if (mode.s.type)
111 num_ports = 1;
112 else
113 num_ports = 4;
114 } else {
115 /*
116 * CN30XX, CN31XX, and CN50XX have two
117 * or three ports. GMII and MII has 2,
118 * RGMII has three
119 */
120 if (mode.s.type)
121 num_ports = 2;
122 else
123 num_ports = 3;
124 }
125 } else
126 num_ports = 0;
127 }
128
129 gmx_tx_int_en.u64 = 0;
130 if (num_ports) {
131 if (OCTEON_IS_MODEL(OCTEON_CN38XX)
132 || OCTEON_IS_MODEL(OCTEON_CN58XX))
133 gmx_tx_int_en.s.ncb_nxa = 1;
134 gmx_tx_int_en.s.pko_nxa = 1;
135 }
136 gmx_tx_int_en.s.undflw = (1 << num_ports) - 1;
137 cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
138 for (index = 0; index < num_ports; index++)
139 __cvmx_interrupt_gmxx_rxx_int_en_enable(index, interface);
140}
diff --git a/drivers/staging/octeon/cvmx-ipd.h b/drivers/staging/octeon/cvmx-ipd.h
new file mode 100644
index 000000000000..115a552c5c7f
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-ipd.h
@@ -0,0 +1,338 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 *
30 * Interface to the hardware Input Packet Data unit.
31 */
32
33#ifndef __CVMX_IPD_H__
34#define __CVMX_IPD_H__
35
36#include <asm/octeon/octeon-feature.h>
37
38#include <asm/octeon/cvmx-ipd-defs.h>
39
/*
 * Cache write-back policy applied to received packet data; programmed
 * into IPD_CTL_STATUS[OPC_MODE] by cvmx_ipd_config() below.
 */
enum cvmx_ipd_mode {
	CVMX_IPD_OPC_MODE_STT = 0LL,	/* All blocks DRAM, not cached in L2 */
	CVMX_IPD_OPC_MODE_STF = 1LL,	/* All blocks into L2 */
	CVMX_IPD_OPC_MODE_STF1_STT = 2LL,	/* 1st block L2, rest DRAM */
	CVMX_IPD_OPC_MODE_STF2_STT = 3LL	/* 1st, 2nd blocks L2, rest DRAM */
};
46
/* Off by default; gates the len_m8 workaround in cvmx_ipd_enable(). */
#ifndef CVMX_ENABLE_LEN_M8_FIX
#define CVMX_ENABLE_LEN_M8_FIX 0
#endif

/* CSR typedefs have been moved to cvmx-csr-*.h */
typedef union cvmx_ipd_1st_mbuff_skip cvmx_ipd_mbuff_first_skip_t;
typedef union cvmx_ipd_1st_next_ptr_back cvmx_ipd_first_next_ptr_back_t;

/* The "not first"/"second" registers share the layout of the "first" ones. */
typedef cvmx_ipd_mbuff_first_skip_t cvmx_ipd_mbuff_not_first_skip_t;
typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
57
58/**
59 * Configure IPD
60 *
61 * @mbuff_size: Packets buffer size in 8 byte words
62 * @first_mbuff_skip:
63 * Number of 8 byte words to skip in the first buffer
64 * @not_first_mbuff_skip:
65 * Number of 8 byte words to skip in each following buffer
66 * @first_back: Must be same as first_mbuff_skip / 128
67 * @second_back:
68 * Must be same as not_first_mbuff_skip / 128
69 * @wqe_fpa_pool:
70 * FPA pool to get work entries from
71 * @cache_mode:
72 * @back_pres_enable_flag:
73 * Enable or disable port back pressure
74 */
75static inline void cvmx_ipd_config(uint64_t mbuff_size,
76 uint64_t first_mbuff_skip,
77 uint64_t not_first_mbuff_skip,
78 uint64_t first_back,
79 uint64_t second_back,
80 uint64_t wqe_fpa_pool,
81 enum cvmx_ipd_mode cache_mode,
82 uint64_t back_pres_enable_flag)
83{
84 cvmx_ipd_mbuff_first_skip_t first_skip;
85 cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
86 union cvmx_ipd_packet_mbuff_size size;
87 cvmx_ipd_first_next_ptr_back_t first_back_struct;
88 cvmx_ipd_second_next_ptr_back_t second_back_struct;
89 union cvmx_ipd_wqe_fpa_queue wqe_pool;
90 union cvmx_ipd_ctl_status ipd_ctl_reg;
91
92 first_skip.u64 = 0;
93 first_skip.s.skip_sz = first_mbuff_skip;
94 cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);
95
96 not_first_skip.u64 = 0;
97 not_first_skip.s.skip_sz = not_first_mbuff_skip;
98 cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);
99
100 size.u64 = 0;
101 size.s.mb_size = mbuff_size;
102 cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);
103
104 first_back_struct.u64 = 0;
105 first_back_struct.s.back = first_back;
106 cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);
107
108 second_back_struct.u64 = 0;
109 second_back_struct.s.back = second_back;
110 cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);
111
112 wqe_pool.u64 = 0;
113 wqe_pool.s.wqe_pool = wqe_fpa_pool;
114 cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);
115
116 ipd_ctl_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
117 ipd_ctl_reg.s.opc_mode = cache_mode;
118 ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
119 cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);
120
121 /* Note: the example RED code that used to be here has been moved to
122 cvmx_helper_setup_red */
123}
124
125/**
126 * Enable IPD
127 */
128static inline void cvmx_ipd_enable(void)
129{
130 union cvmx_ipd_ctl_status ipd_reg;
131 ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
132 if (ipd_reg.s.ipd_en) {
133 cvmx_dprintf
134 ("Warning: Enabling IPD when IPD already enabled.\n");
135 }
136 ipd_reg.s.ipd_en = 1;
137#if CVMX_ENABLE_LEN_M8_FIX
138 if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
139 ipd_reg.s.len_m8 = TRUE;
140#endif
141 cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
142}
143
144/**
145 * Disable IPD
146 */
147static inline void cvmx_ipd_disable(void)
148{
149 union cvmx_ipd_ctl_status ipd_reg;
150 ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
151 ipd_reg.s.ipd_en = 0;
152 cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
153}
154
/**
 * Supportive function for cvmx_fpa_shutdown_pool.
 *
 * Drains every buffer pointer still cached inside the IPD (prefetched
 * WQE/packet pointers plus the internal pointer FIFOs), returning each
 * one to the FPA, then soft-resets the IPD and PIP blocks.
 *
 * NOTE(review): each FIFO drain loop clears "cena", writes "raddr" to
 * step through FIFO entries, then re-reads the CSR — this appears to be
 * a direct software read-out of the FIFO; confirm against the IPD
 * chapter of the hardware reference manual.
 */
static inline void cvmx_ipd_free_ptr(void)
{
	/* Only CN38XXp{1,2} cannot read pointer out of the IPD */
	if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)
	    && !OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
		int no_wptr = 0;
		union cvmx_ipd_ptr_count ipd_ptr_count;
		ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);

		/* Handle Work Queue Entry in cn56xx and cn52xx */
		if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
			union cvmx_ipd_ctl_status ipd_ctl_status;
			ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
			if (ipd_ctl_status.s.no_wptr)
				no_wptr = 1;
		}

		/* Free the prefetched WQE */
		/* no_wptr mode stores WQEs in the packet pool instead. */
		if (ipd_ptr_count.s.wqev_cnt) {
			union cvmx_ipd_wqe_ptr_valid ipd_wqe_ptr_valid;
			ipd_wqe_ptr_valid.u64 =
			    cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
			if (no_wptr)
				cvmx_fpa_free(cvmx_phys_to_ptr
					      ((uint64_t) ipd_wqe_ptr_valid.s.
					       ptr << 7), CVMX_FPA_PACKET_POOL,
					      0);
			else
				cvmx_fpa_free(cvmx_phys_to_ptr
					      ((uint64_t) ipd_wqe_ptr_valid.s.
					       ptr << 7), CVMX_FPA_WQE_POOL, 0);
		}

		/* Free all WQE in the fifo */
		if (ipd_ptr_count.s.wqe_pcnt) {
			int i;
			union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
			ipd_pwp_ptr_fifo_ctl.u64 =
			    cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
			for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
				ipd_pwp_ptr_fifo_ctl.s.cena = 0;
				/* WQE entries live past max_cnts in the FIFO address space. */
				ipd_pwp_ptr_fifo_ctl.s.raddr =
				    ipd_pwp_ptr_fifo_ctl.s.max_cnts +
				    (ipd_pwp_ptr_fifo_ctl.s.wraddr +
				     i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
				cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
					       ipd_pwp_ptr_fifo_ctl.u64);
				ipd_pwp_ptr_fifo_ctl.u64 =
				    cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
				if (no_wptr)
					cvmx_fpa_free(cvmx_phys_to_ptr
						      ((uint64_t)
						       ipd_pwp_ptr_fifo_ctl.s.
						       ptr << 7),
						      CVMX_FPA_PACKET_POOL, 0);
				else
					cvmx_fpa_free(cvmx_phys_to_ptr
						      ((uint64_t)
						       ipd_pwp_ptr_fifo_ctl.s.
						       ptr << 7),
						      CVMX_FPA_WQE_POOL, 0);
			}
			ipd_pwp_ptr_fifo_ctl.s.cena = 1;
			cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
				       ipd_pwp_ptr_fifo_ctl.u64);
		}

		/* Free the prefetched packet */
		if (ipd_ptr_count.s.pktv_cnt) {
			union cvmx_ipd_pkt_ptr_valid ipd_pkt_ptr_valid;
			ipd_pkt_ptr_valid.u64 =
			    cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
			cvmx_fpa_free(cvmx_phys_to_ptr
				      (ipd_pkt_ptr_valid.s.ptr << 7),
				      CVMX_FPA_PACKET_POOL, 0);
		}

		/* Free the per port prefetched packets */
		/* Always drained, hence the unconditional if (1). */
		if (1) {
			int i;
			union cvmx_ipd_prc_port_ptr_fifo_ctl
			    ipd_prc_port_ptr_fifo_ctl;
			ipd_prc_port_ptr_fifo_ctl.u64 =
			    cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);

			for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
			     i++) {
				ipd_prc_port_ptr_fifo_ctl.s.cena = 0;
				ipd_prc_port_ptr_fifo_ctl.s.raddr =
				    i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
				cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
					       ipd_prc_port_ptr_fifo_ctl.u64);
				ipd_prc_port_ptr_fifo_ctl.u64 =
				    cvmx_read_csr
				    (CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
				cvmx_fpa_free(cvmx_phys_to_ptr
					      ((uint64_t)
					       ipd_prc_port_ptr_fifo_ctl.s.
					       ptr << 7), CVMX_FPA_PACKET_POOL,
					      0);
			}
			ipd_prc_port_ptr_fifo_ctl.s.cena = 1;
			cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
				       ipd_prc_port_ptr_fifo_ctl.u64);
		}

		/* Free all packets in the holding fifo */
		if (ipd_ptr_count.s.pfif_cnt) {
			int i;
			union cvmx_ipd_prc_hold_ptr_fifo_ctl
			    ipd_prc_hold_ptr_fifo_ctl;

			ipd_prc_hold_ptr_fifo_ctl.u64 =
			    cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);

			for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
				ipd_prc_hold_ptr_fifo_ctl.s.cena = 0;
				ipd_prc_hold_ptr_fifo_ctl.s.raddr =
				    (ipd_prc_hold_ptr_fifo_ctl.s.praddr +
				     i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt;
				cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
					       ipd_prc_hold_ptr_fifo_ctl.u64);
				ipd_prc_hold_ptr_fifo_ctl.u64 =
				    cvmx_read_csr
				    (CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
				cvmx_fpa_free(cvmx_phys_to_ptr
					      ((uint64_t)
					       ipd_prc_hold_ptr_fifo_ctl.s.
					       ptr << 7), CVMX_FPA_PACKET_POOL,
					      0);
			}
			ipd_prc_hold_ptr_fifo_ctl.s.cena = 1;
			cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
				       ipd_prc_hold_ptr_fifo_ctl.u64);
		}

		/* Free all packets in the fifo */
		if (ipd_ptr_count.s.pkt_pcnt) {
			int i;
			union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
			ipd_pwp_ptr_fifo_ctl.u64 =
			    cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);

			for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
				ipd_pwp_ptr_fifo_ctl.s.cena = 0;
				ipd_pwp_ptr_fifo_ctl.s.raddr =
				    (ipd_pwp_ptr_fifo_ctl.s.praddr +
				     i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
				cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
					       ipd_pwp_ptr_fifo_ctl.u64);
				ipd_pwp_ptr_fifo_ctl.u64 =
				    cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
				cvmx_fpa_free(cvmx_phys_to_ptr
					      ((uint64_t) ipd_pwp_ptr_fifo_ctl.
					       s.ptr << 7),
					      CVMX_FPA_PACKET_POOL, 0);
			}
			ipd_pwp_ptr_fifo_ctl.s.cena = 1;
			cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
				       ipd_pwp_ptr_fifo_ctl.u64);
		}

		/* Reset the IPD to get all buffers out of it */
		{
			union cvmx_ipd_ctl_status ipd_ctl_status;
			ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
			ipd_ctl_status.s.reset = 1;
			cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
		}

		/* Reset the PIP */
		{
			union cvmx_pip_sft_rst pip_sft_rst;
			pip_sft_rst.u64 = cvmx_read_csr(CVMX_PIP_SFT_RST);
			pip_sft_rst.s.rst = 1;
			cvmx_write_csr(CVMX_PIP_SFT_RST, pip_sft_rst.u64);
		}
	}
}
337
338#endif /* __CVMX_IPD_H__ */
diff --git a/drivers/staging/octeon/cvmx-mdio.h b/drivers/staging/octeon/cvmx-mdio.h
new file mode 100644
index 000000000000..c987a75a20cf
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-mdio.h
@@ -0,0 +1,506 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 *
30 * Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
31 * clause 22 and clause 45 operations.
32 *
33 */
34
35#ifndef __CVMX_MIO_H__
36#define __CVMX_MIO_H__
37
38#include "cvmx-smix-defs.h"
39
/**
 * PHY register 0 from the 802.3 spec
 *
 * NOTE(review): all register overlays below list fields MSB-first
 * (bit 15 down to bit 0), which assumes a big-endian bitfield layout —
 * correct for the big-endian Octeon target, but not portable; confirm
 * before reusing elsewhere.
 */
#define CVMX_MDIO_PHY_REG_CONTROL 0
typedef union {
	uint16_t u16;
	struct {
		uint16_t reset:1;
		uint16_t loopback:1;
		uint16_t speed_lsb:1;
		uint16_t autoneg_enable:1;
		uint16_t power_down:1;
		uint16_t isolate:1;
		uint16_t restart_autoneg:1;
		uint16_t duplex:1;
		uint16_t collision_test:1;
		uint16_t speed_msb:1;
		uint16_t unidirectional_enable:1;
		uint16_t reserved_0_4:5;
	} s;
} cvmx_mdio_phy_reg_control_t;

/**
 * PHY register 1 from the 802.3 spec
 */
#define CVMX_MDIO_PHY_REG_STATUS 1
typedef union {
	uint16_t u16;
	struct {
		uint16_t capable_100base_t4:1;
		uint16_t capable_100base_x_full:1;
		uint16_t capable_100base_x_half:1;
		uint16_t capable_10_full:1;
		uint16_t capable_10_half:1;
		uint16_t capable_100base_t2_full:1;
		uint16_t capable_100base_t2_half:1;
		uint16_t capable_extended_status:1;
		uint16_t capable_unidirectional:1;
		uint16_t capable_mf_preamble_suppression:1;
		uint16_t autoneg_complete:1;
		uint16_t remote_fault:1;
		uint16_t capable_autoneg:1;
		uint16_t link_status:1;
		uint16_t jabber_detect:1;
		uint16_t capable_extended_registers:1;

	} s;
} cvmx_mdio_phy_reg_status_t;
88
/**
 * PHY register 2 from the 802.3 spec
 */
#define CVMX_MDIO_PHY_REG_ID1 2
typedef union {
	uint16_t u16;
	struct {
		/* Full 16-bit field (no bit-width): OUI bits 3..18 */
		uint16_t oui_bits_3_18;
	} s;
} cvmx_mdio_phy_reg_id1_t;

/**
 * PHY register 3 from the 802.3 spec
 */
#define CVMX_MDIO_PHY_REG_ID2 3
typedef union {
	uint16_t u16;
	struct {
		uint16_t oui_bits_19_24:6;
		uint16_t model:6;
		uint16_t revision:4;
	} s;
} cvmx_mdio_phy_reg_id2_t;

/**
 * PHY register 4 from the 802.3 spec
 */
#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4
typedef union {
	uint16_t u16;
	struct {
		uint16_t next_page:1;
		uint16_t reserved_14:1;
		uint16_t remote_fault:1;
		uint16_t reserved_12:1;
		uint16_t asymmetric_pause:1;
		uint16_t pause:1;
		uint16_t advert_100base_t4:1;
		uint16_t advert_100base_tx_full:1;
		uint16_t advert_100base_tx_half:1;
		uint16_t advert_10base_tx_full:1;
		uint16_t advert_10base_tx_half:1;
		uint16_t selector:5;
	} s;
} cvmx_mdio_phy_reg_autoneg_adver_t;

/**
 * PHY register 5 from the 802.3 spec
 *
 * Same layout as register 4 except bit 14 is the partner's "ack".
 */
#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
typedef union {
	uint16_t u16;
	struct {
		uint16_t next_page:1;
		uint16_t ack:1;
		uint16_t remote_fault:1;
		uint16_t reserved_12:1;
		uint16_t asymmetric_pause:1;
		uint16_t pause:1;
		uint16_t advert_100base_t4:1;
		uint16_t advert_100base_tx_full:1;
		uint16_t advert_100base_tx_half:1;
		uint16_t advert_10base_tx_full:1;
		uint16_t advert_10base_tx_half:1;
		uint16_t selector:5;
	} s;
} cvmx_mdio_phy_reg_link_partner_ability_t;

/**
 * PHY register 6 from the 802.3 spec
 */
#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6
typedef union {
	uint16_t u16;
	struct {
		uint16_t reserved_5_15:11;
		uint16_t parallel_detection_fault:1;
		uint16_t link_partner_next_page_capable:1;
		uint16_t local_next_page_capable:1;
		uint16_t page_received:1;
		uint16_t link_partner_autoneg_capable:1;

	} s;
} cvmx_mdio_phy_reg_autoneg_expansion_t;
173
/**
 * PHY register 9 from the 802.3 spec
 */
#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
typedef union {
	uint16_t u16;
	struct {
		uint16_t test_mode:3;
		uint16_t manual_master_slave:1;
		uint16_t master:1;
		uint16_t port_type:1;
		uint16_t advert_1000base_t_full:1;
		uint16_t advert_1000base_t_half:1;
		uint16_t reserved_0_7:8;
	} s;
} cvmx_mdio_phy_reg_control_1000_t;

/**
 * PHY register 10 from the 802.3 spec
 */
#define CVMX_MDIO_PHY_REG_STATUS_1000 10
typedef union {
	uint16_t u16;
	struct {
		uint16_t master_slave_fault:1;
		uint16_t is_master:1;
		uint16_t local_receiver_ok:1;
		uint16_t remote_receiver_ok:1;
		uint16_t remote_capable_1000base_t_full:1;
		uint16_t remote_capable_1000base_t_half:1;
		uint16_t reserved_8_9:2;
		uint16_t idle_error_count:8;
	} s;
} cvmx_mdio_phy_reg_status_1000_t;

/**
 * PHY register 15 from the 802.3 spec
 */
#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
typedef union {
	uint16_t u16;
	struct {
		uint16_t capable_1000base_x_full:1;
		uint16_t capable_1000base_x_half:1;
		uint16_t capable_1000base_t_full:1;
		uint16_t capable_1000base_t_half:1;
		uint16_t reserved_0_11:12;
	} s;
} cvmx_mdio_phy_reg_extended_status_t;

/**
 * PHY register 13 from the 802.3 spec
 *
 * Clause 45 indirect-access control (function select + MMD device id).
 */
#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
typedef union {
	uint16_t u16;
	struct {
		uint16_t function:2;
		uint16_t reserved_5_13:9;
		uint16_t devad:5;
	} s;
} cvmx_mdio_phy_reg_mmd_control_t;

/**
 * PHY register 14 from the 802.3 spec
 */
#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
typedef union {
	uint16_t u16;
	struct {
		uint16_t address_data:16;
	} s;
} cvmx_mdio_phy_reg_mmd_address_data_t;
247
/* Operating request encodings (SMIX_CMD[PHY_OP] values). */
#define MDIO_CLAUSE_22_WRITE 0
#define MDIO_CLAUSE_22_READ 1

#define MDIO_CLAUSE_45_ADDRESS 0
#define MDIO_CLAUSE_45_WRITE 1
#define MDIO_CLAUSE_45_READ_INC 2
#define MDIO_CLAUSE_45_READ 3

/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
#define CVMX_MMD_DEVICE_PMA_PMD 1
#define CVMX_MMD_DEVICE_WIS 2
#define CVMX_MMD_DEVICE_PCS 3
#define CVMX_MMD_DEVICE_PHY_XS 4
#define CVMX_MMD_DEVICE_DTS_XS 5
#define CVMX_MMD_DEVICE_TC 6
#define CVMX_MMD_DEVICE_CL22_EXT 29
#define CVMX_MMD_DEVICE_VENDOR_1 30
#define CVMX_MMD_DEVICE_VENDOR_2 31
267
268/* Helper function to put MDIO interface into clause 45 mode */
269static inline void __cvmx_mdio_set_clause45_mode(int bus_id)
270{
271 union cvmx_smix_clk smi_clk;
272 /* Put bus into clause 45 mode */
273 smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
274 smi_clk.s.mode = 1;
275 smi_clk.s.preamble = 1;
276 cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
277}
278
279/* Helper function to put MDIO interface into clause 22 mode */
280static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
281{
282 union cvmx_smix_clk smi_clk;
283 /* Put bus into clause 22 mode */
284 smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
285 smi_clk.s.mode = 0;
286 cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
287}
288
289/**
290 * Perform an MII read. This function is used to read PHY
291 * registers controlling auto negotiation.
292 *
293 * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
294 * support multiple busses.
295 * @phy_id: The MII phy id
296 * @location: Register location to read
297 *
298 * Returns Result from the read or -1 on failure
299 */
300static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
301{
302 union cvmx_smix_cmd smi_cmd;
303 union cvmx_smix_rd_dat smi_rd;
304 int timeout = 1000;
305
306 if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
307 __cvmx_mdio_set_clause22_mode(bus_id);
308
309 smi_cmd.u64 = 0;
310 smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ;
311 smi_cmd.s.phy_adr = phy_id;
312 smi_cmd.s.reg_adr = location;
313 cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
314
315 do {
316 cvmx_wait(1000);
317 smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
318 } while (smi_rd.s.pending && timeout--);
319
320 if (smi_rd.s.val)
321 return smi_rd.s.dat;
322 else
323 return -1;
324}
325
326/**
327 * Perform an MII write. This function is used to write PHY
328 * registers controlling auto negotiation.
329 *
330 * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
331 * support multiple busses.
332 * @phy_id: The MII phy id
333 * @location: Register location to write
334 * @val: Value to write
335 *
336 * Returns -1 on error
337 * 0 on success
338 */
339static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
340{
341 union cvmx_smix_cmd smi_cmd;
342 union cvmx_smix_wr_dat smi_wr;
343 int timeout = 1000;
344
345 if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
346 __cvmx_mdio_set_clause22_mode(bus_id);
347
348 smi_wr.u64 = 0;
349 smi_wr.s.dat = val;
350 cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
351
352 smi_cmd.u64 = 0;
353 smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
354 smi_cmd.s.phy_adr = phy_id;
355 smi_cmd.s.reg_adr = location;
356 cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
357
358 do {
359 cvmx_wait(1000);
360 smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
361 } while (smi_wr.s.pending && --timeout);
362 if (timeout <= 0)
363 return -1;
364
365 return 0;
366}
367
368/**
369 * Perform an IEEE 802.3 clause 45 MII read. This function is used to
370 * read PHY registers controlling auto negotiation.
371 *
372 * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
373 * support multiple busses.
374 * @phy_id: The MII phy id
375 * @device: MDIO Managable Device (MMD) id
376 * @location: Register location to read
377 *
378 * Returns Result from the read or -1 on failure
379 */
380
381static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
382 int location)
383{
384 union cvmx_smix_cmd smi_cmd;
385 union cvmx_smix_rd_dat smi_rd;
386 union cvmx_smix_wr_dat smi_wr;
387 int timeout = 1000;
388
389 if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
390 return -1;
391
392 __cvmx_mdio_set_clause45_mode(bus_id);
393
394 smi_wr.u64 = 0;
395 smi_wr.s.dat = location;
396 cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
397
398 smi_cmd.u64 = 0;
399 smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
400 smi_cmd.s.phy_adr = phy_id;
401 smi_cmd.s.reg_adr = device;
402 cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
403
404 do {
405 cvmx_wait(1000);
406 smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
407 } while (smi_wr.s.pending && --timeout);
408 if (timeout <= 0) {
409 cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
410 "device %2d register %2d TIME OUT(address)\n",
411 bus_id, phy_id, device, location);
412 return -1;
413 }
414
415 smi_cmd.u64 = 0;
416 smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
417 smi_cmd.s.phy_adr = phy_id;
418 smi_cmd.s.reg_adr = device;
419 cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
420
421 do {
422 cvmx_wait(1000);
423 smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
424 } while (smi_rd.s.pending && timeout--);
425
426 if (timeout <= 0) {
427 cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
428 "device %2d register %2d TIME OUT(data)\n",
429 bus_id, phy_id, device, location);
430 return -1;
431 }
432
433 if (smi_rd.s.val)
434 return smi_rd.s.dat;
435 else {
436 cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
437 "device %2d register %2d INVALID READ\n",
438 bus_id, phy_id, device, location);
439 return -1;
440 }
441}
442
443/**
444 * Perform an IEEE 802.3 clause 45 MII write. This function is used to
445 * write PHY registers controlling auto negotiation.
446 *
447 * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
448 * support multiple busses.
449 * @phy_id: The MII phy id
450 * @device: MDIO Managable Device (MMD) id
451 * @location: Register location to write
452 * @val: Value to write
453 *
454 * Returns -1 on error
455 * 0 on success
456 */
457static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
458 int location, int val)
459{
460 union cvmx_smix_cmd smi_cmd;
461 union cvmx_smix_wr_dat smi_wr;
462 int timeout = 1000;
463
464 if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
465 return -1;
466
467 __cvmx_mdio_set_clause45_mode(bus_id);
468
469 smi_wr.u64 = 0;
470 smi_wr.s.dat = location;
471 cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
472
473 smi_cmd.u64 = 0;
474 smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
475 smi_cmd.s.phy_adr = phy_id;
476 smi_cmd.s.reg_adr = device;
477 cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
478
479 do {
480 cvmx_wait(1000);
481 smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
482 } while (smi_wr.s.pending && --timeout);
483 if (timeout <= 0)
484 return -1;
485
486 smi_wr.u64 = 0;
487 smi_wr.s.dat = val;
488 cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
489
490 smi_cmd.u64 = 0;
491 smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
492 smi_cmd.s.phy_adr = phy_id;
493 smi_cmd.s.reg_adr = device;
494 cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
495
496 do {
497 cvmx_wait(1000);
498 smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
499 } while (smi_wr.s.pending && --timeout);
500 if (timeout <= 0)
501 return -1;
502
503 return 0;
504}
505
506#endif
diff --git a/drivers/staging/octeon/cvmx-packet.h b/drivers/staging/octeon/cvmx-packet.h
new file mode 100644
index 000000000000..62ffe78a8c81
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-packet.h
@@ -0,0 +1,65 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 *
30 * Packet buffer defines.
31 */
32
33#ifndef __CVMX_PACKET_H__
34#define __CVMX_PACKET_H__
35
/**
 * This structure defines a buffer pointer on Octeon
 *
 * The bitfields sum to exactly 64 bits (1+4+3+16+40) and overlay u64.
 * NOTE(review): fields are listed MSB-first, which assumes a big-endian
 * bitfield layout — correct for the big-endian Octeon target.
 */
union cvmx_buf_ptr {
	void *ptr;
	uint64_t u64;
	struct {
		/*
		 * if set, invert the "free" pick of the overall
		 * packet. HW always sets this bit to 0 on inbound
		 * packet
		 */
		uint64_t i:1;
		/*
		 * Indicates the amount to back up to get to the
		 * buffer start in cache lines. In most cases this is
		 * less than one complete cache line, so the value is
		 * zero.
		 */
		uint64_t back:4;
		/* The pool that the buffer came from / goes to */
		uint64_t pool:3;
		/* The size of the segment pointed to by addr (in bytes) */
		uint64_t size:16;
		/* Pointer to the first byte of the data, NOT buffer */
		uint64_t addr:40;
	} s;
};
64
65#endif /* __CVMX_PACKET_H__ */
diff --git a/drivers/staging/octeon/cvmx-pcsx-defs.h b/drivers/staging/octeon/cvmx-pcsx-defs.h
new file mode 100644
index 000000000000..d45952df5f5b
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pcsx-defs.h
@@ -0,0 +1,370 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_PCSX_DEFS_H__
29#define __CVMX_PCSX_DEFS_H__
30
/*
 * PCS CSR addresses.  Each macro computes an I/O-space address as
 * base + ((offset & 3) * 1024) to select the lane/port and
 * + ((block_id & 1) * 0x8000000) to select the interface block.
 */
31#define CVMX_PCSX_ANX_ADV_REG(offset, block_id) \
32 CVMX_ADD_IO_SEG(0x00011800B0001010ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
33#define CVMX_PCSX_ANX_EXT_ST_REG(offset, block_id) \
34 CVMX_ADD_IO_SEG(0x00011800B0001028ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
35#define CVMX_PCSX_ANX_LP_ABIL_REG(offset, block_id) \
36 CVMX_ADD_IO_SEG(0x00011800B0001018ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
37#define CVMX_PCSX_ANX_RESULTS_REG(offset, block_id) \
38 CVMX_ADD_IO_SEG(0x00011800B0001020ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
39#define CVMX_PCSX_INTX_EN_REG(offset, block_id) \
40 CVMX_ADD_IO_SEG(0x00011800B0001088ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
41#define CVMX_PCSX_INTX_REG(offset, block_id) \
42 CVMX_ADD_IO_SEG(0x00011800B0001080ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
43#define CVMX_PCSX_LINKX_TIMER_COUNT_REG(offset, block_id) \
44 CVMX_ADD_IO_SEG(0x00011800B0001040ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
45#define CVMX_PCSX_LOG_ANLX_REG(offset, block_id) \
46 CVMX_ADD_IO_SEG(0x00011800B0001090ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
47#define CVMX_PCSX_MISCX_CTL_REG(offset, block_id) \
48 CVMX_ADD_IO_SEG(0x00011800B0001078ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
49#define CVMX_PCSX_MRX_CONTROL_REG(offset, block_id) \
50 CVMX_ADD_IO_SEG(0x00011800B0001000ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
51#define CVMX_PCSX_MRX_STATUS_REG(offset, block_id) \
52 CVMX_ADD_IO_SEG(0x00011800B0001008ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
53#define CVMX_PCSX_RXX_STATES_REG(offset, block_id) \
54 CVMX_ADD_IO_SEG(0x00011800B0001058ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
55#define CVMX_PCSX_RXX_SYNC_REG(offset, block_id) \
56 CVMX_ADD_IO_SEG(0x00011800B0001050ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
57#define CVMX_PCSX_SGMX_AN_ADV_REG(offset, block_id) \
58 CVMX_ADD_IO_SEG(0x00011800B0001068ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
59#define CVMX_PCSX_SGMX_LP_ADV_REG(offset, block_id) \
60 CVMX_ADD_IO_SEG(0x00011800B0001070ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
61#define CVMX_PCSX_TXX_STATES_REG(offset, block_id) \
62 CVMX_ADD_IO_SEG(0x00011800B0001060ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
63#define CVMX_PCSX_TX_RXX_POLARITY_REG(offset, block_id) \
64 CVMX_ADD_IO_SEG(0x00011800B0001048ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
65
/*
 * PCS auto-negotiation advertisement register.  Bitfields are listed
 * MSB-first; reserved_a_b names give the bit range covered.  The cnXXxx
 * members are per-chip aliases sharing the common layout.
 */
66union cvmx_pcsx_anx_adv_reg {
67 uint64_t u64;
68 struct cvmx_pcsx_anx_adv_reg_s {
69 uint64_t reserved_16_63:48;
70 uint64_t np:1;
71 uint64_t reserved_14_14:1;
72 uint64_t rem_flt:2;
73 uint64_t reserved_9_11:3;
74 uint64_t pause:2;
75 uint64_t hfd:1;
76 uint64_t fd:1;
77 uint64_t reserved_0_4:5;
78 } s;
79 struct cvmx_pcsx_anx_adv_reg_s cn52xx;
80 struct cvmx_pcsx_anx_adv_reg_s cn52xxp1;
81 struct cvmx_pcsx_anx_adv_reg_s cn56xx;
82 struct cvmx_pcsx_anx_adv_reg_s cn56xxp1;
83};
84
/*
 * PCS auto-negotiation extended-status register; thou_*/10/100-style
 * ability bits (presumably 1000BASE-X full/half duplex -- confirm
 * against the Octeon HRM).  MSB-first layout; per-chip aliases.
 */
85union cvmx_pcsx_anx_ext_st_reg {
86 uint64_t u64;
87 struct cvmx_pcsx_anx_ext_st_reg_s {
88 uint64_t reserved_16_63:48;
89 uint64_t thou_xfd:1;
90 uint64_t thou_xhd:1;
91 uint64_t thou_tfd:1;
92 uint64_t thou_thd:1;
93 uint64_t reserved_0_11:12;
94 } s;
95 struct cvmx_pcsx_anx_ext_st_reg_s cn52xx;
96 struct cvmx_pcsx_anx_ext_st_reg_s cn52xxp1;
97 struct cvmx_pcsx_anx_ext_st_reg_s cn56xx;
98 struct cvmx_pcsx_anx_ext_st_reg_s cn56xxp1;
99};
100
/*
 * PCS auto-negotiation link-partner ability register.  Same field set
 * as cvmx_pcsx_anx_adv_reg except bit 14 is 'ack' (acknowledge) rather
 * than reserved.  MSB-first layout; per-chip aliases.
 */
101union cvmx_pcsx_anx_lp_abil_reg {
102 uint64_t u64;
103 struct cvmx_pcsx_anx_lp_abil_reg_s {
104 uint64_t reserved_16_63:48;
105 uint64_t np:1;
106 uint64_t ack:1;
107 uint64_t rem_flt:2;
108 uint64_t reserved_9_11:3;
109 uint64_t pause:2;
110 uint64_t hfd:1;
111 uint64_t fd:1;
112 uint64_t reserved_0_4:5;
113 } s;
114 struct cvmx_pcsx_anx_lp_abil_reg_s cn52xx;
115 struct cvmx_pcsx_anx_lp_abil_reg_s cn52xxp1;
116 struct cvmx_pcsx_anx_lp_abil_reg_s cn56xx;
117 struct cvmx_pcsx_anx_lp_abil_reg_s cn56xxp1;
118};
119
/*
 * PCS auto-negotiation results register: resolved pause/speed/duplex,
 * AN-complete and link-OK status bits.  MSB-first; per-chip aliases.
 */
120union cvmx_pcsx_anx_results_reg {
121 uint64_t u64;
122 struct cvmx_pcsx_anx_results_reg_s {
123 uint64_t reserved_7_63:57;
124 uint64_t pause:2;
125 uint64_t spd:2;
126 uint64_t an_cpt:1;
127 uint64_t dup:1;
128 uint64_t link_ok:1;
129 } s;
130 struct cvmx_pcsx_anx_results_reg_s cn52xx;
131 struct cvmx_pcsx_anx_results_reg_s cn52xxp1;
132 struct cvmx_pcsx_anx_results_reg_s cn56xx;
133 struct cvmx_pcsx_anx_results_reg_s cn56xxp1;
134};
135
/*
 * PCS interrupt-enable register.  Fields correspond one-for-one with
 * cvmx_pcsx_intx_reg below (same bit positions, "_en" suffix).
 * NOTE(review): 'dup' lacks the _en suffix the other fields carry --
 * naming inconsistency inherited from the register description.
 */
136union cvmx_pcsx_intx_en_reg {
137 uint64_t u64;
138 struct cvmx_pcsx_intx_en_reg_s {
139 uint64_t reserved_12_63:52;
140 uint64_t dup:1;
141 uint64_t sync_bad_en:1;
142 uint64_t an_bad_en:1;
143 uint64_t rxlock_en:1;
144 uint64_t rxbad_en:1;
145 uint64_t rxerr_en:1;
146 uint64_t txbad_en:1;
147 uint64_t txfifo_en:1;
148 uint64_t txfifu_en:1;
149 uint64_t an_err_en:1;
150 uint64_t xmit_en:1;
151 uint64_t lnkspd_en:1;
152 } s;
153 struct cvmx_pcsx_intx_en_reg_s cn52xx;
154 struct cvmx_pcsx_intx_en_reg_s cn52xxp1;
155 struct cvmx_pcsx_intx_en_reg_s cn56xx;
156 struct cvmx_pcsx_intx_en_reg_s cn56xxp1;
157};
158
/*
 * PCS interrupt status register.  Bit layout mirrors
 * cvmx_pcsx_intx_en_reg above.  'txfifo'/'txfifu' presumably mean TX
 * FIFO overflow/underflow -- confirm against the Octeon HRM.
 */
159union cvmx_pcsx_intx_reg {
160 uint64_t u64;
161 struct cvmx_pcsx_intx_reg_s {
162 uint64_t reserved_12_63:52;
163 uint64_t dup:1;
164 uint64_t sync_bad:1;
165 uint64_t an_bad:1;
166 uint64_t rxlock:1;
167 uint64_t rxbad:1;
168 uint64_t rxerr:1;
169 uint64_t txbad:1;
170 uint64_t txfifo:1;
171 uint64_t txfifu:1;
172 uint64_t an_err:1;
173 uint64_t xmit:1;
174 uint64_t lnkspd:1;
175 } s;
176 struct cvmx_pcsx_intx_reg_s cn52xx;
177 struct cvmx_pcsx_intx_reg_s cn52xxp1;
178 struct cvmx_pcsx_intx_reg_s cn56xx;
179 struct cvmx_pcsx_intx_reg_s cn56xxp1;
180};
181
/* PCS link timer count: single 16-bit count field in bits 15:0. */
182union cvmx_pcsx_linkx_timer_count_reg {
183 uint64_t u64;
184 struct cvmx_pcsx_linkx_timer_count_reg_s {
185 uint64_t reserved_16_63:48;
186 uint64_t count:16;
187 } s;
188 struct cvmx_pcsx_linkx_timer_count_reg_s cn52xx;
189 struct cvmx_pcsx_linkx_timer_count_reg_s cn52xxp1;
190 struct cvmx_pcsx_linkx_timer_count_reg_s cn56xx;
191 struct cvmx_pcsx_linkx_timer_count_reg_s cn56xxp1;
192};
193
/*
 * PCS logic-analyzer control: enable (la_en), FIFO-overflow flag
 * (lafifovfl) and 2-bit packet-size selector.  Same field tail as
 * cvmx_pcsxx_log_anl_reg in the XAUI block.
 */
194union cvmx_pcsx_log_anlx_reg {
195 uint64_t u64;
196 struct cvmx_pcsx_log_anlx_reg_s {
197 uint64_t reserved_4_63:60;
198 uint64_t lafifovfl:1;
199 uint64_t la_en:1;
200 uint64_t pkt_sz:2;
201 } s;
202 struct cvmx_pcsx_log_anlx_reg_s cn52xx;
203 struct cvmx_pcsx_log_anlx_reg_s cn52xxp1;
204 struct cvmx_pcsx_log_anlx_reg_s cn56xx;
205 struct cvmx_pcsx_log_anlx_reg_s cn56xxp1;
206};
207
/*
 * PCS miscellaneous control: SGMII/1000BASE-X mode select, GMX enable
 * override (gmxeno), loopback, MAC/PHY role, AN override and sample
 * point.  Exact bit semantics per the Octeon HRM.
 */
208union cvmx_pcsx_miscx_ctl_reg {
209 uint64_t u64;
210 struct cvmx_pcsx_miscx_ctl_reg_s {
211 uint64_t reserved_13_63:51;
212 uint64_t sgmii:1;
213 uint64_t gmxeno:1;
214 uint64_t loopbck2:1;
215 uint64_t mac_phy:1;
216 uint64_t mode:1;
217 uint64_t an_ovrd:1;
218 uint64_t samp_pt:7;
219 } s;
220 struct cvmx_pcsx_miscx_ctl_reg_s cn52xx;
221 struct cvmx_pcsx_miscx_ctl_reg_s cn52xxp1;
222 struct cvmx_pcsx_miscx_ctl_reg_s cn56xx;
223 struct cvmx_pcsx_miscx_ctl_reg_s cn56xxp1;
224};
225
/*
 * PCS MII-management control register (reset, loopback, speed select
 * split across spdmsb/spdlsb, AN enable/restart, power-down, duplex,
 * collision test) -- layout follows the Clause-22 control register;
 * confirm bit meanings against the Octeon HRM.
 */
226union cvmx_pcsx_mrx_control_reg {
227 uint64_t u64;
228 struct cvmx_pcsx_mrx_control_reg_s {
229 uint64_t reserved_16_63:48;
230 uint64_t reset:1;
231 uint64_t loopbck1:1;
232 uint64_t spdlsb:1;
233 uint64_t an_en:1;
234 uint64_t pwr_dn:1;
235 uint64_t reserved_10_10:1;
236 uint64_t rst_an:1;
237 uint64_t dup:1;
238 uint64_t coltst:1;
239 uint64_t spdmsb:1;
240 uint64_t uni:1;
241 uint64_t reserved_0_4:5;
242 } s;
243 struct cvmx_pcsx_mrx_control_reg_s cn52xx;
244 struct cvmx_pcsx_mrx_control_reg_s cn52xxp1;
245 struct cvmx_pcsx_mrx_control_reg_s cn56xx;
246 struct cvmx_pcsx_mrx_control_reg_s cn56xxp1;
247};
248
/*
 * PCS MII-management status register: ability bits (hun_*/ten_* look
 * like 100/10 Mb ability flags -- confirm), AN complete (an_cpt),
 * remote fault (rm_flt) and link status (lnk_st).
 */
249union cvmx_pcsx_mrx_status_reg {
250 uint64_t u64;
251 struct cvmx_pcsx_mrx_status_reg_s {
252 uint64_t reserved_16_63:48;
253 uint64_t hun_t4:1;
254 uint64_t hun_xfd:1;
255 uint64_t hun_xhd:1;
256 uint64_t ten_fd:1;
257 uint64_t ten_hd:1;
258 uint64_t hun_t2fd:1;
259 uint64_t hun_t2hd:1;
260 uint64_t ext_st:1;
261 uint64_t reserved_7_7:1;
262 uint64_t prb_sup:1;
263 uint64_t an_cpt:1;
264 uint64_t rm_flt:1;
265 uint64_t an_abil:1;
266 uint64_t lnk_st:1;
267 uint64_t reserved_1_1:1;
268 uint64_t extnd:1;
269 } s;
270 struct cvmx_pcsx_mrx_status_reg_s cn52xx;
271 struct cvmx_pcsx_mrx_status_reg_s cn52xxp1;
272 struct cvmx_pcsx_mrx_status_reg_s cn56xx;
273 struct cvmx_pcsx_mrx_status_reg_s cn56xxp1;
274};
275
/*
 * PCS receive state-machine snapshot: RX, sync and AN state values
 * with matching *_bad error flags.
 */
276union cvmx_pcsx_rxx_states_reg {
277 uint64_t u64;
278 struct cvmx_pcsx_rxx_states_reg_s {
279 uint64_t reserved_16_63:48;
280 uint64_t rx_bad:1;
281 uint64_t rx_st:5;
282 uint64_t sync_bad:1;
283 uint64_t sync:4;
284 uint64_t an_bad:1;
285 uint64_t an_st:4;
286 } s;
287 struct cvmx_pcsx_rxx_states_reg_s cn52xx;
288 struct cvmx_pcsx_rxx_states_reg_s cn52xxp1;
289 struct cvmx_pcsx_rxx_states_reg_s cn56xx;
290 struct cvmx_pcsx_rxx_states_reg_s cn56xxp1;
291};
292
/* PCS receive synchronization status: code-group sync and bit lock. */
293union cvmx_pcsx_rxx_sync_reg {
294 uint64_t u64;
295 struct cvmx_pcsx_rxx_sync_reg_s {
296 uint64_t reserved_2_63:62;
297 uint64_t sync:1;
298 uint64_t bit_lock:1;
299 } s;
300 struct cvmx_pcsx_rxx_sync_reg_s cn52xx;
301 struct cvmx_pcsx_rxx_sync_reg_s cn52xxp1;
302 struct cvmx_pcsx_rxx_sync_reg_s cn56xx;
303 struct cvmx_pcsx_rxx_sync_reg_s cn56xxp1;
304};
305
/*
 * SGMII auto-negotiation advertisement: link, ack, duplex, 2-bit
 * speed, and the always-set 'one' bit in bit 0.  Differs from
 * cvmx_pcsx_sgmx_lp_adv_reg only in that bit 14 here is 'ack'.
 */
306union cvmx_pcsx_sgmx_an_adv_reg {
307 uint64_t u64;
308 struct cvmx_pcsx_sgmx_an_adv_reg_s {
309 uint64_t reserved_16_63:48;
310 uint64_t link:1;
311 uint64_t ack:1;
312 uint64_t reserved_13_13:1;
313 uint64_t dup:1;
314 uint64_t speed:2;
315 uint64_t reserved_1_9:9;
316 uint64_t one:1;
317 } s;
318 struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xx;
319 struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xxp1;
320 struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xx;
321 struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xxp1;
322};
323
/*
 * SGMII link-partner advertisement; same layout as
 * cvmx_pcsx_sgmx_an_adv_reg with bits 14:13 reserved (no ack bit).
 */
324union cvmx_pcsx_sgmx_lp_adv_reg {
325 uint64_t u64;
326 struct cvmx_pcsx_sgmx_lp_adv_reg_s {
327 uint64_t reserved_16_63:48;
328 uint64_t link:1;
329 uint64_t reserved_13_14:2;
330 uint64_t dup:1;
331 uint64_t speed:2;
332 uint64_t reserved_1_9:9;
333 uint64_t one:1;
334 } s;
335 struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xx;
336 struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xxp1;
337 struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xx;
338 struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xxp1;
339};
340
/* PCS transmit state-machine snapshot: xmit/ordered-set state, tx_bad flag. */
341union cvmx_pcsx_txx_states_reg {
342 uint64_t u64;
343 struct cvmx_pcsx_txx_states_reg_s {
344 uint64_t reserved_7_63:57;
345 uint64_t xmit:2;
346 uint64_t tx_bad:1;
347 uint64_t ord_st:4;
348 } s;
349 struct cvmx_pcsx_txx_states_reg_s cn52xx;
350 struct cvmx_pcsx_txx_states_reg_s cn52xxp1;
351 struct cvmx_pcsx_txx_states_reg_s cn56xx;
352 struct cvmx_pcsx_txx_states_reg_s cn56xxp1;
353};
354
/*
 * PCS TX/RX polarity control: manual TX/RX polarity invert bits plus
 * automatic RX polarity detection (autorxpl) and its override (rxovrd).
 */
355union cvmx_pcsx_tx_rxx_polarity_reg {
356 uint64_t u64;
357 struct cvmx_pcsx_tx_rxx_polarity_reg_s {
358 uint64_t reserved_4_63:60;
359 uint64_t rxovrd:1;
360 uint64_t autorxpl:1;
361 uint64_t rxplrt:1;
362 uint64_t txplrt:1;
363 } s;
364 struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xx;
365 struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xxp1;
366 struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xx;
367 struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xxp1;
368};
369
370#endif
diff --git a/drivers/staging/octeon/cvmx-pcsxx-defs.h b/drivers/staging/octeon/cvmx-pcsxx-defs.h
new file mode 100644
index 000000000000..55d120fe8aed
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pcsxx-defs.h
@@ -0,0 +1,316 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_PCSXX_DEFS_H__
29#define __CVMX_PCSXX_DEFS_H__
30
/*
 * XAUI PCS (PCSX*) CSR addresses: one register set per interface,
 * selected by + ((block_id & 1) * 0x8000000).
 */
31#define CVMX_PCSXX_10GBX_STATUS_REG(block_id) \
32 CVMX_ADD_IO_SEG(0x00011800B0000828ull + (((block_id) & 1) * 0x8000000ull))
33#define CVMX_PCSXX_BIST_STATUS_REG(block_id) \
34 CVMX_ADD_IO_SEG(0x00011800B0000870ull + (((block_id) & 1) * 0x8000000ull))
35#define CVMX_PCSXX_BIT_LOCK_STATUS_REG(block_id) \
36 CVMX_ADD_IO_SEG(0x00011800B0000850ull + (((block_id) & 1) * 0x8000000ull))
37#define CVMX_PCSXX_CONTROL1_REG(block_id) \
38 CVMX_ADD_IO_SEG(0x00011800B0000800ull + (((block_id) & 1) * 0x8000000ull))
39#define CVMX_PCSXX_CONTROL2_REG(block_id) \
40 CVMX_ADD_IO_SEG(0x00011800B0000818ull + (((block_id) & 1) * 0x8000000ull))
41#define CVMX_PCSXX_INT_EN_REG(block_id) \
42 CVMX_ADD_IO_SEG(0x00011800B0000860ull + (((block_id) & 1) * 0x8000000ull))
43#define CVMX_PCSXX_INT_REG(block_id) \
44 CVMX_ADD_IO_SEG(0x00011800B0000858ull + (((block_id) & 1) * 0x8000000ull))
45#define CVMX_PCSXX_LOG_ANL_REG(block_id) \
46 CVMX_ADD_IO_SEG(0x00011800B0000868ull + (((block_id) & 1) * 0x8000000ull))
47#define CVMX_PCSXX_MISC_CTL_REG(block_id) \
48 CVMX_ADD_IO_SEG(0x00011800B0000848ull + (((block_id) & 1) * 0x8000000ull))
49#define CVMX_PCSXX_RX_SYNC_STATES_REG(block_id) \
50 CVMX_ADD_IO_SEG(0x00011800B0000838ull + (((block_id) & 1) * 0x8000000ull))
51#define CVMX_PCSXX_SPD_ABIL_REG(block_id) \
52 CVMX_ADD_IO_SEG(0x00011800B0000810ull + (((block_id) & 1) * 0x8000000ull))
53#define CVMX_PCSXX_STATUS1_REG(block_id) \
54 CVMX_ADD_IO_SEG(0x00011800B0000808ull + (((block_id) & 1) * 0x8000000ull))
55#define CVMX_PCSXX_STATUS2_REG(block_id) \
56 CVMX_ADD_IO_SEG(0x00011800B0000820ull + (((block_id) & 1) * 0x8000000ull))
57#define CVMX_PCSXX_TX_RX_POLARITY_REG(block_id) \
58 CVMX_ADD_IO_SEG(0x00011800B0000840ull + (((block_id) & 1) * 0x8000000ull))
59#define CVMX_PCSXX_TX_RX_STATES_REG(block_id) \
60 CVMX_ADD_IO_SEG(0x00011800B0000830ull + (((block_id) & 1) * 0x8000000ull))
61
/*
 * XAUI 10GBASE-X status: per-lane sync bits (l0sync..l3sync), overall
 * alignment (alignd) and pattern-test (pattst) flags.
 */
62union cvmx_pcsxx_10gbx_status_reg {
63 uint64_t u64;
64 struct cvmx_pcsxx_10gbx_status_reg_s {
65 uint64_t reserved_13_63:51;
66 uint64_t alignd:1;
67 uint64_t pattst:1;
68 uint64_t reserved_4_10:7;
69 uint64_t l3sync:1;
70 uint64_t l2sync:1;
71 uint64_t l1sync:1;
72 uint64_t l0sync:1;
73 } s;
74 struct cvmx_pcsxx_10gbx_status_reg_s cn52xx;
75 struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1;
76 struct cvmx_pcsxx_10gbx_status_reg_s cn56xx;
77 struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1;
78};
79
/* XAUI PCS BIST result: single pass/fail bit in bit 0. */
80union cvmx_pcsxx_bist_status_reg {
81 uint64_t u64;
82 struct cvmx_pcsxx_bist_status_reg_s {
83 uint64_t reserved_1_63:63;
84 uint64_t bist_status:1;
85 } s;
86 struct cvmx_pcsxx_bist_status_reg_s cn52xx;
87 struct cvmx_pcsxx_bist_status_reg_s cn52xxp1;
88 struct cvmx_pcsxx_bist_status_reg_s cn56xx;
89 struct cvmx_pcsxx_bist_status_reg_s cn56xxp1;
90};
91
/* XAUI per-lane bit-lock status: bitlck0..bitlck3, one bit per lane. */
92union cvmx_pcsxx_bit_lock_status_reg {
93 uint64_t u64;
94 struct cvmx_pcsxx_bit_lock_status_reg_s {
95 uint64_t reserved_4_63:60;
96 uint64_t bitlck3:1;
97 uint64_t bitlck2:1;
98 uint64_t bitlck1:1;
99 uint64_t bitlck0:1;
100 } s;
101 struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx;
102 struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1;
103 struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx;
104 struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1;
105};
106
/*
 * XAUI PCS control 1 (Clause-45-style): reset, loopback, low power,
 * speed selection split across spdsel1/spdsel0 plus a 4-bit spd field.
 */
107union cvmx_pcsxx_control1_reg {
108 uint64_t u64;
109 struct cvmx_pcsxx_control1_reg_s {
110 uint64_t reserved_16_63:48;
111 uint64_t reset:1;
112 uint64_t loopbck1:1;
113 uint64_t spdsel1:1;
114 uint64_t reserved_12_12:1;
115 uint64_t lo_pwr:1;
116 uint64_t reserved_7_10:4;
117 uint64_t spdsel0:1;
118 uint64_t spd:4;
119 uint64_t reserved_0_1:2;
120 } s;
121 struct cvmx_pcsxx_control1_reg_s cn52xx;
122 struct cvmx_pcsxx_control1_reg_s cn52xxp1;
123 struct cvmx_pcsxx_control1_reg_s cn56xx;
124 struct cvmx_pcsxx_control1_reg_s cn56xxp1;
125};
126
/* XAUI PCS control 2: 2-bit PCS type selector in bits 1:0. */
127union cvmx_pcsxx_control2_reg {
128 uint64_t u64;
129 struct cvmx_pcsxx_control2_reg_s {
130 uint64_t reserved_2_63:62;
131 uint64_t type:2;
132 } s;
133 struct cvmx_pcsxx_control2_reg_s cn52xx;
134 struct cvmx_pcsxx_control2_reg_s cn52xxp1;
135 struct cvmx_pcsxx_control2_reg_s cn56xx;
136 struct cvmx_pcsxx_control2_reg_s cn56xxp1;
137};
138
/*
 * XAUI PCS interrupt enables; bit-for-bit companion of
 * cvmx_pcsxx_int_reg below ("_en" suffix).
 */
139union cvmx_pcsxx_int_en_reg {
140 uint64_t u64;
141 struct cvmx_pcsxx_int_en_reg_s {
142 uint64_t reserved_6_63:58;
143 uint64_t algnlos_en:1;
144 uint64_t synlos_en:1;
145 uint64_t bitlckls_en:1;
146 uint64_t rxsynbad_en:1;
147 uint64_t rxbad_en:1;
148 uint64_t txflt_en:1;
149 } s;
150 struct cvmx_pcsxx_int_en_reg_s cn52xx;
151 struct cvmx_pcsxx_int_en_reg_s cn52xxp1;
152 struct cvmx_pcsxx_int_en_reg_s cn56xx;
153 struct cvmx_pcsxx_int_en_reg_s cn56xxp1;
154};
155
/*
 * XAUI PCS interrupt status: alignment loss, sync loss, bit-lock loss,
 * RX sync/data errors and TX fault.
 */
156union cvmx_pcsxx_int_reg {
157 uint64_t u64;
158 struct cvmx_pcsxx_int_reg_s {
159 uint64_t reserved_6_63:58;
160 uint64_t algnlos:1;
161 uint64_t synlos:1;
162 uint64_t bitlckls:1;
163 uint64_t rxsynbad:1;
164 uint64_t rxbad:1;
165 uint64_t txflt:1;
166 } s;
167 struct cvmx_pcsxx_int_reg_s cn52xx;
168 struct cvmx_pcsxx_int_reg_s cn52xxp1;
169 struct cvmx_pcsxx_int_reg_s cn56xx;
170 struct cvmx_pcsxx_int_reg_s cn56xxp1;
171};
172
/*
 * XAUI logic-analyzer control; superset of cvmx_pcsx_log_anlx_reg with
 * added encoding-mode and lane-drop selection bits.
 */
173union cvmx_pcsxx_log_anl_reg {
174 uint64_t u64;
175 struct cvmx_pcsxx_log_anl_reg_s {
176 uint64_t reserved_7_63:57;
177 uint64_t enc_mode:1;
178 uint64_t drop_ln:2;
179 uint64_t lafifovfl:1;
180 uint64_t la_en:1;
181 uint64_t pkt_sz:2;
182 } s;
183 struct cvmx_pcsxx_log_anl_reg_s cn52xx;
184 struct cvmx_pcsxx_log_anl_reg_s cn52xxp1;
185 struct cvmx_pcsxx_log_anl_reg_s cn56xx;
186 struct cvmx_pcsxx_log_anl_reg_s cn56xxp1;
187};
188
/*
 * XAUI miscellaneous control: TX/RX lane swap, XAUI mode select and
 * GMX enable override (gmxeno).
 */
189union cvmx_pcsxx_misc_ctl_reg {
190 uint64_t u64;
191 struct cvmx_pcsxx_misc_ctl_reg_s {
192 uint64_t reserved_4_63:60;
193 uint64_t tx_swap:1;
194 uint64_t rx_swap:1;
195 uint64_t xaui:1;
196 uint64_t gmxeno:1;
197 } s;
198 struct cvmx_pcsxx_misc_ctl_reg_s cn52xx;
199 struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1;
200 struct cvmx_pcsxx_misc_ctl_reg_s cn56xx;
201 struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1;
202};
203
/* XAUI per-lane RX sync state machines: 4-bit state per lane (0..3). */
204union cvmx_pcsxx_rx_sync_states_reg {
205 uint64_t u64;
206 struct cvmx_pcsxx_rx_sync_states_reg_s {
207 uint64_t reserved_16_63:48;
208 uint64_t sync3st:4;
209 uint64_t sync2st:4;
210 uint64_t sync1st:4;
211 uint64_t sync0st:4;
212 } s;
213 struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx;
214 struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1;
215 struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx;
216 struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1;
217};
218
/* XAUI speed-ability: 10 Gb (tengb) and 10PASS-TS (tenpasst) capability bits. */
219union cvmx_pcsxx_spd_abil_reg {
220 uint64_t u64;
221 struct cvmx_pcsxx_spd_abil_reg_s {
222 uint64_t reserved_2_63:62;
223 uint64_t tenpasst:1;
224 uint64_t tengb:1;
225 } s;
226 struct cvmx_pcsxx_spd_abil_reg_s cn52xx;
227 struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1;
228 struct cvmx_pcsxx_spd_abil_reg_s cn56xx;
229 struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1;
230};
231
/*
 * XAUI PCS status 1: fault (flt), receive link up (rcv_lnk) and
 * low-power-ability (lpable) bits.
 */
232union cvmx_pcsxx_status1_reg {
233 uint64_t u64;
234 struct cvmx_pcsxx_status1_reg_s {
235 uint64_t reserved_8_63:56;
236 uint64_t flt:1;
237 uint64_t reserved_3_6:4;
238 uint64_t rcv_lnk:1;
239 uint64_t lpable:1;
240 uint64_t reserved_0_0:1;
241 } s;
242 struct cvmx_pcsxx_status1_reg_s cn52xx;
243 struct cvmx_pcsxx_status1_reg_s cn52xxp1;
244 struct cvmx_pcsxx_status1_reg_s cn56xx;
245 struct cvmx_pcsxx_status1_reg_s cn56xxp1;
246};
247
/*
 * XAUI PCS status 2: device-present code (dev), transmit/receive fault
 * latches and 10GBASE-W/X/R ability bits.
 */
248union cvmx_pcsxx_status2_reg {
249 uint64_t u64;
250 struct cvmx_pcsxx_status2_reg_s {
251 uint64_t reserved_16_63:48;
252 uint64_t dev:2;
253 uint64_t reserved_12_13:2;
254 uint64_t xmtflt:1;
255 uint64_t rcvflt:1;
256 uint64_t reserved_3_9:7;
257 uint64_t tengb_w:1;
258 uint64_t tengb_x:1;
259 uint64_t tengb_r:1;
260 } s;
261 struct cvmx_pcsxx_status2_reg_s cn52xx;
262 struct cvmx_pcsxx_status2_reg_s cn52xxp1;
263 struct cvmx_pcsxx_status2_reg_s cn56xx;
264 struct cvmx_pcsxx_status2_reg_s cn56xxp1;
265};
266
/*
 * XAUI TX/RX polarity control.  Note the cn52xxp1/cn56xxp1 pass-1
 * silicon layout lacks the per-lane xor_rxplrt/xor_txplrt fields, so
 * it uses its own variant struct instead of aliasing _s.
 */
267union cvmx_pcsxx_tx_rx_polarity_reg {
268 uint64_t u64;
269 struct cvmx_pcsxx_tx_rx_polarity_reg_s {
270 uint64_t reserved_10_63:54;
271 uint64_t xor_rxplrt:4;
272 uint64_t xor_txplrt:4;
273 uint64_t rxplrt:1;
274 uint64_t txplrt:1;
275 } s;
276 struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx;
277 struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 {
278 uint64_t reserved_2_63:62;
279 uint64_t rxplrt:1;
280 uint64_t txplrt:1;
281 } cn52xxp1;
282 struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx;
283 struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1;
284};
285
/*
 * XAUI TX/RX state-machine snapshot.  Pass-1 silicon (cn52xxp1,
 * cn56xxp1) lacks the term_err bit and uses its own variant struct.
 */
286union cvmx_pcsxx_tx_rx_states_reg {
287 uint64_t u64;
288 struct cvmx_pcsxx_tx_rx_states_reg_s {
289 uint64_t reserved_14_63:50;
290 uint64_t term_err:1;
291 uint64_t syn3bad:1;
292 uint64_t syn2bad:1;
293 uint64_t syn1bad:1;
294 uint64_t syn0bad:1;
295 uint64_t rxbad:1;
296 uint64_t algn_st:3;
297 uint64_t rx_st:2;
298 uint64_t tx_st:3;
299 } s;
300 struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx;
301 struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 {
302 uint64_t reserved_13_63:51;
303 uint64_t syn3bad:1;
304 uint64_t syn2bad:1;
305 uint64_t syn1bad:1;
306 uint64_t syn0bad:1;
307 uint64_t rxbad:1;
308 uint64_t algn_st:3;
309 uint64_t rx_st:2;
310 uint64_t tx_st:3;
311 } cn52xxp1;
312 struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx;
313 struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1;
314};
315
316#endif
diff --git a/drivers/staging/octeon/cvmx-pip-defs.h b/drivers/staging/octeon/cvmx-pip-defs.h
new file mode 100644
index 000000000000..5a369100ca68
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pip-defs.h
@@ -0,0 +1,1267 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_PIP_DEFS_H__
29#define __CVMX_PIP_DEFS_H__
30
31/*
32 * Enumeration representing the amount of packet processing
33 * and validation performed by the input hardware.
34 * Values are 2-bit hardware encodings (presumably written into the
35 * PIP port-configuration mode field -- confirm against the HRM).
36 */
35enum cvmx_pip_port_parse_mode {
36 /*
37 * Packet input doesn't perform any processing of the input
38 * packet.
39 */
40 CVMX_PIP_PORT_CFG_MODE_NONE = 0ull,
41 /*
42 * Full packet processing is performed with pointer starting
43 * at the L2 (ethernet MAC) header.
44 */
45 CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,
46 /*
47 * Input packets are assumed to be IP. Results from non IP
48 * packets is undefined. Pointers reference the beginning of
49 * the IP header.
50 */
51 CVMX_PIP_PORT_CFG_MODE_SKIPIP = 2ull
52};
53
/*
 * PIP (packet input processing) CSR addresses.  Per-entry registers
 * index by (offset & mask) with the byte stride shown: 8 for most
 * tables, 80 for the STATn_PRT counters, 32 for STAT_INB counters.
 */
54#define CVMX_PIP_BCK_PRS \
55 CVMX_ADD_IO_SEG(0x00011800A0000038ull)
56#define CVMX_PIP_BIST_STATUS \
57 CVMX_ADD_IO_SEG(0x00011800A0000000ull)
58#define CVMX_PIP_CRC_CTLX(offset) \
59 CVMX_ADD_IO_SEG(0x00011800A0000040ull + (((offset) & 1) * 8))
60#define CVMX_PIP_CRC_IVX(offset) \
61 CVMX_ADD_IO_SEG(0x00011800A0000050ull + (((offset) & 1) * 8))
62#define CVMX_PIP_DEC_IPSECX(offset) \
63 CVMX_ADD_IO_SEG(0x00011800A0000080ull + (((offset) & 3) * 8))
64#define CVMX_PIP_DSA_SRC_GRP \
65 CVMX_ADD_IO_SEG(0x00011800A0000190ull)
66#define CVMX_PIP_DSA_VID_GRP \
67 CVMX_ADD_IO_SEG(0x00011800A0000198ull)
68#define CVMX_PIP_FRM_LEN_CHKX(offset) \
69 CVMX_ADD_IO_SEG(0x00011800A0000180ull + (((offset) & 1) * 8))
70#define CVMX_PIP_GBL_CFG \
71 CVMX_ADD_IO_SEG(0x00011800A0000028ull)
72#define CVMX_PIP_GBL_CTL \
73 CVMX_ADD_IO_SEG(0x00011800A0000020ull)
74#define CVMX_PIP_HG_PRI_QOS \
75 CVMX_ADD_IO_SEG(0x00011800A00001A0ull)
76#define CVMX_PIP_INT_EN \
77 CVMX_ADD_IO_SEG(0x00011800A0000010ull)
78#define CVMX_PIP_INT_REG \
79 CVMX_ADD_IO_SEG(0x00011800A0000008ull)
80#define CVMX_PIP_IP_OFFSET \
81 CVMX_ADD_IO_SEG(0x00011800A0000060ull)
82#define CVMX_PIP_PRT_CFGX(offset) \
83 CVMX_ADD_IO_SEG(0x00011800A0000200ull + (((offset) & 63) * 8))
84#define CVMX_PIP_PRT_TAGX(offset) \
85 CVMX_ADD_IO_SEG(0x00011800A0000400ull + (((offset) & 63) * 8))
86#define CVMX_PIP_QOS_DIFFX(offset) \
87 CVMX_ADD_IO_SEG(0x00011800A0000600ull + (((offset) & 63) * 8))
88#define CVMX_PIP_QOS_VLANX(offset) \
89 CVMX_ADD_IO_SEG(0x00011800A00000C0ull + (((offset) & 7) * 8))
90#define CVMX_PIP_QOS_WATCHX(offset) \
91 CVMX_ADD_IO_SEG(0x00011800A0000100ull + (((offset) & 7) * 8))
92#define CVMX_PIP_RAW_WORD \
93 CVMX_ADD_IO_SEG(0x00011800A00000B0ull)
94#define CVMX_PIP_SFT_RST \
95 CVMX_ADD_IO_SEG(0x00011800A0000030ull)
96#define CVMX_PIP_STAT0_PRTX(offset) \
97 CVMX_ADD_IO_SEG(0x00011800A0000800ull + (((offset) & 63) * 80))
98#define CVMX_PIP_STAT1_PRTX(offset) \
99 CVMX_ADD_IO_SEG(0x00011800A0000808ull + (((offset) & 63) * 80))
100#define CVMX_PIP_STAT2_PRTX(offset) \
101 CVMX_ADD_IO_SEG(0x00011800A0000810ull + (((offset) & 63) * 80))
102#define CVMX_PIP_STAT3_PRTX(offset) \
103 CVMX_ADD_IO_SEG(0x00011800A0000818ull + (((offset) & 63) * 80))
104#define CVMX_PIP_STAT4_PRTX(offset) \
105 CVMX_ADD_IO_SEG(0x00011800A0000820ull + (((offset) & 63) * 80))
106#define CVMX_PIP_STAT5_PRTX(offset) \
107 CVMX_ADD_IO_SEG(0x00011800A0000828ull + (((offset) & 63) * 80))
108#define CVMX_PIP_STAT6_PRTX(offset) \
109 CVMX_ADD_IO_SEG(0x00011800A0000830ull + (((offset) & 63) * 80))
110#define CVMX_PIP_STAT7_PRTX(offset) \
111 CVMX_ADD_IO_SEG(0x00011800A0000838ull + (((offset) & 63) * 80))
112#define CVMX_PIP_STAT8_PRTX(offset) \
113 CVMX_ADD_IO_SEG(0x00011800A0000840ull + (((offset) & 63) * 80))
114#define CVMX_PIP_STAT9_PRTX(offset) \
115 CVMX_ADD_IO_SEG(0x00011800A0000848ull + (((offset) & 63) * 80))
116#define CVMX_PIP_STAT_CTL \
117 CVMX_ADD_IO_SEG(0x00011800A0000018ull)
118#define CVMX_PIP_STAT_INB_ERRSX(offset) \
119 CVMX_ADD_IO_SEG(0x00011800A0001A10ull + (((offset) & 63) * 32))
120#define CVMX_PIP_STAT_INB_OCTSX(offset) \
121 CVMX_ADD_IO_SEG(0x00011800A0001A08ull + (((offset) & 63) * 32))
122#define CVMX_PIP_STAT_INB_PKTSX(offset) \
123 CVMX_ADD_IO_SEG(0x00011800A0001A00ull + (((offset) & 63) * 32))
124#define CVMX_PIP_TAG_INCX(offset) \
125 CVMX_ADD_IO_SEG(0x00011800A0001800ull + (((offset) & 63) * 8))
126#define CVMX_PIP_TAG_MASK \
127 CVMX_ADD_IO_SEG(0x00011800A0000070ull)
128#define CVMX_PIP_TAG_SECRET \
129 CVMX_ADD_IO_SEG(0x00011800A0000068ull)
130#define CVMX_PIP_TODO_ENTRY \
131 CVMX_ADD_IO_SEG(0x00011800A0000078ull)
132
/*
 * PIP backpressure control/status: bckprs asserted flag (bit 63) plus
 * hiwater/lowater thresholds.
 */
133union cvmx_pip_bck_prs {
134 uint64_t u64;
135 struct cvmx_pip_bck_prs_s {
136 uint64_t bckprs:1;
137 uint64_t reserved_13_62:50;
138 uint64_t hiwater:5;
139 uint64_t reserved_5_7:3;
140 uint64_t lowater:5;
141 } s;
142 struct cvmx_pip_bck_prs_s cn38xx;
143 struct cvmx_pip_bck_prs_s cn38xxp2;
144 struct cvmx_pip_bck_prs_s cn56xx;
145 struct cvmx_pip_bck_prs_s cn56xxp1;
146 struct cvmx_pip_bck_prs_s cn58xx;
147 struct cvmx_pip_bck_prs_s cn58xxp1;
148};
149
/*
 * PIP BIST result bitmap; 18 memories on most chips, 17 on cn50xx
 * (hence its own variant struct).
 */
150union cvmx_pip_bist_status {
151 uint64_t u64;
152 struct cvmx_pip_bist_status_s {
153 uint64_t reserved_18_63:46;
154 uint64_t bist:18;
155 } s;
156 struct cvmx_pip_bist_status_s cn30xx;
157 struct cvmx_pip_bist_status_s cn31xx;
158 struct cvmx_pip_bist_status_s cn38xx;
159 struct cvmx_pip_bist_status_s cn38xxp2;
160 struct cvmx_pip_bist_status_cn50xx {
161 uint64_t reserved_17_63:47;
162 uint64_t bist:17;
163 } cn50xx;
164 struct cvmx_pip_bist_status_s cn52xx;
165 struct cvmx_pip_bist_status_s cn52xxp1;
166 struct cvmx_pip_bist_status_s cn56xx;
167 struct cvmx_pip_bist_status_s cn56xxp1;
168 struct cvmx_pip_bist_status_s cn58xx;
169 struct cvmx_pip_bist_status_s cn58xxp1;
170};
171
/* PIP CRC engine control: invert-result and bit-reflect options. */
172union cvmx_pip_crc_ctlx {
173 uint64_t u64;
174 struct cvmx_pip_crc_ctlx_s {
175 uint64_t reserved_2_63:62;
176 uint64_t invres:1;
177 uint64_t reflect:1;
178 } s;
179 struct cvmx_pip_crc_ctlx_s cn38xx;
180 struct cvmx_pip_crc_ctlx_s cn38xxp2;
181 struct cvmx_pip_crc_ctlx_s cn58xx;
182 struct cvmx_pip_crc_ctlx_s cn58xxp1;
183};
184
/* PIP CRC engine initial value: 32-bit IV in bits 31:0. */
185union cvmx_pip_crc_ivx {
186 uint64_t u64;
187 struct cvmx_pip_crc_ivx_s {
188 uint64_t reserved_32_63:32;
189 uint64_t iv:32;
190 } s;
191 struct cvmx_pip_crc_ivx_s cn38xx;
192 struct cvmx_pip_crc_ivx_s cn38xxp2;
193 struct cvmx_pip_crc_ivx_s cn58xx;
194 struct cvmx_pip_crc_ivx_s cn58xxp1;
195};
196
/*
 * PIP IPSEC decode entry: match TCP and/or UDP traffic on the 16-bit
 * destination port dprt.
 */
197union cvmx_pip_dec_ipsecx {
198 uint64_t u64;
199 struct cvmx_pip_dec_ipsecx_s {
200 uint64_t reserved_18_63:46;
201 uint64_t tcp:1;
202 uint64_t udp:1;
203 uint64_t dprt:16;
204 } s;
205 struct cvmx_pip_dec_ipsecx_s cn30xx;
206 struct cvmx_pip_dec_ipsecx_s cn31xx;
207 struct cvmx_pip_dec_ipsecx_s cn38xx;
208 struct cvmx_pip_dec_ipsecx_s cn38xxp2;
209 struct cvmx_pip_dec_ipsecx_s cn50xx;
210 struct cvmx_pip_dec_ipsecx_s cn52xx;
211 struct cvmx_pip_dec_ipsecx_s cn52xxp1;
212 struct cvmx_pip_dec_ipsecx_s cn56xx;
213 struct cvmx_pip_dec_ipsecx_s cn56xxp1;
214 struct cvmx_pip_dec_ipsecx_s cn58xx;
215 struct cvmx_pip_dec_ipsecx_s cn58xxp1;
216};
217
/*
 * PIP DSA source-ID to group mapping: sixteen 4-bit map entries,
 * map15 in the top nibble down to map0 in bits 3:0.
 */
218union cvmx_pip_dsa_src_grp {
219 uint64_t u64;
220 struct cvmx_pip_dsa_src_grp_s {
221 uint64_t map15:4;
222 uint64_t map14:4;
223 uint64_t map13:4;
224 uint64_t map12:4;
225 uint64_t map11:4;
226 uint64_t map10:4;
227 uint64_t map9:4;
228 uint64_t map8:4;
229 uint64_t map7:4;
230 uint64_t map6:4;
231 uint64_t map5:4;
232 uint64_t map4:4;
233 uint64_t map3:4;
234 uint64_t map2:4;
235 uint64_t map1:4;
236 uint64_t map0:4;
237 } s;
238 struct cvmx_pip_dsa_src_grp_s cn52xx;
239 struct cvmx_pip_dsa_src_grp_s cn52xxp1;
240 struct cvmx_pip_dsa_src_grp_s cn56xx;
241};
242
/*
 * PIP DSA VLAN-ID to group mapping; identical 16 x 4-bit layout to
 * cvmx_pip_dsa_src_grp.
 */
243union cvmx_pip_dsa_vid_grp {
244 uint64_t u64;
245 struct cvmx_pip_dsa_vid_grp_s {
246 uint64_t map15:4;
247 uint64_t map14:4;
248 uint64_t map13:4;
249 uint64_t map12:4;
250 uint64_t map11:4;
251 uint64_t map10:4;
252 uint64_t map9:4;
253 uint64_t map8:4;
254 uint64_t map7:4;
255 uint64_t map6:4;
256 uint64_t map5:4;
257 uint64_t map4:4;
258 uint64_t map3:4;
259 uint64_t map2:4;
260 uint64_t map1:4;
261 uint64_t map0:4;
262 } s;
263 struct cvmx_pip_dsa_vid_grp_s cn52xx;
264 struct cvmx_pip_dsa_vid_grp_s cn52xxp1;
265 struct cvmx_pip_dsa_vid_grp_s cn56xx;
266};
267
/*
 * PIP_FRM_LEN_CHK(X): frame length check bounds.
 * maxlen/minlen are the upper/lower frame-size limits used by the
 * min/max length error checks (names per HRM — confirm exact units).
 */
union cvmx_pip_frm_len_chkx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_frm_len_chkx_s {
		uint64_t reserved_32_63:32;
		uint64_t maxlen:16;
		uint64_t minlen:16;
	} s;
	/* only cn50xx and the cn52xx/cn56xx family implement this register */
	struct cvmx_pip_frm_len_chkx_s cn50xx;
	struct cvmx_pip_frm_len_chkx_s cn52xx;
	struct cvmx_pip_frm_len_chkx_s cn52xxp1;
	struct cvmx_pip_frm_len_chkx_s cn56xx;
	struct cvmx_pip_frm_len_chkx_s cn56xxp1;
};
281
/*
 * PIP_GBL_CFG: global parse configuration.
 * NOTE(review): field meanings (SYN tagging, IPv6 UDP handling, raw/non-IP
 * shift amounts) inferred from names — confirm against the Octeon HRM.
 */
union cvmx_pip_gbl_cfg {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_gbl_cfg_s {
		uint64_t reserved_19_63:45;
		uint64_t tag_syn:1;
		uint64_t ip6_udp:1;
		uint64_t max_l2:1;
		uint64_t reserved_11_15:5;
		uint64_t raw_shf:3;
		uint64_t reserved_3_7:5;
		uint64_t nip_shf:3;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_gbl_cfg_s cn30xx;
	struct cvmx_pip_gbl_cfg_s cn31xx;
	struct cvmx_pip_gbl_cfg_s cn38xx;
	struct cvmx_pip_gbl_cfg_s cn38xxp2;
	struct cvmx_pip_gbl_cfg_s cn50xx;
	struct cvmx_pip_gbl_cfg_s cn52xx;
	struct cvmx_pip_gbl_cfg_s cn52xxp1;
	struct cvmx_pip_gbl_cfg_s cn56xx;
	struct cvmx_pip_gbl_cfg_s cn56xxp1;
	struct cvmx_pip_gbl_cfg_s cn58xx;
	struct cvmx_pip_gbl_cfg_s cn58xxp1;
};
306
/*
 * PIP_GBL_CTL: global checker/control enables.
 * Layout varies by chip family: the full layout ("s") applies to
 * cn52xx/cn56xx; older chips lack the DSA-group and ring-enable bits.
 * NOTE(review): individual bit semantics (L3/L4 check enables etc.)
 * inferred from names — confirm against the Octeon HRM.
 */
union cvmx_pip_gbl_ctl {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_gbl_ctl_s {
		uint64_t reserved_27_63:37;
		uint64_t dsa_grp_tvid:1;
		uint64_t dsa_grp_scmd:1;
		uint64_t dsa_grp_sid:1;
		uint64_t reserved_21_23:3;
		uint64_t ring_en:1;
		uint64_t reserved_17_19:3;
		uint64_t ignrs:1;
		uint64_t vs_wqe:1;
		uint64_t vs_qos:1;
		uint64_t l2_mal:1;
		uint64_t tcp_flag:1;
		uint64_t l4_len:1;
		uint64_t l4_chk:1;
		uint64_t l4_prt:1;
		uint64_t l4_mal:1;
		uint64_t reserved_6_7:2;
		uint64_t ip6_eext:2;
		uint64_t ip4_opts:1;
		uint64_t ip_hop:1;
		uint64_t ip_mal:1;
		uint64_t ip_chk:1;
	} s;
	/* cn30xx..cn50xx and cn58xx: no DSA-group or ring bits */
	struct cvmx_pip_gbl_ctl_cn30xx {
		uint64_t reserved_17_63:47;
		uint64_t ignrs:1;
		uint64_t vs_wqe:1;
		uint64_t vs_qos:1;
		uint64_t l2_mal:1;
		uint64_t tcp_flag:1;
		uint64_t l4_len:1;
		uint64_t l4_chk:1;
		uint64_t l4_prt:1;
		uint64_t l4_mal:1;
		uint64_t reserved_6_7:2;
		uint64_t ip6_eext:2;
		uint64_t ip4_opts:1;
		uint64_t ip_hop:1;
		uint64_t ip_mal:1;
		uint64_t ip_chk:1;
	} cn30xx;
	struct cvmx_pip_gbl_ctl_cn30xx cn31xx;
	struct cvmx_pip_gbl_ctl_cn30xx cn38xx;
	struct cvmx_pip_gbl_ctl_cn30xx cn38xxp2;
	struct cvmx_pip_gbl_ctl_cn30xx cn50xx;
	struct cvmx_pip_gbl_ctl_s cn52xx;
	struct cvmx_pip_gbl_ctl_s cn52xxp1;
	struct cvmx_pip_gbl_ctl_s cn56xx;
	/* cn56xx pass 1: ring_en present, DSA-group bits absent */
	struct cvmx_pip_gbl_ctl_cn56xxp1 {
		uint64_t reserved_21_63:43;
		uint64_t ring_en:1;
		uint64_t reserved_17_19:3;
		uint64_t ignrs:1;
		uint64_t vs_wqe:1;
		uint64_t vs_qos:1;
		uint64_t l2_mal:1;
		uint64_t tcp_flag:1;
		uint64_t l4_len:1;
		uint64_t l4_chk:1;
		uint64_t l4_prt:1;
		uint64_t l4_mal:1;
		uint64_t reserved_6_7:2;
		uint64_t ip6_eext:2;
		uint64_t ip4_opts:1;
		uint64_t ip_hop:1;
		uint64_t ip_mal:1;
		uint64_t ip_chk:1;
	} cn56xxp1;
	struct cvmx_pip_gbl_ctl_cn30xx cn58xx;
	struct cvmx_pip_gbl_ctl_cn30xx cn58xxp1;
};
381
/*
 * PIP_HG_PRI_QOS: HiGig priority-to-QoS mapping entry.
 * NOTE(review): qos is the assigned QoS level for HiGig priority "pri" —
 * confirm against the Octeon HRM.  cn52xx/cn56xx family only.
 */
union cvmx_pip_hg_pri_qos {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_hg_pri_qos_s {
		uint64_t reserved_11_63:53;
		uint64_t qos:3;
		uint64_t reserved_6_7:2;
		uint64_t pri:6;
	} s;
	struct cvmx_pip_hg_pri_qos_s cn52xx;
	struct cvmx_pip_hg_pri_qos_s cn52xxp1;
	struct cvmx_pip_hg_pri_qos_s cn56xx;
};
394
/*
 * PIP_INT_EN: interrupt enable bits, one per error condition in
 * PIP_INT_REG (same bit positions).  The available bits differ per chip
 * family, hence the per-chip layouts below.
 * NOTE(review): bit semantics inferred from names — confirm against the HRM.
 */
union cvmx_pip_int_en {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_int_en_s {
		uint64_t reserved_13_63:51;
		uint64_t punyerr:1;
		uint64_t lenerr:1;
		uint64_t maxerr:1;
		uint64_t minerr:1;
		uint64_t beperr:1;
		uint64_t feperr:1;
		uint64_t todoovr:1;
		uint64_t skprunt:1;
		uint64_t badtag:1;
		uint64_t prtnxa:1;
		uint64_t bckprs:1;
		uint64_t crcerr:1;
		uint64_t pktdrp:1;
	} s;
	/* oldest chips: only the low 9 error bits */
	struct cvmx_pip_int_en_cn30xx {
		uint64_t reserved_9_63:55;
		uint64_t beperr:1;
		uint64_t feperr:1;
		uint64_t todoovr:1;
		uint64_t skprunt:1;
		uint64_t badtag:1;
		uint64_t prtnxa:1;
		uint64_t bckprs:1;
		uint64_t crcerr:1;
		uint64_t pktdrp:1;
	} cn30xx;
	struct cvmx_pip_int_en_cn30xx cn31xx;
	struct cvmx_pip_int_en_cn30xx cn38xx;
	struct cvmx_pip_int_en_cn30xx cn38xxp2;
	/* cn50xx: length errors added, crcerr bit reserved */
	struct cvmx_pip_int_en_cn50xx {
		uint64_t reserved_12_63:52;
		uint64_t lenerr:1;
		uint64_t maxerr:1;
		uint64_t minerr:1;
		uint64_t beperr:1;
		uint64_t feperr:1;
		uint64_t todoovr:1;
		uint64_t skprunt:1;
		uint64_t badtag:1;
		uint64_t prtnxa:1;
		uint64_t bckprs:1;
		uint64_t reserved_1_1:1;
		uint64_t pktdrp:1;
	} cn50xx;
	/* cn52xx: adds punyerr, crcerr bit reserved */
	struct cvmx_pip_int_en_cn52xx {
		uint64_t reserved_13_63:51;
		uint64_t punyerr:1;
		uint64_t lenerr:1;
		uint64_t maxerr:1;
		uint64_t minerr:1;
		uint64_t beperr:1;
		uint64_t feperr:1;
		uint64_t todoovr:1;
		uint64_t skprunt:1;
		uint64_t badtag:1;
		uint64_t prtnxa:1;
		uint64_t bckprs:1;
		uint64_t reserved_1_1:1;
		uint64_t pktdrp:1;
	} cn52xx;
	struct cvmx_pip_int_en_cn52xx cn52xxp1;
	struct cvmx_pip_int_en_s cn56xx;
	/* cn56xx pass 1: no punyerr, crcerr present */
	struct cvmx_pip_int_en_cn56xxp1 {
		uint64_t reserved_12_63:52;
		uint64_t lenerr:1;
		uint64_t maxerr:1;
		uint64_t minerr:1;
		uint64_t beperr:1;
		uint64_t feperr:1;
		uint64_t todoovr:1;
		uint64_t skprunt:1;
		uint64_t badtag:1;
		uint64_t prtnxa:1;
		uint64_t bckprs:1;
		uint64_t crcerr:1;
		uint64_t pktdrp:1;
	} cn56xxp1;
	/* cn58xx: punyerr present, length errors absent */
	struct cvmx_pip_int_en_cn58xx {
		uint64_t reserved_13_63:51;
		uint64_t punyerr:1;
		uint64_t reserved_9_11:3;
		uint64_t beperr:1;
		uint64_t feperr:1;
		uint64_t todoovr:1;
		uint64_t skprunt:1;
		uint64_t badtag:1;
		uint64_t prtnxa:1;
		uint64_t bckprs:1;
		uint64_t crcerr:1;
		uint64_t pktdrp:1;
	} cn58xx;
	struct cvmx_pip_int_en_cn30xx cn58xxp1;
};
492
/*
 * PIP_INT_REG: interrupt status bits (same positions as PIP_INT_EN).
 * The available bits differ per chip family, hence the per-chip layouts.
 * NOTE(review): bit semantics inferred from names — confirm against the HRM.
 */
union cvmx_pip_int_reg {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_int_reg_s {
		uint64_t reserved_13_63:51;
		uint64_t punyerr:1;
		uint64_t lenerr:1;
		uint64_t maxerr:1;
		uint64_t minerr:1;
		uint64_t beperr:1;
		uint64_t feperr:1;
		uint64_t todoovr:1;
		uint64_t skprunt:1;
		uint64_t badtag:1;
		uint64_t prtnxa:1;
		uint64_t bckprs:1;
		uint64_t crcerr:1;
		uint64_t pktdrp:1;
	} s;
	/* oldest chips: only the low 9 error bits */
	struct cvmx_pip_int_reg_cn30xx {
		uint64_t reserved_9_63:55;
		uint64_t beperr:1;
		uint64_t feperr:1;
		uint64_t todoovr:1;
		uint64_t skprunt:1;
		uint64_t badtag:1;
		uint64_t prtnxa:1;
		uint64_t bckprs:1;
		uint64_t crcerr:1;
		uint64_t pktdrp:1;
	} cn30xx;
	struct cvmx_pip_int_reg_cn30xx cn31xx;
	struct cvmx_pip_int_reg_cn30xx cn38xx;
	struct cvmx_pip_int_reg_cn30xx cn38xxp2;
	/* cn50xx: length errors added, crcerr bit reserved */
	struct cvmx_pip_int_reg_cn50xx {
		uint64_t reserved_12_63:52;
		uint64_t lenerr:1;
		uint64_t maxerr:1;
		uint64_t minerr:1;
		uint64_t beperr:1;
		uint64_t feperr:1;
		uint64_t todoovr:1;
		uint64_t skprunt:1;
		uint64_t badtag:1;
		uint64_t prtnxa:1;
		uint64_t bckprs:1;
		uint64_t reserved_1_1:1;
		uint64_t pktdrp:1;
	} cn50xx;
	/* cn52xx: adds punyerr, crcerr bit reserved */
	struct cvmx_pip_int_reg_cn52xx {
		uint64_t reserved_13_63:51;
		uint64_t punyerr:1;
		uint64_t lenerr:1;
		uint64_t maxerr:1;
		uint64_t minerr:1;
		uint64_t beperr:1;
		uint64_t feperr:1;
		uint64_t todoovr:1;
		uint64_t skprunt:1;
		uint64_t badtag:1;
		uint64_t prtnxa:1;
		uint64_t bckprs:1;
		uint64_t reserved_1_1:1;
		uint64_t pktdrp:1;
	} cn52xx;
	struct cvmx_pip_int_reg_cn52xx cn52xxp1;
	struct cvmx_pip_int_reg_s cn56xx;
	/* cn56xx pass 1: no punyerr, crcerr present */
	struct cvmx_pip_int_reg_cn56xxp1 {
		uint64_t reserved_12_63:52;
		uint64_t lenerr:1;
		uint64_t maxerr:1;
		uint64_t minerr:1;
		uint64_t beperr:1;
		uint64_t feperr:1;
		uint64_t todoovr:1;
		uint64_t skprunt:1;
		uint64_t badtag:1;
		uint64_t prtnxa:1;
		uint64_t bckprs:1;
		uint64_t crcerr:1;
		uint64_t pktdrp:1;
	} cn56xxp1;
	/* cn58xx: punyerr present, length errors absent */
	struct cvmx_pip_int_reg_cn58xx {
		uint64_t reserved_13_63:51;
		uint64_t punyerr:1;
		uint64_t reserved_9_11:3;
		uint64_t beperr:1;
		uint64_t feperr:1;
		uint64_t todoovr:1;
		uint64_t skprunt:1;
		uint64_t badtag:1;
		uint64_t prtnxa:1;
		uint64_t bckprs:1;
		uint64_t crcerr:1;
		uint64_t pktdrp:1;
	} cn58xx;
	struct cvmx_pip_int_reg_cn30xx cn58xxp1;
};
590
/*
 * PIP_IP_OFFSET: IP offset control.
 * NOTE(review): "offset" presumably selects where the IP header is placed
 * relative to the packet buffer — confirm against the Octeon HRM.
 */
union cvmx_pip_ip_offset {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_ip_offset_s {
		uint64_t reserved_3_63:61;
		uint64_t offset:3;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_ip_offset_s cn30xx;
	struct cvmx_pip_ip_offset_s cn31xx;
	struct cvmx_pip_ip_offset_s cn38xx;
	struct cvmx_pip_ip_offset_s cn38xxp2;
	struct cvmx_pip_ip_offset_s cn50xx;
	struct cvmx_pip_ip_offset_s cn52xx;
	struct cvmx_pip_ip_offset_s cn52xxp1;
	struct cvmx_pip_ip_offset_s cn56xx;
	struct cvmx_pip_ip_offset_s cn56xxp1;
	struct cvmx_pip_ip_offset_s cn58xx;
	struct cvmx_pip_ip_offset_s cn58xxp1;
};
609
/*
 * PIP_PRT_CFG(X): per-port parse/QoS configuration.
 * The full layout ("s") applies to cn52xx/cn56xx; earlier chips implement
 * progressively fewer fields (see per-chip layouts below).
 * NOTE(review): field semantics (skip bytes, parse mode, QoS/group
 * watchers, CRC/HiGig/DSA enables) inferred from names — confirm
 * against the Octeon HRM.
 */
union cvmx_pip_prt_cfgx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_prt_cfgx_s {
		uint64_t reserved_53_63:11;
		uint64_t pad_len:1;
		uint64_t vlan_len:1;
		uint64_t lenerr_en:1;
		uint64_t maxerr_en:1;
		uint64_t minerr_en:1;
		uint64_t grp_wat_47:4;
		uint64_t qos_wat_47:4;
		uint64_t reserved_37_39:3;
		uint64_t rawdrp:1;
		uint64_t tag_inc:2;
		uint64_t dyn_rs:1;
		uint64_t inst_hdr:1;
		uint64_t grp_wat:4;
		uint64_t hg_qos:1;
		uint64_t qos:3;
		uint64_t qos_wat:4;
		uint64_t qos_vsel:1;
		uint64_t qos_vod:1;
		uint64_t qos_diff:1;
		uint64_t qos_vlan:1;
		uint64_t reserved_13_15:3;
		uint64_t crc_en:1;
		uint64_t higig_en:1;
		uint64_t dsa_en:1;
		uint64_t mode:2;
		uint64_t reserved_7_7:1;
		uint64_t skip:7;
	} s;
	/* cn30xx/cn31xx: minimal feature set, no CRC engine */
	struct cvmx_pip_prt_cfgx_cn30xx {
		uint64_t reserved_37_63:27;
		uint64_t rawdrp:1;
		uint64_t tag_inc:2;
		uint64_t dyn_rs:1;
		uint64_t inst_hdr:1;
		uint64_t grp_wat:4;
		uint64_t reserved_27_27:1;
		uint64_t qos:3;
		uint64_t qos_wat:4;
		uint64_t reserved_18_19:2;
		uint64_t qos_diff:1;
		uint64_t qos_vlan:1;
		uint64_t reserved_10_15:6;
		uint64_t mode:2;
		uint64_t reserved_7_7:1;
		uint64_t skip:7;
	} cn30xx;
	struct cvmx_pip_prt_cfgx_cn30xx cn31xx;
	/* cn38xx: adds crc_en */
	struct cvmx_pip_prt_cfgx_cn38xx {
		uint64_t reserved_37_63:27;
		uint64_t rawdrp:1;
		uint64_t tag_inc:2;
		uint64_t dyn_rs:1;
		uint64_t inst_hdr:1;
		uint64_t grp_wat:4;
		uint64_t reserved_27_27:1;
		uint64_t qos:3;
		uint64_t qos_wat:4;
		uint64_t reserved_18_19:2;
		uint64_t qos_diff:1;
		uint64_t qos_vlan:1;
		uint64_t reserved_13_15:3;
		uint64_t crc_en:1;
		uint64_t reserved_10_11:2;
		uint64_t mode:2;
		uint64_t reserved_7_7:1;
		uint64_t skip:7;
	} cn38xx;
	struct cvmx_pip_prt_cfgx_cn38xx cn38xxp2;
	/* cn50xx: adds length checks and high watchers; no HiGig/DSA */
	struct cvmx_pip_prt_cfgx_cn50xx {
		uint64_t reserved_53_63:11;
		uint64_t pad_len:1;
		uint64_t vlan_len:1;
		uint64_t lenerr_en:1;
		uint64_t maxerr_en:1;
		uint64_t minerr_en:1;
		uint64_t grp_wat_47:4;
		uint64_t qos_wat_47:4;
		uint64_t reserved_37_39:3;
		uint64_t rawdrp:1;
		uint64_t tag_inc:2;
		uint64_t dyn_rs:1;
		uint64_t inst_hdr:1;
		uint64_t grp_wat:4;
		uint64_t reserved_27_27:1;
		uint64_t qos:3;
		uint64_t qos_wat:4;
		uint64_t reserved_19_19:1;
		uint64_t qos_vod:1;
		uint64_t qos_diff:1;
		uint64_t qos_vlan:1;
		uint64_t reserved_13_15:3;
		uint64_t crc_en:1;
		uint64_t reserved_10_11:2;
		uint64_t mode:2;
		uint64_t reserved_7_7:1;
		uint64_t skip:7;
	} cn50xx;
	struct cvmx_pip_prt_cfgx_s cn52xx;
	struct cvmx_pip_prt_cfgx_s cn52xxp1;
	struct cvmx_pip_prt_cfgx_s cn56xx;
	struct cvmx_pip_prt_cfgx_cn50xx cn56xxp1;
	/* cn58xx: cn38xx layout plus qos_vod */
	struct cvmx_pip_prt_cfgx_cn58xx {
		uint64_t reserved_37_63:27;
		uint64_t rawdrp:1;
		uint64_t tag_inc:2;
		uint64_t dyn_rs:1;
		uint64_t inst_hdr:1;
		uint64_t grp_wat:4;
		uint64_t reserved_27_27:1;
		uint64_t qos:3;
		uint64_t qos_wat:4;
		uint64_t reserved_19_19:1;
		uint64_t qos_vod:1;
		uint64_t qos_diff:1;
		uint64_t qos_vlan:1;
		uint64_t reserved_13_15:3;
		uint64_t crc_en:1;
		uint64_t reserved_10_11:2;
		uint64_t mode:2;
		uint64_t reserved_7_7:1;
		uint64_t skip:7;
	} cn58xx;
	struct cvmx_pip_prt_cfgx_cn58xx cn58xxp1;
};
738
/*
 * PIP_PRT_TAG(X): per-port work-queue tag generation control — which
 * header fields feed the tag hash, tag types per protocol, and group
 * selection.  Older chips lack grptag_mskip (reserved bit 30).
 * NOTE(review): field semantics inferred from names — confirm against the HRM.
 */
union cvmx_pip_prt_tagx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_prt_tagx_s {
		uint64_t reserved_40_63:24;
		uint64_t grptagbase:4;
		uint64_t grptagmask:4;
		uint64_t grptag:1;
		uint64_t grptag_mskip:1;
		uint64_t tag_mode:2;
		uint64_t inc_vs:2;
		uint64_t inc_vlan:1;
		uint64_t inc_prt_flag:1;
		uint64_t ip6_dprt_flag:1;
		uint64_t ip4_dprt_flag:1;
		uint64_t ip6_sprt_flag:1;
		uint64_t ip4_sprt_flag:1;
		uint64_t ip6_nxth_flag:1;
		uint64_t ip4_pctl_flag:1;
		uint64_t ip6_dst_flag:1;
		uint64_t ip4_dst_flag:1;
		uint64_t ip6_src_flag:1;
		uint64_t ip4_src_flag:1;
		uint64_t tcp6_tag_type:2;
		uint64_t tcp4_tag_type:2;
		uint64_t ip6_tag_type:2;
		uint64_t ip4_tag_type:2;
		uint64_t non_tag_type:2;
		uint64_t grp:4;
	} s;
	/* cn30xx..cn38xx, cn58xx: bit 30 (grptag_mskip) reserved */
	struct cvmx_pip_prt_tagx_cn30xx {
		uint64_t reserved_40_63:24;
		uint64_t grptagbase:4;
		uint64_t grptagmask:4;
		uint64_t grptag:1;
		uint64_t reserved_30_30:1;
		uint64_t tag_mode:2;
		uint64_t inc_vs:2;
		uint64_t inc_vlan:1;
		uint64_t inc_prt_flag:1;
		uint64_t ip6_dprt_flag:1;
		uint64_t ip4_dprt_flag:1;
		uint64_t ip6_sprt_flag:1;
		uint64_t ip4_sprt_flag:1;
		uint64_t ip6_nxth_flag:1;
		uint64_t ip4_pctl_flag:1;
		uint64_t ip6_dst_flag:1;
		uint64_t ip4_dst_flag:1;
		uint64_t ip6_src_flag:1;
		uint64_t ip4_src_flag:1;
		uint64_t tcp6_tag_type:2;
		uint64_t tcp4_tag_type:2;
		uint64_t ip6_tag_type:2;
		uint64_t ip4_tag_type:2;
		uint64_t non_tag_type:2;
		uint64_t grp:4;
	} cn30xx;
	struct cvmx_pip_prt_tagx_cn30xx cn31xx;
	struct cvmx_pip_prt_tagx_cn30xx cn38xx;
	struct cvmx_pip_prt_tagx_cn30xx cn38xxp2;
	struct cvmx_pip_prt_tagx_s cn50xx;
	struct cvmx_pip_prt_tagx_s cn52xx;
	struct cvmx_pip_prt_tagx_s cn52xxp1;
	struct cvmx_pip_prt_tagx_s cn56xx;
	struct cvmx_pip_prt_tagx_s cn56xxp1;
	struct cvmx_pip_prt_tagx_cn30xx cn58xx;
	struct cvmx_pip_prt_tagx_cn30xx cn58xxp1;
};
806
/*
 * PIP_QOS_DIFF(X): diffserv-codepoint-to-QoS mapping entry.
 * NOTE(review): semantics inferred from the register name — confirm
 * against the Octeon HRM.
 */
union cvmx_pip_qos_diffx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_qos_diffx_s {
		uint64_t reserved_3_63:61;
		uint64_t qos:3;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_qos_diffx_s cn30xx;
	struct cvmx_pip_qos_diffx_s cn31xx;
	struct cvmx_pip_qos_diffx_s cn38xx;
	struct cvmx_pip_qos_diffx_s cn38xxp2;
	struct cvmx_pip_qos_diffx_s cn50xx;
	struct cvmx_pip_qos_diffx_s cn52xx;
	struct cvmx_pip_qos_diffx_s cn52xxp1;
	struct cvmx_pip_qos_diffx_s cn56xx;
	struct cvmx_pip_qos_diffx_s cn56xxp1;
	struct cvmx_pip_qos_diffx_s cn58xx;
	struct cvmx_pip_qos_diffx_s cn58xxp1;
};
825
/*
 * PIP_QOS_VLAN(X): VLAN-priority-to-QoS mapping entry.
 * cn52xx/cn56xx add a second QoS value (qos1); older chips have qos only.
 * NOTE(review): semantics inferred from names — confirm against the HRM.
 */
union cvmx_pip_qos_vlanx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_qos_vlanx_s {
		uint64_t reserved_7_63:57;
		uint64_t qos1:3;
		uint64_t reserved_3_3:1;
		uint64_t qos:3;
	} s;
	/* pre-cn52xx layout: single QoS value */
	struct cvmx_pip_qos_vlanx_cn30xx {
		uint64_t reserved_3_63:61;
		uint64_t qos:3;
	} cn30xx;
	struct cvmx_pip_qos_vlanx_cn30xx cn31xx;
	struct cvmx_pip_qos_vlanx_cn30xx cn38xx;
	struct cvmx_pip_qos_vlanx_cn30xx cn38xxp2;
	struct cvmx_pip_qos_vlanx_cn30xx cn50xx;
	struct cvmx_pip_qos_vlanx_s cn52xx;
	struct cvmx_pip_qos_vlanx_s cn52xxp1;
	struct cvmx_pip_qos_vlanx_s cn56xx;
	struct cvmx_pip_qos_vlanx_cn30xx cn56xxp1;
	struct cvmx_pip_qos_vlanx_cn30xx cn58xx;
	struct cvmx_pip_qos_vlanx_cn30xx cn58xxp1;
};
849
/*
 * PIP_QOS_WATCH(X): QoS watcher entry — matches a packet field value
 * (match_type/match_value under mask) and assigns qos/grp on match.
 * cn30xx-era chips have a 2-bit match_type; later chips widen it to 3 bits.
 * NOTE(review): semantics inferred from names — confirm against the HRM.
 */
union cvmx_pip_qos_watchx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_qos_watchx_s {
		uint64_t reserved_48_63:16;
		uint64_t mask:16;
		uint64_t reserved_28_31:4;
		uint64_t grp:4;
		uint64_t reserved_23_23:1;
		uint64_t qos:3;
		uint64_t reserved_19_19:1;
		uint64_t match_type:3;
		uint64_t match_value:16;
	} s;
	/* older chips: 2-bit match_type */
	struct cvmx_pip_qos_watchx_cn30xx {
		uint64_t reserved_48_63:16;
		uint64_t mask:16;
		uint64_t reserved_28_31:4;
		uint64_t grp:4;
		uint64_t reserved_23_23:1;
		uint64_t qos:3;
		uint64_t reserved_18_19:2;
		uint64_t match_type:2;
		uint64_t match_value:16;
	} cn30xx;
	struct cvmx_pip_qos_watchx_cn30xx cn31xx;
	struct cvmx_pip_qos_watchx_cn30xx cn38xx;
	struct cvmx_pip_qos_watchx_cn30xx cn38xxp2;
	struct cvmx_pip_qos_watchx_s cn50xx;
	struct cvmx_pip_qos_watchx_s cn52xx;
	struct cvmx_pip_qos_watchx_s cn52xxp1;
	struct cvmx_pip_qos_watchx_s cn56xx;
	struct cvmx_pip_qos_watchx_s cn56xxp1;
	struct cvmx_pip_qos_watchx_cn30xx cn58xx;
	struct cvmx_pip_qos_watchx_cn30xx cn58xxp1;
};
885
/*
 * PIP_RAW_WORD: 56-bit value inserted into work for RAW packets.
 * NOTE(review): usage inferred from the register name — confirm against
 * the Octeon HRM.
 */
union cvmx_pip_raw_word {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_raw_word_s {
		uint64_t reserved_56_63:8;
		uint64_t word:56;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_raw_word_s cn30xx;
	struct cvmx_pip_raw_word_s cn31xx;
	struct cvmx_pip_raw_word_s cn38xx;
	struct cvmx_pip_raw_word_s cn38xxp2;
	struct cvmx_pip_raw_word_s cn50xx;
	struct cvmx_pip_raw_word_s cn52xx;
	struct cvmx_pip_raw_word_s cn52xxp1;
	struct cvmx_pip_raw_word_s cn56xx;
	struct cvmx_pip_raw_word_s cn56xxp1;
	struct cvmx_pip_raw_word_s cn58xx;
	struct cvmx_pip_raw_word_s cn58xxp1;
};
904
/*
 * PIP_SFT_RST: soft reset bit for the PIP unit.
 * Note: unlike most registers here, there is no cn38xxp2 view in the
 * original definition.
 */
union cvmx_pip_sft_rst {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_sft_rst_s {
		uint64_t reserved_1_63:63;
		uint64_t rst:1;	/* write 1 to soft-reset (per name; confirm in HRM) */
	} s;
	struct cvmx_pip_sft_rst_s cn30xx;
	struct cvmx_pip_sft_rst_s cn31xx;
	struct cvmx_pip_sft_rst_s cn38xx;
	struct cvmx_pip_sft_rst_s cn50xx;
	struct cvmx_pip_sft_rst_s cn52xx;
	struct cvmx_pip_sft_rst_s cn52xxp1;
	struct cvmx_pip_sft_rst_s cn56xx;
	struct cvmx_pip_sft_rst_s cn56xxp1;
	struct cvmx_pip_sft_rst_s cn58xx;
	struct cvmx_pip_sft_rst_s cn58xxp1;
};
922
/*
 * PIP_STAT0_PRT(X): per-port statistics word 0 — dropped packets and
 * dropped octets (names per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat0_prtx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat0_prtx_s {
		uint64_t drp_pkts:32;
		uint64_t drp_octs:32;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat0_prtx_s cn30xx;
	struct cvmx_pip_stat0_prtx_s cn31xx;
	struct cvmx_pip_stat0_prtx_s cn38xx;
	struct cvmx_pip_stat0_prtx_s cn38xxp2;
	struct cvmx_pip_stat0_prtx_s cn50xx;
	struct cvmx_pip_stat0_prtx_s cn52xx;
	struct cvmx_pip_stat0_prtx_s cn52xxp1;
	struct cvmx_pip_stat0_prtx_s cn56xx;
	struct cvmx_pip_stat0_prtx_s cn56xxp1;
	struct cvmx_pip_stat0_prtx_s cn58xx;
	struct cvmx_pip_stat0_prtx_s cn58xxp1;
};
941
/*
 * PIP_STAT1_PRT(X): per-port statistics word 1 — 48-bit octet counter
 * (name per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat1_prtx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat1_prtx_s {
		uint64_t reserved_48_63:16;
		uint64_t octs:48;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat1_prtx_s cn30xx;
	struct cvmx_pip_stat1_prtx_s cn31xx;
	struct cvmx_pip_stat1_prtx_s cn38xx;
	struct cvmx_pip_stat1_prtx_s cn38xxp2;
	struct cvmx_pip_stat1_prtx_s cn50xx;
	struct cvmx_pip_stat1_prtx_s cn52xx;
	struct cvmx_pip_stat1_prtx_s cn52xxp1;
	struct cvmx_pip_stat1_prtx_s cn56xx;
	struct cvmx_pip_stat1_prtx_s cn56xxp1;
	struct cvmx_pip_stat1_prtx_s cn58xx;
	struct cvmx_pip_stat1_prtx_s cn58xxp1;
};
960
/*
 * PIP_STAT2_PRT(X): per-port statistics word 2 — packet and RAW-packet
 * counters (names per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat2_prtx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat2_prtx_s {
		uint64_t pkts:32;
		uint64_t raw:32;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat2_prtx_s cn30xx;
	struct cvmx_pip_stat2_prtx_s cn31xx;
	struct cvmx_pip_stat2_prtx_s cn38xx;
	struct cvmx_pip_stat2_prtx_s cn38xxp2;
	struct cvmx_pip_stat2_prtx_s cn50xx;
	struct cvmx_pip_stat2_prtx_s cn52xx;
	struct cvmx_pip_stat2_prtx_s cn52xxp1;
	struct cvmx_pip_stat2_prtx_s cn56xx;
	struct cvmx_pip_stat2_prtx_s cn56xxp1;
	struct cvmx_pip_stat2_prtx_s cn58xx;
	struct cvmx_pip_stat2_prtx_s cn58xxp1;
};
979
/*
 * PIP_STAT3_PRT(X): per-port statistics word 3 — broadcast and multicast
 * counters (names per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat3_prtx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat3_prtx_s {
		uint64_t bcst:32;
		uint64_t mcst:32;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat3_prtx_s cn30xx;
	struct cvmx_pip_stat3_prtx_s cn31xx;
	struct cvmx_pip_stat3_prtx_s cn38xx;
	struct cvmx_pip_stat3_prtx_s cn38xxp2;
	struct cvmx_pip_stat3_prtx_s cn50xx;
	struct cvmx_pip_stat3_prtx_s cn52xx;
	struct cvmx_pip_stat3_prtx_s cn52xxp1;
	struct cvmx_pip_stat3_prtx_s cn56xx;
	struct cvmx_pip_stat3_prtx_s cn56xxp1;
	struct cvmx_pip_stat3_prtx_s cn58xx;
	struct cvmx_pip_stat3_prtx_s cn58xxp1;
};
998
/*
 * PIP_STAT4_PRT(X): per-port histogram word — 64-byte and 65-127-byte
 * frame counters (names per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat4_prtx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat4_prtx_s {
		uint64_t h65to127:32;
		uint64_t h64:32;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat4_prtx_s cn30xx;
	struct cvmx_pip_stat4_prtx_s cn31xx;
	struct cvmx_pip_stat4_prtx_s cn38xx;
	struct cvmx_pip_stat4_prtx_s cn38xxp2;
	struct cvmx_pip_stat4_prtx_s cn50xx;
	struct cvmx_pip_stat4_prtx_s cn52xx;
	struct cvmx_pip_stat4_prtx_s cn52xxp1;
	struct cvmx_pip_stat4_prtx_s cn56xx;
	struct cvmx_pip_stat4_prtx_s cn56xxp1;
	struct cvmx_pip_stat4_prtx_s cn58xx;
	struct cvmx_pip_stat4_prtx_s cn58xxp1;
};
1017
/*
 * PIP_STAT5_PRT(X): per-port histogram word — 128-255 and 256-511 byte
 * frame counters (names per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat5_prtx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat5_prtx_s {
		uint64_t h256to511:32;
		uint64_t h128to255:32;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat5_prtx_s cn30xx;
	struct cvmx_pip_stat5_prtx_s cn31xx;
	struct cvmx_pip_stat5_prtx_s cn38xx;
	struct cvmx_pip_stat5_prtx_s cn38xxp2;
	struct cvmx_pip_stat5_prtx_s cn50xx;
	struct cvmx_pip_stat5_prtx_s cn52xx;
	struct cvmx_pip_stat5_prtx_s cn52xxp1;
	struct cvmx_pip_stat5_prtx_s cn56xx;
	struct cvmx_pip_stat5_prtx_s cn56xxp1;
	struct cvmx_pip_stat5_prtx_s cn58xx;
	struct cvmx_pip_stat5_prtx_s cn58xxp1;
};
1036
/*
 * PIP_STAT6_PRT(X): per-port histogram word — 512-1023 and 1024-1518 byte
 * frame counters (names per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat6_prtx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat6_prtx_s {
		uint64_t h1024to1518:32;
		uint64_t h512to1023:32;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat6_prtx_s cn30xx;
	struct cvmx_pip_stat6_prtx_s cn31xx;
	struct cvmx_pip_stat6_prtx_s cn38xx;
	struct cvmx_pip_stat6_prtx_s cn38xxp2;
	struct cvmx_pip_stat6_prtx_s cn50xx;
	struct cvmx_pip_stat6_prtx_s cn52xx;
	struct cvmx_pip_stat6_prtx_s cn52xxp1;
	struct cvmx_pip_stat6_prtx_s cn56xx;
	struct cvmx_pip_stat6_prtx_s cn56xxp1;
	struct cvmx_pip_stat6_prtx_s cn58xx;
	struct cvmx_pip_stat6_prtx_s cn58xxp1;
};
1055
/*
 * PIP_STAT7_PRT(X): per-port statistics word — >=1519-byte frame counter
 * and FCS-error counter (names per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat7_prtx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat7_prtx_s {
		uint64_t fcs:32;
		uint64_t h1519:32;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat7_prtx_s cn30xx;
	struct cvmx_pip_stat7_prtx_s cn31xx;
	struct cvmx_pip_stat7_prtx_s cn38xx;
	struct cvmx_pip_stat7_prtx_s cn38xxp2;
	struct cvmx_pip_stat7_prtx_s cn50xx;
	struct cvmx_pip_stat7_prtx_s cn52xx;
	struct cvmx_pip_stat7_prtx_s cn52xxp1;
	struct cvmx_pip_stat7_prtx_s cn56xx;
	struct cvmx_pip_stat7_prtx_s cn56xxp1;
	struct cvmx_pip_stat7_prtx_s cn58xx;
	struct cvmx_pip_stat7_prtx_s cn58xxp1;
};
1074
/*
 * PIP_STAT8_PRT(X): per-port statistics word — undersize and fragment
 * counters (names per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat8_prtx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat8_prtx_s {
		uint64_t frag:32;
		uint64_t undersz:32;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat8_prtx_s cn30xx;
	struct cvmx_pip_stat8_prtx_s cn31xx;
	struct cvmx_pip_stat8_prtx_s cn38xx;
	struct cvmx_pip_stat8_prtx_s cn38xxp2;
	struct cvmx_pip_stat8_prtx_s cn50xx;
	struct cvmx_pip_stat8_prtx_s cn52xx;
	struct cvmx_pip_stat8_prtx_s cn52xxp1;
	struct cvmx_pip_stat8_prtx_s cn56xx;
	struct cvmx_pip_stat8_prtx_s cn56xxp1;
	struct cvmx_pip_stat8_prtx_s cn58xx;
	struct cvmx_pip_stat8_prtx_s cn58xxp1;
};
1093
/*
 * PIP_STAT9_PRT(X): per-port statistics word — oversize and jabber
 * counters (names per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat9_prtx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat9_prtx_s {
		uint64_t jabber:32;
		uint64_t oversz:32;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat9_prtx_s cn30xx;
	struct cvmx_pip_stat9_prtx_s cn31xx;
	struct cvmx_pip_stat9_prtx_s cn38xx;
	struct cvmx_pip_stat9_prtx_s cn38xxp2;
	struct cvmx_pip_stat9_prtx_s cn50xx;
	struct cvmx_pip_stat9_prtx_s cn52xx;
	struct cvmx_pip_stat9_prtx_s cn52xxp1;
	struct cvmx_pip_stat9_prtx_s cn56xx;
	struct cvmx_pip_stat9_prtx_s cn56xxp1;
	struct cvmx_pip_stat9_prtx_s cn58xx;
	struct cvmx_pip_stat9_prtx_s cn58xxp1;
};
1112
/*
 * PIP_STAT_CTL: statistics control.
 * rdclr presumably selects clear-on-read behavior for the statistics
 * registers — confirm against the Octeon HRM.
 */
union cvmx_pip_stat_ctl {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat_ctl_s {
		uint64_t reserved_1_63:63;
		uint64_t rdclr:1;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat_ctl_s cn30xx;
	struct cvmx_pip_stat_ctl_s cn31xx;
	struct cvmx_pip_stat_ctl_s cn38xx;
	struct cvmx_pip_stat_ctl_s cn38xxp2;
	struct cvmx_pip_stat_ctl_s cn50xx;
	struct cvmx_pip_stat_ctl_s cn52xx;
	struct cvmx_pip_stat_ctl_s cn52xxp1;
	struct cvmx_pip_stat_ctl_s cn56xx;
	struct cvmx_pip_stat_ctl_s cn56xxp1;
	struct cvmx_pip_stat_ctl_s cn58xx;
	struct cvmx_pip_stat_ctl_s cn58xxp1;
};
1131
/*
 * PIP_STAT_INB_ERRS(X): inbound error counter, 16 bits per port/interface
 * (name per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat_inb_errsx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat_inb_errsx_s {
		uint64_t reserved_16_63:48;
		uint64_t errs:16;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat_inb_errsx_s cn30xx;
	struct cvmx_pip_stat_inb_errsx_s cn31xx;
	struct cvmx_pip_stat_inb_errsx_s cn38xx;
	struct cvmx_pip_stat_inb_errsx_s cn38xxp2;
	struct cvmx_pip_stat_inb_errsx_s cn50xx;
	struct cvmx_pip_stat_inb_errsx_s cn52xx;
	struct cvmx_pip_stat_inb_errsx_s cn52xxp1;
	struct cvmx_pip_stat_inb_errsx_s cn56xx;
	struct cvmx_pip_stat_inb_errsx_s cn56xxp1;
	struct cvmx_pip_stat_inb_errsx_s cn58xx;
	struct cvmx_pip_stat_inb_errsx_s cn58xxp1;
};
1150
/*
 * PIP_STAT_INB_OCTS(X): inbound octet counter, 48 bits
 * (name per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat_inb_octsx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat_inb_octsx_s {
		uint64_t reserved_48_63:16;
		uint64_t octs:48;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat_inb_octsx_s cn30xx;
	struct cvmx_pip_stat_inb_octsx_s cn31xx;
	struct cvmx_pip_stat_inb_octsx_s cn38xx;
	struct cvmx_pip_stat_inb_octsx_s cn38xxp2;
	struct cvmx_pip_stat_inb_octsx_s cn50xx;
	struct cvmx_pip_stat_inb_octsx_s cn52xx;
	struct cvmx_pip_stat_inb_octsx_s cn52xxp1;
	struct cvmx_pip_stat_inb_octsx_s cn56xx;
	struct cvmx_pip_stat_inb_octsx_s cn56xxp1;
	struct cvmx_pip_stat_inb_octsx_s cn58xx;
	struct cvmx_pip_stat_inb_octsx_s cn58xxp1;
};
1169
/*
 * PIP_STAT_INB_PKTS(X): inbound packet counter, 32 bits
 * (name per HRM; confirm counting semantics there).
 */
union cvmx_pip_stat_inb_pktsx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_stat_inb_pktsx_s {
		uint64_t reserved_32_63:32;
		uint64_t pkts:32;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_stat_inb_pktsx_s cn30xx;
	struct cvmx_pip_stat_inb_pktsx_s cn31xx;
	struct cvmx_pip_stat_inb_pktsx_s cn38xx;
	struct cvmx_pip_stat_inb_pktsx_s cn38xxp2;
	struct cvmx_pip_stat_inb_pktsx_s cn50xx;
	struct cvmx_pip_stat_inb_pktsx_s cn52xx;
	struct cvmx_pip_stat_inb_pktsx_s cn52xxp1;
	struct cvmx_pip_stat_inb_pktsx_s cn56xx;
	struct cvmx_pip_stat_inb_pktsx_s cn56xxp1;
	struct cvmx_pip_stat_inb_pktsx_s cn58xx;
	struct cvmx_pip_stat_inb_pktsx_s cn58xxp1;
};
1188
/*
 * PIP_TAG_INC(X): 8-bit enable mask for tag-hash inclusion
 * (semantics per HRM — confirm which bytes each bit enables).
 */
union cvmx_pip_tag_incx {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_tag_incx_s {
		uint64_t reserved_8_63:56;
		uint64_t en:8;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_tag_incx_s cn30xx;
	struct cvmx_pip_tag_incx_s cn31xx;
	struct cvmx_pip_tag_incx_s cn38xx;
	struct cvmx_pip_tag_incx_s cn38xxp2;
	struct cvmx_pip_tag_incx_s cn50xx;
	struct cvmx_pip_tag_incx_s cn52xx;
	struct cvmx_pip_tag_incx_s cn52xxp1;
	struct cvmx_pip_tag_incx_s cn56xx;
	struct cvmx_pip_tag_incx_s cn56xxp1;
	struct cvmx_pip_tag_incx_s cn58xx;
	struct cvmx_pip_tag_incx_s cn58xxp1;
};
1207
/*
 * PIP_TAG_MASK: 16-bit mask applied to computed tags
 * (semantics per HRM — confirm which tag bits the mask clears).
 */
union cvmx_pip_tag_mask {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_tag_mask_s {
		uint64_t reserved_16_63:48;
		uint64_t mask:16;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_tag_mask_s cn30xx;
	struct cvmx_pip_tag_mask_s cn31xx;
	struct cvmx_pip_tag_mask_s cn38xx;
	struct cvmx_pip_tag_mask_s cn38xxp2;
	struct cvmx_pip_tag_mask_s cn50xx;
	struct cvmx_pip_tag_mask_s cn52xx;
	struct cvmx_pip_tag_mask_s cn52xxp1;
	struct cvmx_pip_tag_mask_s cn56xx;
	struct cvmx_pip_tag_mask_s cn56xxp1;
	struct cvmx_pip_tag_mask_s cn58xx;
	struct cvmx_pip_tag_mask_s cn58xxp1;
};
1226
/*
 * PIP_TAG_SECRET: secret values mixed into tag generation for the
 * source/destination fields (semantics per HRM — confirm).
 */
union cvmx_pip_tag_secret {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_tag_secret_s {
		uint64_t reserved_32_63:32;
		uint64_t dst:16;
		uint64_t src:16;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_tag_secret_s cn30xx;
	struct cvmx_pip_tag_secret_s cn31xx;
	struct cvmx_pip_tag_secret_s cn38xx;
	struct cvmx_pip_tag_secret_s cn38xxp2;
	struct cvmx_pip_tag_secret_s cn50xx;
	struct cvmx_pip_tag_secret_s cn52xx;
	struct cvmx_pip_tag_secret_s cn52xxp1;
	struct cvmx_pip_tag_secret_s cn56xx;
	struct cvmx_pip_tag_secret_s cn56xxp1;
	struct cvmx_pip_tag_secret_s cn58xx;
	struct cvmx_pip_tag_secret_s cn58xxp1;
};
1246
/*
 * PIP_TODO_ENTRY: debug view of an entry in the PIP to-do list
 * (val marks a valid entry; semantics per HRM — confirm).
 */
union cvmx_pip_todo_entry {
	uint64_t u64;	/* raw 64-bit register image */
	struct cvmx_pip_todo_entry_s {
		uint64_t val:1;
		uint64_t reserved_62_62:1;
		uint64_t entry:62;
	} s;
	/* chip-specific views; all share the common layout */
	struct cvmx_pip_todo_entry_s cn30xx;
	struct cvmx_pip_todo_entry_s cn31xx;
	struct cvmx_pip_todo_entry_s cn38xx;
	struct cvmx_pip_todo_entry_s cn38xxp2;
	struct cvmx_pip_todo_entry_s cn50xx;
	struct cvmx_pip_todo_entry_s cn52xx;
	struct cvmx_pip_todo_entry_s cn52xxp1;
	struct cvmx_pip_todo_entry_s cn56xx;
	struct cvmx_pip_todo_entry_s cn56xxp1;
	struct cvmx_pip_todo_entry_s cn58xx;
	struct cvmx_pip_todo_entry_s cn58xxp1;
};
1266
1267#endif
diff --git a/drivers/staging/octeon/cvmx-pip.h b/drivers/staging/octeon/cvmx-pip.h
new file mode 100644
index 000000000000..78dbce8f2c5e
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pip.h
@@ -0,0 +1,524 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Interface to the hardware Packet Input Processing unit.
30 *
31 */
32
33#ifndef __CVMX_PIP_H__
34#define __CVMX_PIP_H__
35
36#include "cvmx-wqe.h"
37#include "cvmx-fpa.h"
38#include "cvmx-pip-defs.h"
39
40#define CVMX_PIP_NUM_INPUT_PORTS 40
41#define CVMX_PIP_NUM_WATCHERS 4
42
/*
 * Layer-4 (TCP/UDP) error and exception codes reported by PIP.
 * Numeric values are fixed by the hardware; gaps are intentional.
 */
typedef enum {
	/* 0 = no L4 error detected */
	CVMX_PIP_L4_NO_ERR = 0ull,
	/* 1 = packet not long enough to cover the TCP (UDP) header */
	CVMX_PIP_L4_MAL_ERR = 1ull,
	/* 2 = TCP/UDP checksum failure */
	CVMX_PIP_CHK_ERR = 2ull,
	/* 3 = TCP/UDP length does not match the IP length */
	CVMX_PIP_L4_LENGTH_ERR = 3ull,
	/* 4 = illegal TCP/UDP port (source or destination port is zero) */
	CVMX_PIP_BAD_PRT_ERR = 4ull,
	/* 8 = TCP flags = FIN only */
	CVMX_PIP_TCP_FLG8_ERR = 8ull,
	/* 9 = TCP flags = 0 */
	CVMX_PIP_TCP_FLG9_ERR = 9ull,
	/* 10 = TCP flags = FIN+RST+* */
	CVMX_PIP_TCP_FLG10_ERR = 10ull,
	/* 11 = TCP flags = SYN+URG+* */
	CVMX_PIP_TCP_FLG11_ERR = 11ull,
	/* 12 = TCP flags = SYN+RST+* */
	CVMX_PIP_TCP_FLG12_ERR = 12ull,
	/* 13 = TCP flags = SYN+FIN+* */
	CVMX_PIP_TCP_FLG13_ERR = 13ull
} cvmx_pip_l4_err_t;
75
/*
 * IP-level error and exception codes reported by PIP.
 * Numeric values are fixed by the hardware.
 */
typedef enum {
	/* 0 = no IP error detected */
	CVMX_PIP_IP_NO_ERR = 0ull,
	/* 1 = not IPv4 or IPv6 */
	CVMX_PIP_NOT_IP = 1ull,
	/* 2 = IPv4 header checksum violation */
	CVMX_PIP_IPV4_HDR_CHK = 2ull,
	/* 3 = malformed (packet not long enough to cover IP hdr) */
	CVMX_PIP_IP_MAL_HDR = 3ull,
	/* 4 = malformed (packet not long enough to cover len in IP hdr) */
	CVMX_PIP_IP_MAL_PKT = 4ull,
	/* 5 = TTL / hop count equal zero */
	CVMX_PIP_TTL_HOP = 5ull,
	/* 6 = IPv4 options / IPv6 early extension headers */
	CVMX_PIP_OPTS = 6ull
} cvmx_pip_ip_exc_t;
92
/**
 * Receive-side error codes reported by PIP.
 *
 * NOTES
 *     late collision (data received before collision)
 *          late collisions cannot be detected by the receiver
 *          they would appear as JAM bits which would appear as bad FCS
 *          or carrier extend error which is CVMX_PIP_EXTEND_ERR
 *
 * Fix: the last three constants used an "L" suffix while every other
 * enumerator in this header uses "ull"; normalized to "ull" for
 * consistency (values unchanged).
 */
typedef enum {
	/* No error */
	CVMX_PIP_RX_NO_ERR = 0ull,
	/* RGM+SPI 1 = partially received packet (buffering/bandwidth
	 * not adequate) */
	CVMX_PIP_PARTIAL_ERR = 1ull,
	/* RGM+SPI 2 = receive packet too large and truncated */
	CVMX_PIP_JABBER_ERR = 2ull,
	/*
	 * RGM 3 = max frame error (pkt len > max frame len) (with FCS
	 * error)
	 */
	CVMX_PIP_OVER_FCS_ERR = 3ull,
	/* RGM+SPI 4 = max frame error (pkt len > max frame len) */
	CVMX_PIP_OVER_ERR = 4ull,
	/*
	 * RGM 5 = nibble error (data not byte multiple - 100M and 10M
	 * only)
	 */
	CVMX_PIP_ALIGN_ERR = 5ull,
	/*
	 * RGM 6 = min frame error (pkt len < min frame len) (with FCS
	 * error)
	 */
	CVMX_PIP_UNDER_FCS_ERR = 6ull,
	/* RGM 7 = FCS error */
	CVMX_PIP_GMX_FCS_ERR = 7ull,
	/* RGM+SPI 8 = min frame error (pkt len < min frame len) */
	CVMX_PIP_UNDER_ERR = 8ull,
	/* RGM 9 = Frame carrier extend error */
	CVMX_PIP_EXTEND_ERR = 9ull,
	/*
	 * RGM 10 = length mismatch (len did not match len in L2
	 * length/type)
	 */
	CVMX_PIP_LENGTH_ERR = 10ull,
	/* RGM 11 = Frame error (some or all data bits marked err) */
	CVMX_PIP_DAT_ERR = 11ull,
	/* SPI 11 = DIP4 error (same code as CVMX_PIP_DAT_ERR on SPI) */
	CVMX_PIP_DIP_ERR = 11ull,
	/*
	 * RGM 12 = packet was not large enough to pass the skipper -
	 * no inspection could occur.
	 */
	CVMX_PIP_SKIP_ERR = 12ull,
	/*
	 * RGM 13 = studder error (data not repeated - 100M and 10M
	 * only)
	 */
	CVMX_PIP_NIBBLE_ERR = 13ull,
	/* RGM+SPI 16 = FCS error */
	CVMX_PIP_PIP_FCS = 16ull,
	/*
	 * RGM+SPI+PCI 17 = packet was not large enough to pass the
	 * skipper - no inspection could occur.
	 */
	CVMX_PIP_PIP_SKIP_ERR = 17ull,
	/*
	 * RGM+SPI+PCI 18 = malformed l2 (packet not long enough to
	 * cover L2 hdr).
	 */
	CVMX_PIP_PIP_L2_MAL_HDR = 18ull
	/*
	 * NOTES: xx = late collision (data received before collision)
	 *       late collisions cannot be detected by the receiver
	 *       they would appear as JAM bits which would appear as
	 *       bad FCS or carrier extend error which is
	 *       CVMX_PIP_EXTEND_ERR
	 */
} cvmx_pip_rcv_err_t;
170
171/**
172 * This defines the err_code field errors in the work Q entry
173 */
174typedef union {
175 cvmx_pip_l4_err_t l4_err;
176 cvmx_pip_ip_exc_t ip_exc;
177 cvmx_pip_rcv_err_t rcv_err;
178} cvmx_pip_err_t;
179
/**
 * Per-port status/statistics counters gathered from the PIP CSRs.
 */
typedef struct {
	/* Inbound octets marked to be dropped by the IPD */
	uint32_t dropped_octets;
	/* Inbound packets marked to be dropped by the IPD */
	uint32_t dropped_packets;
	/* RAW PCI packets received by PIP per port */
	uint32_t pci_raw_packets;
	/* Number of octets processed by PIP */
	uint32_t octets;
	/* Number of packets processed by PIP */
	uint32_t packets;
	/*
	 * Number of identified L2 multicast packets.  Excludes
	 * broadcast packets; only counts packets whose parse mode is
	 * SKIP_TO_L2.
	 */
	uint32_t multicast_packets;
	/*
	 * Number of identified L2 broadcast packets.  Excludes
	 * multicast packets; only counts packets whose parse mode is
	 * SKIP_TO_L2.
	 */
	uint32_t broadcast_packets;
	/* Packet-length histogram buckets */
	uint32_t len_64_packets;	/* exactly 64B */
	uint32_t len_65_127_packets;	/* 65-127B */
	uint32_t len_128_255_packets;	/* 128-255B */
	uint32_t len_256_511_packets;	/* 256-511B */
	uint32_t len_512_1023_packets;	/* 512-1023B */
	uint32_t len_1024_1518_packets;	/* 1024-1518B */
	uint32_t len_1519_max_packets;	/* 1519B and larger */
	/* Packets with FCS or Align opcode errors */
	uint32_t fcs_align_err_packets;
	/* Packets with length < min */
	uint32_t runt_packets;
	/* Packets with length < min and FCS error */
	uint32_t runt_crc_packets;
	/* Packets with length > max */
	uint32_t oversize_packets;
	/* Packets with length > max and FCS error */
	uint32_t oversize_crc_packets;
	/* Packets without GMX/SPX/PCI errors received by PIP */
	uint32_t inb_packets;
	/*
	 * Total octets from all packets received by PIP, including
	 * CRC.
	 */
	uint64_t inb_octets;
	/* Packets with GMX/SPX/PCI errors received by PIP */
	uint16_t inb_errors;
} cvmx_pip_port_status_t;
240
/**
 * PIP custom instruction header that external hardware may prepend
 * to a packet.
 */
typedef union {
	uint64_t u64;
	struct {
		/*
		 * Documented as R - set if the packet is RAWFULL; when
		 * set this header must be the full 8 bytes.
		 */
		uint64_t rawfull:1;
		uint64_t reserved0:5;	/* must be zero */
		/* PIP parse mode for this packet */
		uint64_t parse_mode:2;
		uint64_t reserved1:1;	/* must be zero */
		/*
		 * Skip amount, including this header, to the beginning
		 * of the packet.
		 */
		uint64_t skip_len:7;
		uint64_t reserved2:6;	/* must be zero */
		/* POW input queue for this packet */
		uint64_t qos:3;
		/* POW input group for this packet */
		uint64_t grp:4;
		/*
		 * Flag to store this packet in the work queue entry,
		 * if possible.
		 */
		uint64_t rs:1;
		/* POW input tag type */
		uint64_t tag_type:2;
		/* POW input tag */
		uint64_t tag:32;
	} s;
} cvmx_pip_pkt_inst_hdr_t;
281
282/* CSR typedefs have been moved to cvmx-csr-*.h */
283
284/**
285 * Configure an ethernet input port
286 *
287 * @port_num: Port number to configure
288 * @port_cfg: Port hardware configuration
289 * @port_tag_cfg:
290 * Port POW tagging configuration
291 */
292static inline void cvmx_pip_config_port(uint64_t port_num,
293 union cvmx_pip_prt_cfgx port_cfg,
294 union cvmx_pip_prt_tagx port_tag_cfg)
295{
296 cvmx_write_csr(CVMX_PIP_PRT_CFGX(port_num), port_cfg.u64);
297 cvmx_write_csr(CVMX_PIP_PRT_TAGX(port_num), port_tag_cfg.u64);
298}
#if 0
/**
 * @deprecated This is a thin wrapper around the Pass1 version of the
 *             CVMX_PIP_QOS_WATCHX CSR; Pass2 added a group field that
 *             is incompatible with this function.  Preferred upgrade
 *             path is to program the CSR directly.
 *
 * Configure one of the global QoS packet watchers.  Each watcher
 * matches a field in a packet to select the QoS queue used for
 * scheduling.
 *
 * @watcher: Watcher number to configure (0 - 3).
 * @match_type: Watcher match type
 * @match_value: Value the watcher will match against
 * @qos: QoS queue for packets matching this watcher
 */
static inline void cvmx_pip_config_watcher(uint64_t watcher,
					   cvmx_pip_qos_watch_types match_type,
					   uint64_t match_value, uint64_t qos)
{
	cvmx_pip_port_watcher_cfg_t cfg;

	cfg.u64 = 0;
	cfg.s.match_type = match_type;
	cfg.s.match_value = match_value;
	cfg.s.qos = qos;

	cvmx_write_csr(CVMX_PIP_QOS_WATCHX(watcher), cfg.u64);
}
#endif
330/**
331 * Configure the VLAN priority to QoS queue mapping.
332 *
333 * @vlan_priority:
334 * VLAN priority (0-7)
335 * @qos: QoS queue for packets matching this watcher
336 */
337static inline void cvmx_pip_config_vlan_qos(uint64_t vlan_priority,
338 uint64_t qos)
339{
340 union cvmx_pip_qos_vlanx pip_qos_vlanx;
341 pip_qos_vlanx.u64 = 0;
342 pip_qos_vlanx.s.qos = qos;
343 cvmx_write_csr(CVMX_PIP_QOS_VLANX(vlan_priority), pip_qos_vlanx.u64);
344}
345
346/**
347 * Configure the Diffserv to QoS queue mapping.
348 *
349 * @diffserv: Diffserv field value (0-63)
350 * @qos: QoS queue for packets matching this watcher
351 */
352static inline void cvmx_pip_config_diffserv_qos(uint64_t diffserv, uint64_t qos)
353{
354 union cvmx_pip_qos_diffx pip_qos_diffx;
355 pip_qos_diffx.u64 = 0;
356 pip_qos_diffx.s.qos = qos;
357 cvmx_write_csr(CVMX_PIP_QOS_DIFFX(diffserv), pip_qos_diffx.u64);
358}
359
360/**
361 * Get the status counters for a port.
362 *
363 * @port_num: Port number to get statistics for.
364 * @clear: Set to 1 to clear the counters after they are read
365 * @status: Where to put the results.
366 */
367static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
368 cvmx_pip_port_status_t *status)
369{
370 union cvmx_pip_stat_ctl pip_stat_ctl;
371 union cvmx_pip_stat0_prtx stat0;
372 union cvmx_pip_stat1_prtx stat1;
373 union cvmx_pip_stat2_prtx stat2;
374 union cvmx_pip_stat3_prtx stat3;
375 union cvmx_pip_stat4_prtx stat4;
376 union cvmx_pip_stat5_prtx stat5;
377 union cvmx_pip_stat6_prtx stat6;
378 union cvmx_pip_stat7_prtx stat7;
379 union cvmx_pip_stat8_prtx stat8;
380 union cvmx_pip_stat9_prtx stat9;
381 union cvmx_pip_stat_inb_pktsx pip_stat_inb_pktsx;
382 union cvmx_pip_stat_inb_octsx pip_stat_inb_octsx;
383 union cvmx_pip_stat_inb_errsx pip_stat_inb_errsx;
384
385 pip_stat_ctl.u64 = 0;
386 pip_stat_ctl.s.rdclr = clear;
387 cvmx_write_csr(CVMX_PIP_STAT_CTL, pip_stat_ctl.u64);
388
389 stat0.u64 = cvmx_read_csr(CVMX_PIP_STAT0_PRTX(port_num));
390 stat1.u64 = cvmx_read_csr(CVMX_PIP_STAT1_PRTX(port_num));
391 stat2.u64 = cvmx_read_csr(CVMX_PIP_STAT2_PRTX(port_num));
392 stat3.u64 = cvmx_read_csr(CVMX_PIP_STAT3_PRTX(port_num));
393 stat4.u64 = cvmx_read_csr(CVMX_PIP_STAT4_PRTX(port_num));
394 stat5.u64 = cvmx_read_csr(CVMX_PIP_STAT5_PRTX(port_num));
395 stat6.u64 = cvmx_read_csr(CVMX_PIP_STAT6_PRTX(port_num));
396 stat7.u64 = cvmx_read_csr(CVMX_PIP_STAT7_PRTX(port_num));
397 stat8.u64 = cvmx_read_csr(CVMX_PIP_STAT8_PRTX(port_num));
398 stat9.u64 = cvmx_read_csr(CVMX_PIP_STAT9_PRTX(port_num));
399 pip_stat_inb_pktsx.u64 =
400 cvmx_read_csr(CVMX_PIP_STAT_INB_PKTSX(port_num));
401 pip_stat_inb_octsx.u64 =
402 cvmx_read_csr(CVMX_PIP_STAT_INB_OCTSX(port_num));
403 pip_stat_inb_errsx.u64 =
404 cvmx_read_csr(CVMX_PIP_STAT_INB_ERRSX(port_num));
405
406 status->dropped_octets = stat0.s.drp_octs;
407 status->dropped_packets = stat0.s.drp_pkts;
408 status->octets = stat1.s.octs;
409 status->pci_raw_packets = stat2.s.raw;
410 status->packets = stat2.s.pkts;
411 status->multicast_packets = stat3.s.mcst;
412 status->broadcast_packets = stat3.s.bcst;
413 status->len_64_packets = stat4.s.h64;
414 status->len_65_127_packets = stat4.s.h65to127;
415 status->len_128_255_packets = stat5.s.h128to255;
416 status->len_256_511_packets = stat5.s.h256to511;
417 status->len_512_1023_packets = stat6.s.h512to1023;
418 status->len_1024_1518_packets = stat6.s.h1024to1518;
419 status->len_1519_max_packets = stat7.s.h1519;
420 status->fcs_align_err_packets = stat7.s.fcs;
421 status->runt_packets = stat8.s.undersz;
422 status->runt_crc_packets = stat8.s.frag;
423 status->oversize_packets = stat9.s.oversz;
424 status->oversize_crc_packets = stat9.s.jabber;
425 status->inb_packets = pip_stat_inb_pktsx.s.pkts;
426 status->inb_octets = pip_stat_inb_octsx.s.octs;
427 status->inb_errors = pip_stat_inb_errsx.s.errs;
428
429 if (cvmx_octeon_is_pass1()) {
430 /*
431 * Kludge to fix Octeon Pass 1 errata - Drop counts
432 * don't work.
433 */
434 if (status->inb_packets > status->packets)
435 status->dropped_packets =
436 status->inb_packets - status->packets;
437 else
438 status->dropped_packets = 0;
439 if (status->inb_octets - status->inb_packets * 4 >
440 status->octets)
441 status->dropped_octets =
442 status->inb_octets - status->inb_packets * 4 -
443 status->octets;
444 else
445 status->dropped_octets = 0;
446 }
447}
448
449/**
450 * Configure the hardware CRC engine
451 *
452 * @interface: Interface to configure (0 or 1)
453 * @invert_result:
454 * Invert the result of the CRC
455 * @reflect: Reflect
456 * @initialization_vector:
457 * CRC initialization vector
458 */
459static inline void cvmx_pip_config_crc(uint64_t interface,
460 uint64_t invert_result, uint64_t reflect,
461 uint32_t initialization_vector)
462{
463 if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
464 union cvmx_pip_crc_ctlx config;
465 union cvmx_pip_crc_ivx pip_crc_ivx;
466
467 config.u64 = 0;
468 config.s.invres = invert_result;
469 config.s.reflect = reflect;
470 cvmx_write_csr(CVMX_PIP_CRC_CTLX(interface), config.u64);
471
472 pip_crc_ivx.u64 = 0;
473 pip_crc_ivx.s.iv = initialization_vector;
474 cvmx_write_csr(CVMX_PIP_CRC_IVX(interface), pip_crc_ivx.u64);
475 }
476}
477
478/**
479 * Clear all bits in a tag mask. This should be called on
480 * startup before any calls to cvmx_pip_tag_mask_set. Each bit
481 * set in the final mask represent a byte used in the packet for
482 * tag generation.
483 *
484 * @mask_index: Which tag mask to clear (0..3)
485 */
486static inline void cvmx_pip_tag_mask_clear(uint64_t mask_index)
487{
488 uint64_t index;
489 union cvmx_pip_tag_incx pip_tag_incx;
490 pip_tag_incx.u64 = 0;
491 pip_tag_incx.s.en = 0;
492 for (index = mask_index * 16; index < (mask_index + 1) * 16; index++)
493 cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
494}
495
496/**
497 * Sets a range of bits in the tag mask. The tag mask is used
498 * when the cvmx_pip_port_tag_cfg_t tag_mode is non zero.
499 * There are four separate masks that can be configured.
500 *
501 * @mask_index: Which tag mask to modify (0..3)
502 * @offset: Offset into the bitmask to set bits at. Use the GCC macro
503 * offsetof() to determine the offsets into packet headers.
504 * For example, offsetof(ethhdr, protocol) returns the offset
505 * of the ethernet protocol field. The bitmask selects which
506 * bytes to include the the tag, with bit offset X selecting
507 * byte at offset X from the beginning of the packet data.
508 * @len: Number of bytes to include. Usually this is the sizeof()
509 * the field.
510 */
511static inline void cvmx_pip_tag_mask_set(uint64_t mask_index, uint64_t offset,
512 uint64_t len)
513{
514 while (len--) {
515 union cvmx_pip_tag_incx pip_tag_incx;
516 uint64_t index = mask_index * 16 + offset / 8;
517 pip_tag_incx.u64 = cvmx_read_csr(CVMX_PIP_TAG_INCX(index));
518 pip_tag_incx.s.en |= 0x80 >> (offset & 0x7);
519 cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
520 offset++;
521 }
522}
523
524#endif /* __CVMX_PIP_H__ */
diff --git a/drivers/staging/octeon/cvmx-pko-defs.h b/drivers/staging/octeon/cvmx-pko-defs.h
new file mode 100644
index 000000000000..50e779cf1ad8
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pko-defs.h
@@ -0,0 +1,1133 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_PKO_DEFS_H__
29#define __CVMX_PKO_DEFS_H__
30
31#define CVMX_PKO_MEM_COUNT0 \
32 CVMX_ADD_IO_SEG(0x0001180050001080ull)
33#define CVMX_PKO_MEM_COUNT1 \
34 CVMX_ADD_IO_SEG(0x0001180050001088ull)
35#define CVMX_PKO_MEM_DEBUG0 \
36 CVMX_ADD_IO_SEG(0x0001180050001100ull)
37#define CVMX_PKO_MEM_DEBUG1 \
38 CVMX_ADD_IO_SEG(0x0001180050001108ull)
39#define CVMX_PKO_MEM_DEBUG10 \
40 CVMX_ADD_IO_SEG(0x0001180050001150ull)
41#define CVMX_PKO_MEM_DEBUG11 \
42 CVMX_ADD_IO_SEG(0x0001180050001158ull)
43#define CVMX_PKO_MEM_DEBUG12 \
44 CVMX_ADD_IO_SEG(0x0001180050001160ull)
45#define CVMX_PKO_MEM_DEBUG13 \
46 CVMX_ADD_IO_SEG(0x0001180050001168ull)
47#define CVMX_PKO_MEM_DEBUG14 \
48 CVMX_ADD_IO_SEG(0x0001180050001170ull)
49#define CVMX_PKO_MEM_DEBUG2 \
50 CVMX_ADD_IO_SEG(0x0001180050001110ull)
51#define CVMX_PKO_MEM_DEBUG3 \
52 CVMX_ADD_IO_SEG(0x0001180050001118ull)
53#define CVMX_PKO_MEM_DEBUG4 \
54 CVMX_ADD_IO_SEG(0x0001180050001120ull)
55#define CVMX_PKO_MEM_DEBUG5 \
56 CVMX_ADD_IO_SEG(0x0001180050001128ull)
57#define CVMX_PKO_MEM_DEBUG6 \
58 CVMX_ADD_IO_SEG(0x0001180050001130ull)
59#define CVMX_PKO_MEM_DEBUG7 \
60 CVMX_ADD_IO_SEG(0x0001180050001138ull)
61#define CVMX_PKO_MEM_DEBUG8 \
62 CVMX_ADD_IO_SEG(0x0001180050001140ull)
63#define CVMX_PKO_MEM_DEBUG9 \
64 CVMX_ADD_IO_SEG(0x0001180050001148ull)
65#define CVMX_PKO_MEM_PORT_PTRS \
66 CVMX_ADD_IO_SEG(0x0001180050001010ull)
67#define CVMX_PKO_MEM_PORT_QOS \
68 CVMX_ADD_IO_SEG(0x0001180050001018ull)
69#define CVMX_PKO_MEM_PORT_RATE0 \
70 CVMX_ADD_IO_SEG(0x0001180050001020ull)
71#define CVMX_PKO_MEM_PORT_RATE1 \
72 CVMX_ADD_IO_SEG(0x0001180050001028ull)
73#define CVMX_PKO_MEM_QUEUE_PTRS \
74 CVMX_ADD_IO_SEG(0x0001180050001000ull)
75#define CVMX_PKO_MEM_QUEUE_QOS \
76 CVMX_ADD_IO_SEG(0x0001180050001008ull)
77#define CVMX_PKO_REG_BIST_RESULT \
78 CVMX_ADD_IO_SEG(0x0001180050000080ull)
79#define CVMX_PKO_REG_CMD_BUF \
80 CVMX_ADD_IO_SEG(0x0001180050000010ull)
81#define CVMX_PKO_REG_CRC_CTLX(offset) \
82 CVMX_ADD_IO_SEG(0x0001180050000028ull + (((offset) & 1) * 8))
83#define CVMX_PKO_REG_CRC_ENABLE \
84 CVMX_ADD_IO_SEG(0x0001180050000020ull)
85#define CVMX_PKO_REG_CRC_IVX(offset) \
86 CVMX_ADD_IO_SEG(0x0001180050000038ull + (((offset) & 1) * 8))
87#define CVMX_PKO_REG_DEBUG0 \
88 CVMX_ADD_IO_SEG(0x0001180050000098ull)
89#define CVMX_PKO_REG_DEBUG1 \
90 CVMX_ADD_IO_SEG(0x00011800500000A0ull)
91#define CVMX_PKO_REG_DEBUG2 \
92 CVMX_ADD_IO_SEG(0x00011800500000A8ull)
93#define CVMX_PKO_REG_DEBUG3 \
94 CVMX_ADD_IO_SEG(0x00011800500000B0ull)
95#define CVMX_PKO_REG_ENGINE_INFLIGHT \
96 CVMX_ADD_IO_SEG(0x0001180050000050ull)
97#define CVMX_PKO_REG_ENGINE_THRESH \
98 CVMX_ADD_IO_SEG(0x0001180050000058ull)
99#define CVMX_PKO_REG_ERROR \
100 CVMX_ADD_IO_SEG(0x0001180050000088ull)
101#define CVMX_PKO_REG_FLAGS \
102 CVMX_ADD_IO_SEG(0x0001180050000000ull)
103#define CVMX_PKO_REG_GMX_PORT_MODE \
104 CVMX_ADD_IO_SEG(0x0001180050000018ull)
105#define CVMX_PKO_REG_INT_MASK \
106 CVMX_ADD_IO_SEG(0x0001180050000090ull)
107#define CVMX_PKO_REG_QUEUE_MODE \
108 CVMX_ADD_IO_SEG(0x0001180050000048ull)
109#define CVMX_PKO_REG_QUEUE_PTRS1 \
110 CVMX_ADD_IO_SEG(0x0001180050000100ull)
111#define CVMX_PKO_REG_READ_IDX \
112 CVMX_ADD_IO_SEG(0x0001180050000008ull)
113
union cvmx_pko_mem_count0 {
	uint64_t u64;
	struct cvmx_pko_mem_count0_s {
		uint64_t reserved_32_63:32;
		uint64_t count:32;
	} s;
	/* Layout is identical on all supported OCTEON models. */
	struct cvmx_pko_mem_count0_s cn30xx;
	struct cvmx_pko_mem_count0_s cn31xx;
	struct cvmx_pko_mem_count0_s cn38xx;
	struct cvmx_pko_mem_count0_s cn38xxp2;
	struct cvmx_pko_mem_count0_s cn50xx;
	struct cvmx_pko_mem_count0_s cn52xx;
	struct cvmx_pko_mem_count0_s cn52xxp1;
	struct cvmx_pko_mem_count0_s cn56xx;
	struct cvmx_pko_mem_count0_s cn56xxp1;
	struct cvmx_pko_mem_count0_s cn58xx;
	struct cvmx_pko_mem_count0_s cn58xxp1;
};
132
union cvmx_pko_mem_count1 {
	uint64_t u64;
	struct cvmx_pko_mem_count1_s {
		uint64_t reserved_48_63:16;
		uint64_t count:48;
	} s;
	/* Layout is identical on all supported OCTEON models. */
	struct cvmx_pko_mem_count1_s cn30xx;
	struct cvmx_pko_mem_count1_s cn31xx;
	struct cvmx_pko_mem_count1_s cn38xx;
	struct cvmx_pko_mem_count1_s cn38xxp2;
	struct cvmx_pko_mem_count1_s cn50xx;
	struct cvmx_pko_mem_count1_s cn52xx;
	struct cvmx_pko_mem_count1_s cn52xxp1;
	struct cvmx_pko_mem_count1_s cn56xx;
	struct cvmx_pko_mem_count1_s cn56xxp1;
	struct cvmx_pko_mem_count1_s cn58xx;
	struct cvmx_pko_mem_count1_s cn58xxp1;
};
151
union cvmx_pko_mem_debug0 {
	uint64_t u64;
	struct cvmx_pko_mem_debug0_s {
		uint64_t fau:28;
		uint64_t cmd:14;
		uint64_t segs:6;
		uint64_t size:16;
	} s;
	/* Layout is identical on all supported OCTEON models. */
	struct cvmx_pko_mem_debug0_s cn30xx;
	struct cvmx_pko_mem_debug0_s cn31xx;
	struct cvmx_pko_mem_debug0_s cn38xx;
	struct cvmx_pko_mem_debug0_s cn38xxp2;
	struct cvmx_pko_mem_debug0_s cn50xx;
	struct cvmx_pko_mem_debug0_s cn52xx;
	struct cvmx_pko_mem_debug0_s cn52xxp1;
	struct cvmx_pko_mem_debug0_s cn56xx;
	struct cvmx_pko_mem_debug0_s cn56xxp1;
	struct cvmx_pko_mem_debug0_s cn58xx;
	struct cvmx_pko_mem_debug0_s cn58xxp1;
};
172
union cvmx_pko_mem_debug1 {
	uint64_t u64;
	struct cvmx_pko_mem_debug1_s {
		uint64_t i:1;
		uint64_t back:4;
		uint64_t pool:3;
		uint64_t size:16;
		uint64_t ptr:40;
	} s;
	/* Layout is identical on all supported OCTEON models. */
	struct cvmx_pko_mem_debug1_s cn30xx;
	struct cvmx_pko_mem_debug1_s cn31xx;
	struct cvmx_pko_mem_debug1_s cn38xx;
	struct cvmx_pko_mem_debug1_s cn38xxp2;
	struct cvmx_pko_mem_debug1_s cn50xx;
	struct cvmx_pko_mem_debug1_s cn52xx;
	struct cvmx_pko_mem_debug1_s cn52xxp1;
	struct cvmx_pko_mem_debug1_s cn56xx;
	struct cvmx_pko_mem_debug1_s cn56xxp1;
	struct cvmx_pko_mem_debug1_s cn58xx;
	struct cvmx_pko_mem_debug1_s cn58xxp1;
};
194
union cvmx_pko_mem_debug10 {
	uint64_t u64;
	struct cvmx_pko_mem_debug10_s {
		uint64_t reserved_0_63:64;
	} s;
	/* CN3xxx family layout. */
	struct cvmx_pko_mem_debug10_cn30xx {
		uint64_t fau:28;
		uint64_t cmd:14;
		uint64_t segs:6;
		uint64_t size:16;
	} cn30xx;
	struct cvmx_pko_mem_debug10_cn30xx cn31xx;
	struct cvmx_pko_mem_debug10_cn30xx cn38xx;
	struct cvmx_pko_mem_debug10_cn30xx cn38xxp2;
	/* CN5xxx family layout. */
	struct cvmx_pko_mem_debug10_cn50xx {
		uint64_t reserved_49_63:15;
		uint64_t ptrs1:17;
		uint64_t reserved_17_31:15;
		uint64_t ptrs2:17;
	} cn50xx;
	struct cvmx_pko_mem_debug10_cn50xx cn52xx;
	struct cvmx_pko_mem_debug10_cn50xx cn52xxp1;
	struct cvmx_pko_mem_debug10_cn50xx cn56xx;
	struct cvmx_pko_mem_debug10_cn50xx cn56xxp1;
	struct cvmx_pko_mem_debug10_cn50xx cn58xx;
	struct cvmx_pko_mem_debug10_cn50xx cn58xxp1;
};
222
union cvmx_pko_mem_debug11 {
	uint64_t u64;
	struct cvmx_pko_mem_debug11_s {
		uint64_t i:1;
		uint64_t back:4;
		uint64_t pool:3;
		uint64_t size:16;
		uint64_t reserved_0_39:40;
	} s;
	/* CN3xxx family layout. */
	struct cvmx_pko_mem_debug11_cn30xx {
		uint64_t i:1;
		uint64_t back:4;
		uint64_t pool:3;
		uint64_t size:16;
		uint64_t ptr:40;
	} cn30xx;
	struct cvmx_pko_mem_debug11_cn30xx cn31xx;
	struct cvmx_pko_mem_debug11_cn30xx cn38xx;
	struct cvmx_pko_mem_debug11_cn30xx cn38xxp2;
	/* CN5xxx family layout. */
	struct cvmx_pko_mem_debug11_cn50xx {
		uint64_t reserved_23_63:41;
		uint64_t maj:1;
		uint64_t uid:3;
		uint64_t sop:1;
		uint64_t len:1;
		uint64_t chk:1;
		uint64_t cnt:13;
		uint64_t mod:3;
	} cn50xx;
	struct cvmx_pko_mem_debug11_cn50xx cn52xx;
	struct cvmx_pko_mem_debug11_cn50xx cn52xxp1;
	struct cvmx_pko_mem_debug11_cn50xx cn56xx;
	struct cvmx_pko_mem_debug11_cn50xx cn56xxp1;
	struct cvmx_pko_mem_debug11_cn50xx cn58xx;
	struct cvmx_pko_mem_debug11_cn50xx cn58xxp1;
};
259
union cvmx_pko_mem_debug12 {
	uint64_t u64;
	struct cvmx_pko_mem_debug12_s {
		uint64_t reserved_0_63:64;
	} s;
	/* CN3xxx family layout. */
	struct cvmx_pko_mem_debug12_cn30xx {
		uint64_t data:64;
	} cn30xx;
	struct cvmx_pko_mem_debug12_cn30xx cn31xx;
	struct cvmx_pko_mem_debug12_cn30xx cn38xx;
	struct cvmx_pko_mem_debug12_cn30xx cn38xxp2;
	/* CN5xxx family layout. */
	struct cvmx_pko_mem_debug12_cn50xx {
		uint64_t fau:28;
		uint64_t cmd:14;
		uint64_t segs:6;
		uint64_t size:16;
	} cn50xx;
	struct cvmx_pko_mem_debug12_cn50xx cn52xx;
	struct cvmx_pko_mem_debug12_cn50xx cn52xxp1;
	struct cvmx_pko_mem_debug12_cn50xx cn56xx;
	struct cvmx_pko_mem_debug12_cn50xx cn56xxp1;
	struct cvmx_pko_mem_debug12_cn50xx cn58xx;
	struct cvmx_pko_mem_debug12_cn50xx cn58xxp1;
};
284
union cvmx_pko_mem_debug13 {
	uint64_t u64;
	struct cvmx_pko_mem_debug13_s {
		uint64_t i:1;
		uint64_t back:4;
		uint64_t pool:3;
		uint64_t reserved_0_55:56;
	} s;
	/* CN3xxx family layout. */
	struct cvmx_pko_mem_debug13_cn30xx {
		uint64_t reserved_51_63:13;
		uint64_t widx:17;
		uint64_t ridx2:17;
		uint64_t widx2:17;
	} cn30xx;
	struct cvmx_pko_mem_debug13_cn30xx cn31xx;
	struct cvmx_pko_mem_debug13_cn30xx cn38xx;
	struct cvmx_pko_mem_debug13_cn30xx cn38xxp2;
	/* CN5xxx family layout. */
	struct cvmx_pko_mem_debug13_cn50xx {
		uint64_t i:1;
		uint64_t back:4;
		uint64_t pool:3;
		uint64_t size:16;
		uint64_t ptr:40;
	} cn50xx;
	struct cvmx_pko_mem_debug13_cn50xx cn52xx;
	struct cvmx_pko_mem_debug13_cn50xx cn52xxp1;
	struct cvmx_pko_mem_debug13_cn50xx cn56xx;
	struct cvmx_pko_mem_debug13_cn50xx cn56xxp1;
	struct cvmx_pko_mem_debug13_cn50xx cn58xx;
	struct cvmx_pko_mem_debug13_cn50xx cn58xxp1;
};
316
union cvmx_pko_mem_debug14 {
	uint64_t u64;
	struct cvmx_pko_mem_debug14_s {
		uint64_t reserved_0_63:64;
	} s;
	/* CN3xxx family layout. */
	struct cvmx_pko_mem_debug14_cn30xx {
		uint64_t reserved_17_63:47;
		uint64_t ridx:17;
	} cn30xx;
	struct cvmx_pko_mem_debug14_cn30xx cn31xx;
	struct cvmx_pko_mem_debug14_cn30xx cn38xx;
	struct cvmx_pko_mem_debug14_cn30xx cn38xxp2;
	/* CN52xx/CN56xx family layout (no CN58xx variant defined). */
	struct cvmx_pko_mem_debug14_cn52xx {
		uint64_t data:64;
	} cn52xx;
	struct cvmx_pko_mem_debug14_cn52xx cn52xxp1;
	struct cvmx_pko_mem_debug14_cn52xx cn56xx;
	struct cvmx_pko_mem_debug14_cn52xx cn56xxp1;
};
336
union cvmx_pko_mem_debug2 {
	uint64_t u64;
	struct cvmx_pko_mem_debug2_s {
		uint64_t i:1;
		uint64_t back:4;
		uint64_t pool:3;
		uint64_t size:16;
		uint64_t ptr:40;
	} s;
	/* Layout is identical on all supported OCTEON models. */
	struct cvmx_pko_mem_debug2_s cn30xx;
	struct cvmx_pko_mem_debug2_s cn31xx;
	struct cvmx_pko_mem_debug2_s cn38xx;
	struct cvmx_pko_mem_debug2_s cn38xxp2;
	struct cvmx_pko_mem_debug2_s cn50xx;
	struct cvmx_pko_mem_debug2_s cn52xx;
	struct cvmx_pko_mem_debug2_s cn52xxp1;
	struct cvmx_pko_mem_debug2_s cn56xx;
	struct cvmx_pko_mem_debug2_s cn56xxp1;
	struct cvmx_pko_mem_debug2_s cn58xx;
	struct cvmx_pko_mem_debug2_s cn58xxp1;
};
358
union cvmx_pko_mem_debug3 {
	uint64_t u64;
	struct cvmx_pko_mem_debug3_s {
		uint64_t reserved_0_63:64;
	} s;
	/* CN3xxx family layout. */
	struct cvmx_pko_mem_debug3_cn30xx {
		uint64_t i:1;
		uint64_t back:4;
		uint64_t pool:3;
		uint64_t size:16;
		uint64_t ptr:40;
	} cn30xx;
	struct cvmx_pko_mem_debug3_cn30xx cn31xx;
	struct cvmx_pko_mem_debug3_cn30xx cn38xx;
	struct cvmx_pko_mem_debug3_cn30xx cn38xxp2;
	/* CN5xxx family layout. */
	struct cvmx_pko_mem_debug3_cn50xx {
		uint64_t data:64;
	} cn50xx;
	struct cvmx_pko_mem_debug3_cn50xx cn52xx;
	struct cvmx_pko_mem_debug3_cn50xx cn52xxp1;
	struct cvmx_pko_mem_debug3_cn50xx cn56xx;
	struct cvmx_pko_mem_debug3_cn50xx cn56xxp1;
	struct cvmx_pko_mem_debug3_cn50xx cn58xx;
	struct cvmx_pko_mem_debug3_cn50xx cn58xxp1;
};
384
union cvmx_pko_mem_debug4 {
	uint64_t u64;
	struct cvmx_pko_mem_debug4_s {
		uint64_t reserved_0_63:64;
	} s;
	/* CN3xxx family layout. */
	struct cvmx_pko_mem_debug4_cn30xx {
		uint64_t data:64;
	} cn30xx;
	struct cvmx_pko_mem_debug4_cn30xx cn31xx;
	struct cvmx_pko_mem_debug4_cn30xx cn38xx;
	struct cvmx_pko_mem_debug4_cn30xx cn38xxp2;
	/* CN50xx/CN58xx layout. */
	struct cvmx_pko_mem_debug4_cn50xx {
		uint64_t cmnd_segs:3;
		uint64_t cmnd_siz:16;
		uint64_t cmnd_off:6;
		uint64_t uid:3;
		uint64_t dread_sop:1;
		uint64_t init_dwrite:1;
		uint64_t chk_once:1;
		uint64_t chk_mode:1;
		uint64_t active:1;
		uint64_t static_p:1;
		uint64_t qos:3;
		uint64_t qcb_ridx:5;
		uint64_t qid_off_max:4;
		uint64_t qid_off:4;
		uint64_t qid_base:8;
		uint64_t wait:1;
		uint64_t minor:2;
		uint64_t major:3;
	} cn50xx;
	/* CN52xx/CN56xx layout. */
	struct cvmx_pko_mem_debug4_cn52xx {
		uint64_t curr_siz:8;
		uint64_t curr_off:16;
		uint64_t cmnd_segs:6;
		uint64_t cmnd_siz:16;
		uint64_t cmnd_off:6;
		uint64_t uid:2;
		uint64_t dread_sop:1;
		uint64_t init_dwrite:1;
		uint64_t chk_once:1;
		uint64_t chk_mode:1;
		uint64_t wait:1;
		uint64_t minor:2;
		uint64_t major:3;
	} cn52xx;
	struct cvmx_pko_mem_debug4_cn52xx cn52xxp1;
	struct cvmx_pko_mem_debug4_cn52xx cn56xx;
	struct cvmx_pko_mem_debug4_cn52xx cn56xxp1;
	struct cvmx_pko_mem_debug4_cn50xx cn58xx;
	struct cvmx_pko_mem_debug4_cn50xx cn58xxp1;
};
437
union cvmx_pko_mem_debug5 {
	uint64_t u64;
	struct cvmx_pko_mem_debug5_s {
		uint64_t reserved_0_63:64;
	} s;
	/* CN3xxx family layout. */
	struct cvmx_pko_mem_debug5_cn30xx {
		uint64_t dwri_mod:1;
		uint64_t dwri_sop:1;
		uint64_t dwri_len:1;
		uint64_t dwri_cnt:13;
		uint64_t cmnd_siz:16;
		uint64_t uid:1;
		uint64_t xfer_wor:1;
		uint64_t xfer_dwr:1;
		uint64_t cbuf_fre:1;
		uint64_t reserved_27_27:1;
		uint64_t chk_mode:1;
		uint64_t active:1;
		uint64_t qos:3;
		uint64_t qcb_ridx:5;
		uint64_t qid_off:3;
		uint64_t qid_base:7;
		uint64_t wait:1;
		uint64_t minor:2;
		uint64_t major:4;
	} cn30xx;
	struct cvmx_pko_mem_debug5_cn30xx cn31xx;
	struct cvmx_pko_mem_debug5_cn30xx cn38xx;
	struct cvmx_pko_mem_debug5_cn30xx cn38xxp2;
	/* CN50xx/CN58xx layout. */
	struct cvmx_pko_mem_debug5_cn50xx {
		uint64_t curr_ptr:29;
		uint64_t curr_siz:16;
		uint64_t curr_off:16;
		uint64_t cmnd_segs:3;
	} cn50xx;
	/* CN52xx/CN56xx layout. */
	struct cvmx_pko_mem_debug5_cn52xx {
		uint64_t reserved_54_63:10;
		uint64_t nxt_inflt:6;
		uint64_t curr_ptr:40;
		uint64_t curr_siz:8;
	} cn52xx;
	struct cvmx_pko_mem_debug5_cn52xx cn52xxp1;
	struct cvmx_pko_mem_debug5_cn52xx cn56xx;
	struct cvmx_pko_mem_debug5_cn52xx cn56xxp1;
	struct cvmx_pko_mem_debug5_cn50xx cn58xx;
	struct cvmx_pko_mem_debug5_cn50xx cn58xxp1;
};
485
486union cvmx_pko_mem_debug6 {
487 uint64_t u64;
488 struct cvmx_pko_mem_debug6_s {
489 uint64_t reserved_37_63:27;
490 uint64_t qid_offres:4;
491 uint64_t qid_offths:4;
492 uint64_t preempter:1;
493 uint64_t preemptee:1;
494 uint64_t preempted:1;
495 uint64_t active:1;
496 uint64_t statc:1;
497 uint64_t qos:3;
498 uint64_t qcb_ridx:5;
499 uint64_t qid_offmax:4;
500 uint64_t reserved_0_11:12;
501 } s;
502 struct cvmx_pko_mem_debug6_cn30xx {
503 uint64_t reserved_11_63:53;
504 uint64_t qid_offm:3;
505 uint64_t static_p:1;
506 uint64_t work_min:3;
507 uint64_t dwri_chk:1;
508 uint64_t dwri_uid:1;
509 uint64_t dwri_mod:2;
510 } cn30xx;
511 struct cvmx_pko_mem_debug6_cn30xx cn31xx;
512 struct cvmx_pko_mem_debug6_cn30xx cn38xx;
513 struct cvmx_pko_mem_debug6_cn30xx cn38xxp2;
514 struct cvmx_pko_mem_debug6_cn50xx {
515 uint64_t reserved_11_63:53;
516 uint64_t curr_ptr:11;
517 } cn50xx;
518 struct cvmx_pko_mem_debug6_cn52xx {
519 uint64_t reserved_37_63:27;
520 uint64_t qid_offres:4;
521 uint64_t qid_offths:4;
522 uint64_t preempter:1;
523 uint64_t preemptee:1;
524 uint64_t preempted:1;
525 uint64_t active:1;
526 uint64_t statc:1;
527 uint64_t qos:3;
528 uint64_t qcb_ridx:5;
529 uint64_t qid_offmax:4;
530 uint64_t qid_off:4;
531 uint64_t qid_base:8;
532 } cn52xx;
533 struct cvmx_pko_mem_debug6_cn52xx cn52xxp1;
534 struct cvmx_pko_mem_debug6_cn52xx cn56xx;
535 struct cvmx_pko_mem_debug6_cn52xx cn56xxp1;
536 struct cvmx_pko_mem_debug6_cn50xx cn58xx;
537 struct cvmx_pko_mem_debug6_cn50xx cn58xxp1;
538};
539
540union cvmx_pko_mem_debug7 {
541 uint64_t u64;
542 struct cvmx_pko_mem_debug7_s {
543 uint64_t qos:5;
544 uint64_t tail:1;
545 uint64_t reserved_0_57:58;
546 } s;
547 struct cvmx_pko_mem_debug7_cn30xx {
548 uint64_t reserved_58_63:6;
549 uint64_t dwb:9;
550 uint64_t start:33;
551 uint64_t size:16;
552 } cn30xx;
553 struct cvmx_pko_mem_debug7_cn30xx cn31xx;
554 struct cvmx_pko_mem_debug7_cn30xx cn38xx;
555 struct cvmx_pko_mem_debug7_cn30xx cn38xxp2;
556 struct cvmx_pko_mem_debug7_cn50xx {
557 uint64_t qos:5;
558 uint64_t tail:1;
559 uint64_t buf_siz:13;
560 uint64_t buf_ptr:33;
561 uint64_t qcb_widx:6;
562 uint64_t qcb_ridx:6;
563 } cn50xx;
564 struct cvmx_pko_mem_debug7_cn50xx cn52xx;
565 struct cvmx_pko_mem_debug7_cn50xx cn52xxp1;
566 struct cvmx_pko_mem_debug7_cn50xx cn56xx;
567 struct cvmx_pko_mem_debug7_cn50xx cn56xxp1;
568 struct cvmx_pko_mem_debug7_cn50xx cn58xx;
569 struct cvmx_pko_mem_debug7_cn50xx cn58xxp1;
570};
571
572union cvmx_pko_mem_debug8 {
573 uint64_t u64;
574 struct cvmx_pko_mem_debug8_s {
575 uint64_t reserved_59_63:5;
576 uint64_t tail:1;
577 uint64_t buf_siz:13;
578 uint64_t reserved_0_44:45;
579 } s;
580 struct cvmx_pko_mem_debug8_cn30xx {
581 uint64_t qos:5;
582 uint64_t tail:1;
583 uint64_t buf_siz:13;
584 uint64_t buf_ptr:33;
585 uint64_t qcb_widx:6;
586 uint64_t qcb_ridx:6;
587 } cn30xx;
588 struct cvmx_pko_mem_debug8_cn30xx cn31xx;
589 struct cvmx_pko_mem_debug8_cn30xx cn38xx;
590 struct cvmx_pko_mem_debug8_cn30xx cn38xxp2;
591 struct cvmx_pko_mem_debug8_cn50xx {
592 uint64_t reserved_28_63:36;
593 uint64_t doorbell:20;
594 uint64_t reserved_6_7:2;
595 uint64_t static_p:1;
596 uint64_t s_tail:1;
597 uint64_t static_q:1;
598 uint64_t qos:3;
599 } cn50xx;
600 struct cvmx_pko_mem_debug8_cn52xx {
601 uint64_t reserved_29_63:35;
602 uint64_t preempter:1;
603 uint64_t doorbell:20;
604 uint64_t reserved_7_7:1;
605 uint64_t preemptee:1;
606 uint64_t static_p:1;
607 uint64_t s_tail:1;
608 uint64_t static_q:1;
609 uint64_t qos:3;
610 } cn52xx;
611 struct cvmx_pko_mem_debug8_cn52xx cn52xxp1;
612 struct cvmx_pko_mem_debug8_cn52xx cn56xx;
613 struct cvmx_pko_mem_debug8_cn52xx cn56xxp1;
614 struct cvmx_pko_mem_debug8_cn50xx cn58xx;
615 struct cvmx_pko_mem_debug8_cn50xx cn58xxp1;
616};
617
618union cvmx_pko_mem_debug9 {
619 uint64_t u64;
620 struct cvmx_pko_mem_debug9_s {
621 uint64_t reserved_49_63:15;
622 uint64_t ptrs0:17;
623 uint64_t reserved_0_31:32;
624 } s;
625 struct cvmx_pko_mem_debug9_cn30xx {
626 uint64_t reserved_28_63:36;
627 uint64_t doorbell:20;
628 uint64_t reserved_5_7:3;
629 uint64_t s_tail:1;
630 uint64_t static_q:1;
631 uint64_t qos:3;
632 } cn30xx;
633 struct cvmx_pko_mem_debug9_cn30xx cn31xx;
634 struct cvmx_pko_mem_debug9_cn38xx {
635 uint64_t reserved_28_63:36;
636 uint64_t doorbell:20;
637 uint64_t reserved_6_7:2;
638 uint64_t static_p:1;
639 uint64_t s_tail:1;
640 uint64_t static_q:1;
641 uint64_t qos:3;
642 } cn38xx;
643 struct cvmx_pko_mem_debug9_cn38xx cn38xxp2;
644 struct cvmx_pko_mem_debug9_cn50xx {
645 uint64_t reserved_49_63:15;
646 uint64_t ptrs0:17;
647 uint64_t reserved_17_31:15;
648 uint64_t ptrs3:17;
649 } cn50xx;
650 struct cvmx_pko_mem_debug9_cn50xx cn52xx;
651 struct cvmx_pko_mem_debug9_cn50xx cn52xxp1;
652 struct cvmx_pko_mem_debug9_cn50xx cn56xx;
653 struct cvmx_pko_mem_debug9_cn50xx cn56xxp1;
654 struct cvmx_pko_mem_debug9_cn50xx cn58xx;
655 struct cvmx_pko_mem_debug9_cn50xx cn58xxp1;
656};
657
/*
 * PKO_MEM_PORT_* / PKO_MEM_QUEUE_*: indirect-access configuration
 * registers for per-port and per-queue PKO state.  Same overlay
 * scheme as above: u64 raw value plus model-specific bitfield views,
 * MSB-first.  Do not reorder fields.
 */

/* PKO_MEM_PORT_PTRS: port -> engine/backpressure mapping (cn52xx+). */
union cvmx_pko_mem_port_ptrs {
	uint64_t u64;
	struct cvmx_pko_mem_port_ptrs_s {
		uint64_t reserved_62_63:2;
		uint64_t static_p:1;
		uint64_t qos_mask:8;
		uint64_t reserved_16_52:37;
		uint64_t bp_port:6;
		uint64_t eid:4;
		uint64_t pid:6;
	} s;
	struct cvmx_pko_mem_port_ptrs_s cn52xx;
	struct cvmx_pko_mem_port_ptrs_s cn52xxp1;
	struct cvmx_pko_mem_port_ptrs_s cn56xx;
	struct cvmx_pko_mem_port_ptrs_s cn56xxp1;
};

/* PKO_MEM_PORT_QOS: per-port QoS mask (cn52xx+). */
union cvmx_pko_mem_port_qos {
	uint64_t u64;
	struct cvmx_pko_mem_port_qos_s {
		uint64_t reserved_61_63:3;
		uint64_t qos_mask:8;
		uint64_t reserved_10_52:43;
		uint64_t eid:4;
		uint64_t pid:6;
	} s;
	struct cvmx_pko_mem_port_qos_s cn52xx;
	struct cvmx_pko_mem_port_qos_s cn52xxp1;
	struct cvmx_pko_mem_port_qos_s cn56xx;
	struct cvmx_pko_mem_port_qos_s cn56xxp1;
};

/* PKO_MEM_PORT_RATE0: per-port rate-limit word/packet rates (cn52xx+). */
union cvmx_pko_mem_port_rate0 {
	uint64_t u64;
	struct cvmx_pko_mem_port_rate0_s {
		uint64_t reserved_51_63:13;
		uint64_t rate_word:19;
		uint64_t rate_pkt:24;
		uint64_t reserved_6_7:2;
		uint64_t pid:6;
	} s;
	struct cvmx_pko_mem_port_rate0_s cn52xx;
	struct cvmx_pko_mem_port_rate0_s cn52xxp1;
	struct cvmx_pko_mem_port_rate0_s cn56xx;
	struct cvmx_pko_mem_port_rate0_s cn56xxp1;
};

/* PKO_MEM_PORT_RATE1: per-port rate-limit accumulator limit (cn52xx+). */
union cvmx_pko_mem_port_rate1 {
	uint64_t u64;
	struct cvmx_pko_mem_port_rate1_s {
		uint64_t reserved_32_63:32;
		uint64_t rate_lim:24;
		uint64_t reserved_6_7:2;
		uint64_t pid:6;
	} s;
	struct cvmx_pko_mem_port_rate1_s cn52xx;
	struct cvmx_pko_mem_port_rate1_s cn52xxp1;
	struct cvmx_pko_mem_port_rate1_s cn56xx;
	struct cvmx_pko_mem_port_rate1_s cn56xxp1;
};

/* PKO_MEM_QUEUE_PTRS: queue -> port mapping and command buffer pointer.
 * Written by cvmx_pko_config_port()/cvmx_pko_shutdown(); identical
 * layout on all supported models. */
union cvmx_pko_mem_queue_ptrs {
	uint64_t u64;
	struct cvmx_pko_mem_queue_ptrs_s {
		uint64_t s_tail:1;
		uint64_t static_p:1;
		uint64_t static_q:1;
		uint64_t qos_mask:8;
		uint64_t buf_ptr:36;
		uint64_t tail:1;
		uint64_t index:3;
		uint64_t port:6;
		uint64_t queue:7;
	} s;
	struct cvmx_pko_mem_queue_ptrs_s cn30xx;
	struct cvmx_pko_mem_queue_ptrs_s cn31xx;
	struct cvmx_pko_mem_queue_ptrs_s cn38xx;
	struct cvmx_pko_mem_queue_ptrs_s cn38xxp2;
	struct cvmx_pko_mem_queue_ptrs_s cn50xx;
	struct cvmx_pko_mem_queue_ptrs_s cn52xx;
	struct cvmx_pko_mem_queue_ptrs_s cn52xxp1;
	struct cvmx_pko_mem_queue_ptrs_s cn56xx;
	struct cvmx_pko_mem_queue_ptrs_s cn56xxp1;
	struct cvmx_pko_mem_queue_ptrs_s cn58xx;
	struct cvmx_pko_mem_queue_ptrs_s cn58xxp1;
};

/* PKO_MEM_QUEUE_QOS: per-queue QoS mask. */
union cvmx_pko_mem_queue_qos {
	uint64_t u64;
	struct cvmx_pko_mem_queue_qos_s {
		uint64_t reserved_61_63:3;
		uint64_t qos_mask:8;
		uint64_t reserved_13_52:40;
		uint64_t pid:6;
		uint64_t qid:7;
	} s;
	struct cvmx_pko_mem_queue_qos_s cn30xx;
	struct cvmx_pko_mem_queue_qos_s cn31xx;
	struct cvmx_pko_mem_queue_qos_s cn38xx;
	struct cvmx_pko_mem_queue_qos_s cn38xxp2;
	struct cvmx_pko_mem_queue_qos_s cn50xx;
	struct cvmx_pko_mem_queue_qos_s cn52xx;
	struct cvmx_pko_mem_queue_qos_s cn52xxp1;
	struct cvmx_pko_mem_queue_qos_s cn56xx;
	struct cvmx_pko_mem_queue_qos_s cn56xxp1;
	struct cvmx_pko_mem_queue_qos_s cn58xx;
	struct cvmx_pko_mem_queue_qos_s cn58xxp1;
};
766
/* PKO_REG_BIST_RESULT: built-in self-test results for PKO memories.
 * One bit (or small field) per internal RAM; per-model layouts. */
union cvmx_pko_reg_bist_result {
	uint64_t u64;
	struct cvmx_pko_reg_bist_result_s {
		uint64_t reserved_0_63:64;
	} s;
	struct cvmx_pko_reg_bist_result_cn30xx {
		uint64_t reserved_27_63:37;
		uint64_t psb2:5;
		uint64_t count:1;
		uint64_t rif:1;
		uint64_t wif:1;
		uint64_t ncb:1;
		uint64_t out:1;
		uint64_t crc:1;
		uint64_t chk:1;
		uint64_t qsb:2;
		uint64_t qcb:2;
		uint64_t pdb:4;
		uint64_t psb:7;
	} cn30xx;
	struct cvmx_pko_reg_bist_result_cn30xx cn31xx;
	struct cvmx_pko_reg_bist_result_cn30xx cn38xx;
	struct cvmx_pko_reg_bist_result_cn30xx cn38xxp2;
	struct cvmx_pko_reg_bist_result_cn50xx {
		uint64_t reserved_33_63:31;
		uint64_t csr:1;
		uint64_t iob:1;
		uint64_t out_crc:1;
		uint64_t out_ctl:3;
		uint64_t out_sta:1;
		uint64_t out_wif:1;
		uint64_t prt_chk:3;
		uint64_t prt_nxt:1;
		uint64_t prt_psb:6;
		uint64_t ncb_inb:2;
		uint64_t prt_qcb:2;
		uint64_t prt_qsb:3;
		uint64_t dat_dat:4;
		uint64_t dat_ptr:4;
	} cn50xx;
	struct cvmx_pko_reg_bist_result_cn52xx {
		uint64_t reserved_35_63:29;
		uint64_t csr:1;
		uint64_t iob:1;
		uint64_t out_dat:1;
		uint64_t out_ctl:3;
		uint64_t out_sta:1;
		uint64_t out_wif:1;
		uint64_t prt_chk:3;
		uint64_t prt_nxt:1;
		uint64_t prt_psb:8;
		uint64_t ncb_inb:2;
		uint64_t prt_qcb:2;
		uint64_t prt_qsb:3;
		uint64_t prt_ctl:2;
		uint64_t dat_dat:2;
		uint64_t dat_ptr:4;
	} cn52xx;
	struct cvmx_pko_reg_bist_result_cn52xx cn52xxp1;
	struct cvmx_pko_reg_bist_result_cn52xx cn56xx;
	struct cvmx_pko_reg_bist_result_cn52xx cn56xxp1;
	struct cvmx_pko_reg_bist_result_cn50xx cn58xx;
	struct cvmx_pko_reg_bist_result_cn50xx cn58xxp1;
};

/* PKO_REG_CMD_BUF: FPA pool and size of the PKO command buffers.
 * Programmed by cvmx_pko_initialize_global(). */
union cvmx_pko_reg_cmd_buf {
	uint64_t u64;
	struct cvmx_pko_reg_cmd_buf_s {
		uint64_t reserved_23_63:41;
		uint64_t pool:3;
		uint64_t reserved_13_19:7;
		uint64_t size:13;
	} s;
	struct cvmx_pko_reg_cmd_buf_s cn30xx;
	struct cvmx_pko_reg_cmd_buf_s cn31xx;
	struct cvmx_pko_reg_cmd_buf_s cn38xx;
	struct cvmx_pko_reg_cmd_buf_s cn38xxp2;
	struct cvmx_pko_reg_cmd_buf_s cn50xx;
	struct cvmx_pko_reg_cmd_buf_s cn52xx;
	struct cvmx_pko_reg_cmd_buf_s cn52xxp1;
	struct cvmx_pko_reg_cmd_buf_s cn56xx;
	struct cvmx_pko_reg_cmd_buf_s cn56xxp1;
	struct cvmx_pko_reg_cmd_buf_s cn58xx;
	struct cvmx_pko_reg_cmd_buf_s cn58xxp1;
};

/* PKO_REG_CRC_CTL(X): CRC generation control (cn38xx/cn58xx only). */
union cvmx_pko_reg_crc_ctlx {
	uint64_t u64;
	struct cvmx_pko_reg_crc_ctlx_s {
		uint64_t reserved_2_63:62;
		uint64_t invres:1;
		uint64_t refin:1;
	} s;
	struct cvmx_pko_reg_crc_ctlx_s cn38xx;
	struct cvmx_pko_reg_crc_ctlx_s cn38xxp2;
	struct cvmx_pko_reg_crc_ctlx_s cn58xx;
	struct cvmx_pko_reg_crc_ctlx_s cn58xxp1;
};

/* PKO_REG_CRC_ENABLE: per-port CRC enable bits (cn38xx/cn58xx only). */
union cvmx_pko_reg_crc_enable {
	uint64_t u64;
	struct cvmx_pko_reg_crc_enable_s {
		uint64_t reserved_32_63:32;
		uint64_t enable:32;
	} s;
	struct cvmx_pko_reg_crc_enable_s cn38xx;
	struct cvmx_pko_reg_crc_enable_s cn38xxp2;
	struct cvmx_pko_reg_crc_enable_s cn58xx;
	struct cvmx_pko_reg_crc_enable_s cn58xxp1;
};

/* PKO_REG_CRC_IV(X): CRC initial value (cn38xx/cn58xx only). */
union cvmx_pko_reg_crc_ivx {
	uint64_t u64;
	struct cvmx_pko_reg_crc_ivx_s {
		uint64_t reserved_32_63:32;
		uint64_t iv:32;
	} s;
	struct cvmx_pko_reg_crc_ivx_s cn38xx;
	struct cvmx_pko_reg_crc_ivx_s cn38xxp2;
	struct cvmx_pko_reg_crc_ivx_s cn58xx;
	struct cvmx_pko_reg_crc_ivx_s cn58xxp1;
};
889
/* PKO_REG_DEBUG0..3: internal assertion bits.  On cn30xx-family chips
 * only DEBUG0 exists (17 bits); cn50xx+ expose four full 64-bit words. */
union cvmx_pko_reg_debug0 {
	uint64_t u64;
	struct cvmx_pko_reg_debug0_s {
		uint64_t asserts:64;
	} s;
	struct cvmx_pko_reg_debug0_cn30xx {
		uint64_t reserved_17_63:47;
		uint64_t asserts:17;
	} cn30xx;
	struct cvmx_pko_reg_debug0_cn30xx cn31xx;
	struct cvmx_pko_reg_debug0_cn30xx cn38xx;
	struct cvmx_pko_reg_debug0_cn30xx cn38xxp2;
	struct cvmx_pko_reg_debug0_s cn50xx;
	struct cvmx_pko_reg_debug0_s cn52xx;
	struct cvmx_pko_reg_debug0_s cn52xxp1;
	struct cvmx_pko_reg_debug0_s cn56xx;
	struct cvmx_pko_reg_debug0_s cn56xxp1;
	struct cvmx_pko_reg_debug0_s cn58xx;
	struct cvmx_pko_reg_debug0_s cn58xxp1;
};

/* PKO_REG_DEBUG1: assertion bits, cn50xx and later only. */
union cvmx_pko_reg_debug1 {
	uint64_t u64;
	struct cvmx_pko_reg_debug1_s {
		uint64_t asserts:64;
	} s;
	struct cvmx_pko_reg_debug1_s cn50xx;
	struct cvmx_pko_reg_debug1_s cn52xx;
	struct cvmx_pko_reg_debug1_s cn52xxp1;
	struct cvmx_pko_reg_debug1_s cn56xx;
	struct cvmx_pko_reg_debug1_s cn56xxp1;
	struct cvmx_pko_reg_debug1_s cn58xx;
	struct cvmx_pko_reg_debug1_s cn58xxp1;
};

/* PKO_REG_DEBUG2: assertion bits, cn50xx and later only. */
union cvmx_pko_reg_debug2 {
	uint64_t u64;
	struct cvmx_pko_reg_debug2_s {
		uint64_t asserts:64;
	} s;
	struct cvmx_pko_reg_debug2_s cn50xx;
	struct cvmx_pko_reg_debug2_s cn52xx;
	struct cvmx_pko_reg_debug2_s cn52xxp1;
	struct cvmx_pko_reg_debug2_s cn56xx;
	struct cvmx_pko_reg_debug2_s cn56xxp1;
	struct cvmx_pko_reg_debug2_s cn58xx;
	struct cvmx_pko_reg_debug2_s cn58xxp1;
};

/* PKO_REG_DEBUG3: assertion bits, cn50xx and later only. */
union cvmx_pko_reg_debug3 {
	uint64_t u64;
	struct cvmx_pko_reg_debug3_s {
		uint64_t asserts:64;
	} s;
	struct cvmx_pko_reg_debug3_s cn50xx;
	struct cvmx_pko_reg_debug3_s cn52xx;
	struct cvmx_pko_reg_debug3_s cn52xxp1;
	struct cvmx_pko_reg_debug3_s cn56xx;
	struct cvmx_pko_reg_debug3_s cn56xxp1;
	struct cvmx_pko_reg_debug3_s cn58xx;
	struct cvmx_pko_reg_debug3_s cn58xxp1;
};
952
/* PKO_REG_ENGINE_INFLIGHT: max in-flight packets per engine (cn52xx+). */
union cvmx_pko_reg_engine_inflight {
	uint64_t u64;
	struct cvmx_pko_reg_engine_inflight_s {
		uint64_t reserved_40_63:24;
		uint64_t engine9:4;
		uint64_t engine8:4;
		uint64_t engine7:4;
		uint64_t engine6:4;
		uint64_t engine5:4;
		uint64_t engine4:4;
		uint64_t engine3:4;
		uint64_t engine2:4;
		uint64_t engine1:4;
		uint64_t engine0:4;
	} s;
	struct cvmx_pko_reg_engine_inflight_s cn52xx;
	struct cvmx_pko_reg_engine_inflight_s cn52xxp1;
	struct cvmx_pko_reg_engine_inflight_s cn56xx;
	struct cvmx_pko_reg_engine_inflight_s cn56xxp1;
};

/* PKO_REG_ENGINE_THRESH: per-engine threshold mask (cn52xx+). */
union cvmx_pko_reg_engine_thresh {
	uint64_t u64;
	struct cvmx_pko_reg_engine_thresh_s {
		uint64_t reserved_10_63:54;
		uint64_t mask:10;
	} s;
	struct cvmx_pko_reg_engine_thresh_s cn52xx;
	struct cvmx_pko_reg_engine_thresh_s cn52xxp1;
	struct cvmx_pko_reg_engine_thresh_s cn56xx;
	struct cvmx_pko_reg_engine_thresh_s cn56xxp1;
};

/* PKO_REG_ERROR: error status bits.  The currzero bit exists only on
 * cn50xx and later; older chips use the two-bit cn30xx layout. */
union cvmx_pko_reg_error {
	uint64_t u64;
	struct cvmx_pko_reg_error_s {
		uint64_t reserved_3_63:61;
		uint64_t currzero:1;
		uint64_t doorbell:1;
		uint64_t parity:1;
	} s;
	struct cvmx_pko_reg_error_cn30xx {
		uint64_t reserved_2_63:62;
		uint64_t doorbell:1;
		uint64_t parity:1;
	} cn30xx;
	struct cvmx_pko_reg_error_cn30xx cn31xx;
	struct cvmx_pko_reg_error_cn30xx cn38xx;
	struct cvmx_pko_reg_error_cn30xx cn38xxp2;
	struct cvmx_pko_reg_error_s cn50xx;
	struct cvmx_pko_reg_error_s cn52xx;
	struct cvmx_pko_reg_error_s cn52xxp1;
	struct cvmx_pko_reg_error_s cn56xx;
	struct cvmx_pko_reg_error_s cn56xxp1;
	struct cvmx_pko_reg_error_s cn58xx;
	struct cvmx_pko_reg_error_s cn58xxp1;
};

/* PKO_REG_FLAGS: global enable/reset control, used by
 * cvmx_pko_enable()/cvmx_pko_disable()/__cvmx_pko_reset(). */
union cvmx_pko_reg_flags {
	uint64_t u64;
	struct cvmx_pko_reg_flags_s {
		uint64_t reserved_4_63:60;
		uint64_t reset:1;
		uint64_t store_be:1;
		uint64_t ena_dwb:1;
		uint64_t ena_pko:1;
	} s;
	struct cvmx_pko_reg_flags_s cn30xx;
	struct cvmx_pko_reg_flags_s cn31xx;
	struct cvmx_pko_reg_flags_s cn38xx;
	struct cvmx_pko_reg_flags_s cn38xxp2;
	struct cvmx_pko_reg_flags_s cn50xx;
	struct cvmx_pko_reg_flags_s cn52xx;
	struct cvmx_pko_reg_flags_s cn52xxp1;
	struct cvmx_pko_reg_flags_s cn56xx;
	struct cvmx_pko_reg_flags_s cn56xxp1;
	struct cvmx_pko_reg_flags_s cn58xx;
	struct cvmx_pko_reg_flags_s cn58xxp1;
};

/* PKO_REG_GMX_PORT_MODE: GMX interface port-count mode selectors. */
union cvmx_pko_reg_gmx_port_mode {
	uint64_t u64;
	struct cvmx_pko_reg_gmx_port_mode_s {
		uint64_t reserved_6_63:58;
		uint64_t mode1:3;
		uint64_t mode0:3;
	} s;
	struct cvmx_pko_reg_gmx_port_mode_s cn30xx;
	struct cvmx_pko_reg_gmx_port_mode_s cn31xx;
	struct cvmx_pko_reg_gmx_port_mode_s cn38xx;
	struct cvmx_pko_reg_gmx_port_mode_s cn38xxp2;
	struct cvmx_pko_reg_gmx_port_mode_s cn50xx;
	struct cvmx_pko_reg_gmx_port_mode_s cn52xx;
	struct cvmx_pko_reg_gmx_port_mode_s cn52xxp1;
	struct cvmx_pko_reg_gmx_port_mode_s cn56xx;
	struct cvmx_pko_reg_gmx_port_mode_s cn56xxp1;
	struct cvmx_pko_reg_gmx_port_mode_s cn58xx;
	struct cvmx_pko_reg_gmx_port_mode_s cn58xxp1;
};

/* PKO_REG_INT_MASK: interrupt enables; mirrors PKO_REG_ERROR layout. */
union cvmx_pko_reg_int_mask {
	uint64_t u64;
	struct cvmx_pko_reg_int_mask_s {
		uint64_t reserved_3_63:61;
		uint64_t currzero:1;
		uint64_t doorbell:1;
		uint64_t parity:1;
	} s;
	struct cvmx_pko_reg_int_mask_cn30xx {
		uint64_t reserved_2_63:62;
		uint64_t doorbell:1;
		uint64_t parity:1;
	} cn30xx;
	struct cvmx_pko_reg_int_mask_cn30xx cn31xx;
	struct cvmx_pko_reg_int_mask_cn30xx cn38xx;
	struct cvmx_pko_reg_int_mask_cn30xx cn38xxp2;
	struct cvmx_pko_reg_int_mask_s cn50xx;
	struct cvmx_pko_reg_int_mask_s cn52xx;
	struct cvmx_pko_reg_int_mask_s cn52xxp1;
	struct cvmx_pko_reg_int_mask_s cn56xx;
	struct cvmx_pko_reg_int_mask_s cn56xxp1;
	struct cvmx_pko_reg_int_mask_s cn58xx;
	struct cvmx_pko_reg_int_mask_s cn58xxp1;
};

/* PKO_REG_QUEUE_MODE: queue-memory mode, written by
 * cvmx_pko_initialize_global() to shrink internal queue RAM use. */
union cvmx_pko_reg_queue_mode {
	uint64_t u64;
	struct cvmx_pko_reg_queue_mode_s {
		uint64_t reserved_2_63:62;
		uint64_t mode:2;
	} s;
	struct cvmx_pko_reg_queue_mode_s cn30xx;
	struct cvmx_pko_reg_queue_mode_s cn31xx;
	struct cvmx_pko_reg_queue_mode_s cn38xx;
	struct cvmx_pko_reg_queue_mode_s cn38xxp2;
	struct cvmx_pko_reg_queue_mode_s cn50xx;
	struct cvmx_pko_reg_queue_mode_s cn52xx;
	struct cvmx_pko_reg_queue_mode_s cn52xxp1;
	struct cvmx_pko_reg_queue_mode_s cn56xx;
	struct cvmx_pko_reg_queue_mode_s cn56xxp1;
	struct cvmx_pko_reg_queue_mode_s cn58xx;
	struct cvmx_pko_reg_queue_mode_s cn58xxp1;
};

/* PKO_REG_QUEUE_PTRS1: high bits of index/queue-id for chips with
 * more than 128 queues (qid7 is queue-id bit 7). */
union cvmx_pko_reg_queue_ptrs1 {
	uint64_t u64;
	struct cvmx_pko_reg_queue_ptrs1_s {
		uint64_t reserved_2_63:62;
		uint64_t idx3:1;
		uint64_t qid7:1;
	} s;
	struct cvmx_pko_reg_queue_ptrs1_s cn50xx;
	struct cvmx_pko_reg_queue_ptrs1_s cn52xx;
	struct cvmx_pko_reg_queue_ptrs1_s cn52xxp1;
	struct cvmx_pko_reg_queue_ptrs1_s cn56xx;
	struct cvmx_pko_reg_queue_ptrs1_s cn56xxp1;
	struct cvmx_pko_reg_queue_ptrs1_s cn58xx;
	struct cvmx_pko_reg_queue_ptrs1_s cn58xxp1;
};

/* PKO_REG_READ_IDX: index/auto-increment control for the indirect
 * PKO_MEM_* register reads above. */
union cvmx_pko_reg_read_idx {
	uint64_t u64;
	struct cvmx_pko_reg_read_idx_s {
		uint64_t reserved_16_63:48;
		uint64_t inc:8;
		uint64_t index:8;
	} s;
	struct cvmx_pko_reg_read_idx_s cn30xx;
	struct cvmx_pko_reg_read_idx_s cn31xx;
	struct cvmx_pko_reg_read_idx_s cn38xx;
	struct cvmx_pko_reg_read_idx_s cn38xxp2;
	struct cvmx_pko_reg_read_idx_s cn50xx;
	struct cvmx_pko_reg_read_idx_s cn52xx;
	struct cvmx_pko_reg_read_idx_s cn52xxp1;
	struct cvmx_pko_reg_read_idx_s cn56xx;
	struct cvmx_pko_reg_read_idx_s cn56xxp1;
	struct cvmx_pko_reg_read_idx_s cn58xx;
	struct cvmx_pko_reg_read_idx_s cn58xxp1;
};
1132
1133#endif
diff --git a/drivers/staging/octeon/cvmx-pko.c b/drivers/staging/octeon/cvmx-pko.c
new file mode 100644
index 000000000000..00db91529b19
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pko.c
@@ -0,0 +1,506 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 * Support library for the hardware Packet Output unit.
30 */
31
32#include <asm/octeon/octeon.h>
33
34#include "cvmx-config.h"
35#include "cvmx-pko.h"
36#include "cvmx-helper.h"
37
38/**
39 * Internal state of packet output
40 */
41
/**
 * Call before any other calls to initialize the packet
 * output system.  This does chip global config, and should only be
 * done by one core.
 */

void cvmx_pko_initialize_global(void)
{
	int i;
	uint64_t priority = 8;
	union cvmx_pko_reg_cmd_buf config;

	/*
	 * Set the size of the PKO command buffers to an odd number of
	 * 64bit words. This allows the normal two word send to stay
	 * aligned and never span a command word buffer.
	 */
	config.u64 = 0;
	config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
	config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;

	cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);

	/*
	 * Park every queue on the illegal PID with round-robin
	 * priority 8; real ports reconfigure their queues later.
	 */
	for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++)
		cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
				     &priority);

	/*
	 * If we aren't using all of the queues optimize PKO's
	 * internal memory.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
	    || OCTEON_IS_MODEL(OCTEON_CN56XX)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
		int num_interfaces = cvmx_helper_get_number_of_interfaces();
		int last_port =
		    cvmx_helper_get_last_ipd_port(num_interfaces - 1);
		int max_queues =
		    cvmx_pko_get_base_queue(last_port) +
		    cvmx_pko_get_num_queues(last_port);
		/*
		 * QUEUE_MODE 2 selects the smallest queue memory, 1 the
		 * middle size.  NOTE(review): the thresholds differ on
		 * CN38XX (32/64) vs later chips (64/128) -- presumably
		 * per-chip queue RAM sizing; confirm against the HRM.
		 */
		if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
			if (max_queues <= 32)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
			else if (max_queues <= 64)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
		} else {
			if (max_queues <= 64)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
			else if (max_queues <= 128)
				cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
		}
	}
}
95
/**
 * Per-core PKO initialization.  Must run on every core that will
 * transmit packets, and only after the FPA has been initialized and
 * its pools filled with pages.
 *
 * Returns 0 on success, non-zero on failure.
 */
int cvmx_pko_initialize_local(void)
{
	/* All PKO state is global; there is no per-core work to do. */
	return 0;
}
109
110/**
111 * Enables the packet output hardware. It must already be
112 * configured.
113 */
114void cvmx_pko_enable(void)
115{
116 union cvmx_pko_reg_flags flags;
117
118 flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
119 if (flags.s.ena_pko)
120 cvmx_dprintf
121 ("Warning: Enabling PKO when PKO already enabled.\n");
122
123 flags.s.ena_dwb = 1;
124 flags.s.ena_pko = 1;
125 /*
126 * always enable big endian for 3-word command. Does nothing
127 * for 2-word.
128 */
129 flags.s.store_be = 1;
130 cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
131}
132
133/**
134 * Disables the packet output. Does not affect any configuration.
135 */
136void cvmx_pko_disable(void)
137{
138 union cvmx_pko_reg_flags pko_reg_flags;
139 pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
140 pko_reg_flags.s.ena_pko = 0;
141 cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
142}
143
144
145/**
146 * Reset the packet output.
147 */
148static void __cvmx_pko_reset(void)
149{
150 union cvmx_pko_reg_flags pko_reg_flags;
151 pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
152 pko_reg_flags.s.reset = 1;
153 cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
154}
155
/**
 * Shutdown and free resources required by packet output.
 *
 * Disables PKO, points every queue at the illegal PID with a null
 * buffer pointer, tears down each queue's command queue, then resets
 * the whole block.  Should not run while packets are still in flight.
 */
void cvmx_pko_shutdown(void)
{
	union cvmx_pko_mem_queue_ptrs config;
	int queue;

	cvmx_pko_disable();

	for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
		config.u64 = 0;
		config.s.tail = 1;
		config.s.index = 0;
		/* Park the queue on the illegal port so nothing uses it */
		config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
		config.s.queue = queue & 0x7f;
		config.s.qos_mask = 0;
		config.s.buf_ptr = 0;
		/*
		 * Queue-id bit 7 lives in the separate PTRS1 register on
		 * chips newer than the CN3XXX family; write it first.
		 */
		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			union cvmx_pko_reg_queue_ptrs1 config1;
			config1.u64 = 0;
			config1.s.qid7 = queue >> 7;
			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		}
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
		cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
	}
	__cvmx_pko_reset();
}
185
/**
 * Configure a output port and the associated queues for use.
 *
 * @port: Port to configure.
 * @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port
 * @priority: Array of priority levels for each queue. Values are
 * allowed to be 0-8. A value of 8 get 8 times the traffic
 * of a value of 1. A value of 0 indicates that no rounds
 * will be participated in. These priorities can be changed
 * on the fly while the pko is enabled. A priority of 9
 * indicates that static priority should be used. If static
 * priority is used all queues with static priority must be
 * contiguous starting at the base_queue, and lower numbered
 * queues have higher priority than higher numbered queues.
 * There must be num_queues elements in the array.
 *
 * Returns CVMX_PKO_SUCCESS, or a CVMX_PKO_* error code on bad
 * arguments or command-queue setup failure.
 */
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
				       uint64_t num_queues,
				       const uint64_t priority[])
{
	cvmx_pko_status_t result_code;
	uint64_t queue;
	union cvmx_pko_mem_queue_ptrs config;
	union cvmx_pko_reg_queue_ptrs1 config1;
	/* -1 means "no static-priority queues found (yet)" */
	int static_priority_base = -1;
	int static_priority_end = -1;

	/* The illegal PID is allowed: it is how queues are parked */
	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
			     (unsigned long long)port);
		return CVMX_PKO_INVALID_PORT;
	}

	if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
		cvmx_dprintf
		    ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
		     (unsigned long long)(base_queue + num_queues));
		return CVMX_PKO_INVALID_QUEUE;
	}

	if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
		/*
		 * Validate the static queue priority setup and set
		 * static_priority_base and static_priority_end
		 * accordingly.
		 */
		for (queue = 0; queue < num_queues; queue++) {
			/* Find first queue of static priority */
			if (static_priority_base == -1
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY)
				static_priority_base = queue;
			/* Find last queue of static priority */
			if (static_priority_base != -1
			    && static_priority_end == -1
			    && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
			    && queue)
				static_priority_end = queue - 1;
			else if (static_priority_base != -1
				 && static_priority_end == -1
				 && queue == num_queues - 1)
				/* all queues are static priority */
				static_priority_end = queue;
			/*
			 * Check to make sure all static priority
			 * queues are contiguous.  Also catches some
			 * cases of static priorities not starting at
			 * queue 0.
			 */
			if (static_priority_end != -1
			    && (int)queue > static_priority_end
			    && priority[queue] ==
			    CVMX_PKO_QUEUE_STATIC_PRIORITY) {
				cvmx_dprintf("ERROR: cvmx_pko_config_port: "
					     "Static priority queues aren't "
					     "contiguous or don't start at "
					     "base queue. q: %d, eq: %d\n",
					     (int)queue, static_priority_end);
				return CVMX_PKO_INVALID_PRIORITY;
			}
		}
		if (static_priority_base > 0) {
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
				     "priority queues don't start at base "
				     "queue. sq: %d\n",
				     static_priority_base);
			return CVMX_PKO_INVALID_PRIORITY;
		}
#if 0
		cvmx_dprintf("Port %d: Static priority queue base: %d, "
			     "end: %d\n", port,
			     static_priority_base, static_priority_end);
#endif
	}
	/*
	 * At this point, static_priority_base and static_priority_end
	 * are either both -1, or are valid start/end queue
	 * numbers.
	 */

	result_code = CVMX_PKO_SUCCESS;

#ifdef PKO_DEBUG
	cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
		     CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
#endif

	for (queue = 0; queue < num_queues; queue++) {
		uint64_t *buf_ptr = NULL;

		/* High index/queue-id bits go in the separate PTRS1 CSR */
		config1.u64 = 0;
		config1.s.idx3 = queue >> 3;
		config1.s.qid7 = (base_queue + queue) >> 7;

		config.u64 = 0;
		/* Only the last queue of the set carries the tail flag */
		config.s.tail = queue == (num_queues - 1);
		config.s.index = queue;
		config.s.port = port;
		config.s.queue = base_queue + queue;

		/* Pass-1 silicon has no static-priority support */
		if (!cvmx_octeon_is_pass1()) {
			config.s.static_p = static_priority_base >= 0;
			config.s.static_q = (int)queue <= static_priority_end;
			config.s.s_tail = (int)queue == static_priority_end;
		}
		/*
		 * Convert the priority into an enable bit field. Try
		 * to space the bits out evenly so the packet don't
		 * get grouped up
		 */
		switch ((int)priority[queue]) {
		case 0:
			config.s.qos_mask = 0x00;
			break;
		case 1:
			config.s.qos_mask = 0x01;
			break;
		case 2:
			config.s.qos_mask = 0x11;
			break;
		case 3:
			config.s.qos_mask = 0x49;
			break;
		case 4:
			config.s.qos_mask = 0x55;
			break;
		case 5:
			config.s.qos_mask = 0x57;
			break;
		case 6:
			config.s.qos_mask = 0x77;
			break;
		case 7:
			config.s.qos_mask = 0x7f;
			break;
		case 8:
			config.s.qos_mask = 0xff;
			break;
		case CVMX_PKO_QUEUE_STATIC_PRIORITY:
			/* Pass 1 will fall through to the error case */
			if (!cvmx_octeon_is_pass1()) {
				config.s.qos_mask = 0xff;
				break;
			}
			/* fall through */
		default:
			cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
				     "priority %llu\n",
				     (unsigned long long)priority[queue]);
			/* Keep configuring remaining queues, but report it */
			config.s.qos_mask = 0xff;
			result_code = CVMX_PKO_INVALID_PRIORITY;
			break;
		}

		if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
			cvmx_cmd_queue_result_t cmd_res =
			    cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
						      (base_queue + queue),
						      CVMX_PKO_MAX_QUEUE_DEPTH,
						      CVMX_FPA_OUTPUT_BUFFER_POOL,
						      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
						      -
						      CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
						      * 8);
			if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
				switch (cmd_res) {
				case CVMX_CMD_QUEUE_NO_MEMORY:
					cvmx_dprintf("ERROR: "
						     "cvmx_pko_config_port: "
						     "Unable to allocate "
						     "output buffer.\n");
					return CVMX_PKO_NO_MEMORY;
				case CVMX_CMD_QUEUE_ALREADY_SETUP:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Port already setup.\n");
					return CVMX_PKO_PORT_ALREADY_SETUP;
				case CVMX_CMD_QUEUE_INVALID_PARAM:
				default:
					cvmx_dprintf
					    ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
					return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
				}
			}

			/* Program the physical address of the command buffer */
			buf_ptr =
			    (uint64_t *)
			    cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
						  (base_queue + queue));
			config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
		} else
			config.s.buf_ptr = 0;

		/* Make prior stores visible before the CSR writes below */
		CVMX_SYNCWS;

		if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
			cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
		cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
	}

	return result_code;
}
409
410#ifdef PKO_DEBUG
411/**
412 * Show map of ports -> queues for different cores.
413 */
414void cvmx_pko_show_queue_map()
415{
416 int core, port;
417 int pko_output_ports = 36;
418
419 cvmx_dprintf("port");
420 for (port = 0; port < pko_output_ports; port++)
421 cvmx_dprintf("%3d ", port);
422 cvmx_dprintf("\n");
423
424 for (core = 0; core < CVMX_MAX_CORES; core++) {
425 cvmx_dprintf("\n%2d: ", core);
426 for (port = 0; port < pko_output_ports; port++) {
427 cvmx_dprintf("%3d ",
428 cvmx_pko_get_base_queue_per_core(port,
429 core));
430 }
431 }
432 cvmx_dprintf("\n");
433}
434#endif
435
436/**
437 * Rate limit a PKO port to a max packets/sec. This function is only
438 * supported on CN51XX and higher, excluding CN58XX.
439 *
440 * @port: Port to rate limit
441 * @packets_s: Maximum packet/sec
442 * @burst: Maximum number of packets to burst in a row before rate
443 * limiting cuts in.
444 *
445 * Returns Zero on success, negative on failure
446 */
447int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
448{
449 union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
450 union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
451
452 pko_mem_port_rate0.u64 = 0;
453 pko_mem_port_rate0.s.pid = port;
454 pko_mem_port_rate0.s.rate_pkt =
455 cvmx_sysinfo_get()->cpu_clock_hz / packets_s / 16;
456 /* No cost per word since we are limited by packets/sec, not bits/sec */
457 pko_mem_port_rate0.s.rate_word = 0;
458
459 pko_mem_port_rate1.u64 = 0;
460 pko_mem_port_rate1.s.pid = port;
461 pko_mem_port_rate1.s.rate_lim =
462 ((uint64_t) pko_mem_port_rate0.s.rate_pkt * burst) >> 8;
463
464 cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
465 cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
466 return 0;
467}
468
469/**
470 * Rate limit a PKO port to a max bits/sec. This function is only
471 * supported on CN51XX and higher, excluding CN58XX.
472 *
473 * @port: Port to rate limit
474 * @bits_s: PKO rate limit in bits/sec
475 * @burst: Maximum number of bits to burst before rate
476 * limiting cuts in.
477 *
478 * Returns Zero on success, negative on failure
479 */
480int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
481{
482 union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
483 union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
484 uint64_t clock_rate = cvmx_sysinfo_get()->cpu_clock_hz;
485 uint64_t tokens_per_bit = clock_rate * 16 / bits_s;
486
487 pko_mem_port_rate0.u64 = 0;
488 pko_mem_port_rate0.s.pid = port;
489 /*
490 * Each packet has a 12 bytes of interframe gap, an 8 byte
491 * preamble, and a 4 byte CRC. These are not included in the
492 * per word count. Multiply by 8 to covert to bits and divide
493 * by 256 for limit granularity.
494 */
495 pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
496 /* Each 8 byte word has 64bits */
497 pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;
498
499 pko_mem_port_rate1.u64 = 0;
500 pko_mem_port_rate1.s.pid = port;
501 pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;
502
503 cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
504 cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
505 return 0;
506}
diff --git a/drivers/staging/octeon/cvmx-pko.h b/drivers/staging/octeon/cvmx-pko.h
new file mode 100644
index 000000000000..f068c1982497
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pko.h
@@ -0,0 +1,610 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 *
30 * Interface to the hardware Packet Output unit.
31 *
32 * Starting with SDK 1.7.0, the PKO output functions now support
33 * two types of locking. CVMX_PKO_LOCK_ATOMIC_TAG continues to
34 * function similarly to previous SDKs by using POW atomic tags
35 * to preserve ordering and exclusivity. As a new option, you
36 * can now pass CVMX_PKO_LOCK_CMD_QUEUE which uses a ll/sc
37 * memory based locking instead. This locking has the advantage
38 * of not affecting the tag state but doesn't preserve packet
 * ordering. CVMX_PKO_LOCK_CMD_QUEUE is appropriate in most
 * generic code while CVMX_PKO_LOCK_ATOMIC_TAG should be used
 * with hand tuned fast path code.
42 *
 * Some other SDK differences are visible due to the command
 * queuing:
45 * - PKO indexes are no longer stored in the FAU. A large
46 * percentage of the FAU register block used to be tied up
47 * maintaining PKO queue pointers. These are now stored in a
48 * global named block.
49 * - The PKO <b>use_locking</b> parameter can now have a global
50 * effect. Since all application use the same named block,
51 * queue locking correctly applies across all operating
52 * systems when using CVMX_PKO_LOCK_CMD_QUEUE.
53 * - PKO 3 word commands are now supported. Use
54 * cvmx_pko_send_packet_finish3().
55 *
56 */
57
58#ifndef __CVMX_PKO_H__
59#define __CVMX_PKO_H__
60
61#include "cvmx-fpa.h"
62#include "cvmx-pow.h"
63#include "cvmx-cmd-queue.h"
64#include "cvmx-pko-defs.h"
65
/* Adjust the command buffer size by 1 word so that in the case of using only
 * two word PKO commands no command words straddle buffers. The useful values
 * for this are 0 and 1. */
#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)

/* Upper bound used for statically sized per-queue bookkeeping */
#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
/* Number of output queues implemented, selected by Octeon model */
#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
	OCTEON_IS_MODEL(OCTEON_CN3010) || OCTEON_IS_MODEL(OCTEON_CN3005) || \
	OCTEON_IS_MODEL(OCTEON_CN50XX)) ? 32 : \
	(OCTEON_IS_MODEL(OCTEON_CN58XX) || \
	OCTEON_IS_MODEL(OCTEON_CN56XX)) ? 256 : 128)
/* Ports 0-15: interface 0, 16-31: interface 1, 32-35: PCI, 36-39: loop */
#define CVMX_PKO_NUM_OUTPUT_PORTS 40
/* use this for queues that are not used */
#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63
/* Priority value requesting static (strict) priority for a queue */
#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
/* Sentinel queue number for ports with no queue mapping */
#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
/* Passed as the max-depth argument to cvmx_cmd_queue_initialize();
 * presumably 0 means "unlimited" -- TODO confirm */
#define CVMX_PKO_MAX_QUEUE_DEPTH 0
83
/**
 * Result codes returned by the PKO configuration and send functions.
 */
typedef enum {
	CVMX_PKO_SUCCESS,
	CVMX_PKO_INVALID_PORT,
	CVMX_PKO_INVALID_QUEUE,
	CVMX_PKO_INVALID_PRIORITY,
	CVMX_PKO_NO_MEMORY,
	CVMX_PKO_PORT_ALREADY_SETUP,
	CVMX_PKO_CMD_QUEUE_INIT_ERROR
} cvmx_pko_status_t;
93
/**
 * This enumeration represents the different locking modes supported by PKO.
 */
typedef enum {
	/*
	 * PKO doesn't do any locking. It is the responsibility of the
	 * application to make sure that no other core is accessing
	 * the same queue at the same time.
	 */
	CVMX_PKO_LOCK_NONE = 0,
	/*
	 * PKO performs an atomic tagswitch to ensure exclusive access
	 * to the output queue. This will maintain packet ordering on
	 * output.
	 */
	CVMX_PKO_LOCK_ATOMIC_TAG = 1,
	/*
	 * PKO uses the common command queue locks to ensure exclusive
	 * access to the output queue. This is a memory based
	 * ll/sc. This is the most portable locking mechanism.
	 */
	CVMX_PKO_LOCK_CMD_QUEUE = 2,
} cvmx_pko_lock_t;
117
/* Per-port statistics filled in by cvmx_pko_get_port_status() */
typedef struct {
	/* Packets sent (from PKO_MEM_COUNT0) */
	uint32_t packets;
	/* Bytes sent (from PKO_MEM_COUNT1) */
	uint64_t octets;
	/* Pending doorbell count for the port's base queue */
	uint64_t doorbell;
} cvmx_pko_port_status_t;
123
/**
 * This structure defines the address to use on a packet enqueue
 * (the I/O address written by cvmx_pko_doorbell()).
 */
typedef union {
	uint64_t u64;
	struct {
		/* Must CVMX_IO_SEG */
		uint64_t mem_space:2;
		/* Must be zero */
		uint64_t reserved:13;
		/* Must be one */
		uint64_t is_io:1;
		/* The ID of the device on the non-coherent bus */
		uint64_t did:8;
		/* Must be zero */
		uint64_t reserved2:4;
		/* Must be zero */
		uint64_t reserved3:18;
		/*
		 * The hardware likes to have the output port in
		 * addition to the output queue,
		 */
		uint64_t port:6;
		/*
		 * The output queue to send the packet to (0-127 are
		 * legal)
		 */
		uint64_t queue:9;
		/* Must be zero */
		uint64_t reserved4:3;
	} s;
} cvmx_pko_doorbell_address_t;
156
/**
 * Structure of the first packet output command word.
 */
typedef union {
	uint64_t u64;
	struct {
		/*
		 * The size of the reg1 operation - could be 8, 16,
		 * 32, or 64 bits.
		 */
		uint64_t size1:2;
		/*
		 * The size of the reg0 operation - could be 8, 16,
		 * 32, or 64 bits.
		 */
		uint64_t size0:2;
		/*
		 * If set, subtract 1, if clear, subtract packet
		 * size.
		 */
		uint64_t subone1:1;
		/*
		 * The register, subtract will be done if reg1 is
		 * non-zero.
		 */
		uint64_t reg1:11;
		/* If set, subtract 1, if clear, subtract packet size */
		uint64_t subone0:1;
		/* The register, subtract will be done if reg0 is non-zero */
		uint64_t reg0:11;
		/*
		 * When set, interpret segment pointer and segment
		 * bytes in little endian order.
		 */
		uint64_t le:1;
		/*
		 * When set, packet data not allocated in L2 cache by
		 * PKO.
		 */
		uint64_t n2:1;
		/*
		 * If set and rsp is set, word3 contains a pointer to
		 * a work queue entry.
		 */
		uint64_t wqp:1;
		/* If set, the hardware will send a response when done */
		uint64_t rsp:1;
		/*
		 * If set, the supplied pkt_ptr is really a pointer to
		 * a list of pkt_ptr's.
		 */
		uint64_t gather:1;
		/*
		 * If ipoffp1 is non zero, (ipoffp1-1) is the number
		 * of bytes to IP header, and the hardware will
		 * calculate and insert the UDP/TCP checksum.
		 */
		uint64_t ipoffp1:7;
		/*
		 * If set, ignore the I bit (force to zero) from all
		 * pointer structures.
		 */
		uint64_t ignore_i:1;
		/*
		 * If clear, the hardware will attempt to free the
		 * buffers containing the packet.
		 */
		uint64_t dontfree:1;
		/*
		 * The total number of segs in the packet, if gather
		 * set, also gather list length.
		 */
		uint64_t segs:6;
		/* Including L2, but no trailing CRC */
		uint64_t total_bytes:16;
	} s;
} cvmx_pko_command_word0_t;
234
235/* CSR typedefs have been moved to cvmx-csr-*.h */
236
/**
 * Definition of internal state for Packet output processing.
 */
typedef struct {
	/* ptr to start of buffer, offset kept in FAU reg */
	uint64_t *start_ptr;
} cvmx_pko_state_elem_t;
244
245/**
246 * Call before any other calls to initialize the packet
247 * output system.
248 */
249extern void cvmx_pko_initialize_global(void);
250extern int cvmx_pko_initialize_local(void);
251
252/**
253 * Enables the packet output hardware. It must already be
254 * configured.
255 */
256extern void cvmx_pko_enable(void);
257
258/**
259 * Disables the packet output. Does not affect any configuration.
260 */
261extern void cvmx_pko_disable(void);
262
263/**
264 * Shutdown and free resources required by packet output.
265 */
266
267extern void cvmx_pko_shutdown(void);
268
269/**
270 * Configure a output port and the associated queues for use.
271 *
272 * @port: Port to configure.
273 * @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port
275 * @priority: Array of priority levels for each queue. Values are
276 * allowed to be 1-8. A value of 8 get 8 times the traffic
277 * of a value of 1. There must be num_queues elements in the
278 * array.
279 */
280extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
281 uint64_t base_queue,
282 uint64_t num_queues,
283 const uint64_t priority[]);
284
285/**
286 * Ring the packet output doorbell. This tells the packet
287 * output hardware that "len" command words have been added
288 * to its pending list. This command includes the required
289 * CVMX_SYNCWS before the doorbell ring.
290 *
291 * @port: Port the packet is for
292 * @queue: Queue the packet is for
293 * @len: Length of the command in 64 bit words
294 */
295static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
296 uint64_t len)
297{
298 cvmx_pko_doorbell_address_t ptr;
299
300 ptr.u64 = 0;
301 ptr.s.mem_space = CVMX_IO_SEG;
302 ptr.s.did = CVMX_OCT_DID_PKT_SEND;
303 ptr.s.is_io = 1;
304 ptr.s.port = port;
305 ptr.s.queue = queue;
306 /*
307 * Need to make sure output queue data is in DRAM before
308 * doorbell write.
309 */
310 CVMX_SYNCWS;
311 cvmx_write_io(ptr.u64, len);
312}
313
314/**
315 * Prepare to send a packet. This may initiate a tag switch to
316 * get exclusive access to the output queue structure, and
317 * performs other prep work for the packet send operation.
318 *
319 * cvmx_pko_send_packet_finish() MUST be called after this function is called,
320 * and must be called with the same port/queue/use_locking arguments.
321 *
322 * The use_locking parameter allows the caller to use three
323 * possible locking modes.
324 * - CVMX_PKO_LOCK_NONE
325 * - PKO doesn't do any locking. It is the responsibility
326 * of the application to make sure that no other core
327 * is accessing the same queue at the smae time.
328 * - CVMX_PKO_LOCK_ATOMIC_TAG
329 * - PKO performs an atomic tagswitch to insure exclusive
330 * access to the output queue. This will maintain
331 * packet ordering on output.
332 * - CVMX_PKO_LOCK_CMD_QUEUE
333 * - PKO uses the common command queue locks to insure
334 * exclusive access to the output queue. This is a
335 * memory based ll/sc. This is the most portable
336 * locking mechanism.
337 *
338 * NOTE: If atomic locking is used, the POW entry CANNOT be
339 * descheduled, as it does not contain a valid WQE pointer.
340 *
341 * @port: Port to send it on
342 * @queue: Queue to use
343 * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
344 * CVMX_PKO_LOCK_CMD_QUEUE
345 */
346
347static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
348 cvmx_pko_lock_t use_locking)
349{
350 if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) {
351 /*
352 * Must do a full switch here to handle all cases. We
353 * use a fake WQE pointer, as the POW does not access
354 * this memory. The WQE pointer and group are only
355 * used if this work is descheduled, which is not
356 * supported by the
357 * cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish
358 * combination. Note that this is a special case in
359 * which these fake values can be used - this is not a
360 * general technique.
361 */
362 uint32_t tag =
363 CVMX_TAG_SW_BITS_INTERNAL << CVMX_TAG_SW_SHIFT |
364 CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT |
365 (CVMX_TAG_SUBGROUP_MASK & queue);
366 cvmx_pow_tag_sw_full((cvmx_wqe_t *) cvmx_phys_to_ptr(0x80), tag,
367 CVMX_POW_TAG_TYPE_ATOMIC, 0);
368 }
369}
370
371/**
372 * Complete packet output. cvmx_pko_send_packet_prepare() must be
373 * called exactly once before this, and the same parameters must be
374 * passed to both cvmx_pko_send_packet_prepare() and
375 * cvmx_pko_send_packet_finish().
376 *
377 * @port: Port to send it on
378 * @queue: Queue to use
379 * @pko_command:
380 * PKO HW command word
381 * @packet: Packet to send
382 * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
383 * CVMX_PKO_LOCK_CMD_QUEUE
384 *
385 * Returns returns CVMX_PKO_SUCCESS on success, or error code on
386 * failure of output
387 */
388static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(
389 uint64_t port,
390 uint64_t queue,
391 cvmx_pko_command_word0_t pko_command,
392 union cvmx_buf_ptr packet,
393 cvmx_pko_lock_t use_locking)
394{
395 cvmx_cmd_queue_result_t result;
396 if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
397 cvmx_pow_tag_sw_wait();
398 result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
399 (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
400 pko_command.u64, packet.u64);
401 if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
402 cvmx_pko_doorbell(port, queue, 2);
403 return CVMX_PKO_SUCCESS;
404 } else if ((result == CVMX_CMD_QUEUE_NO_MEMORY)
405 || (result == CVMX_CMD_QUEUE_FULL)) {
406 return CVMX_PKO_NO_MEMORY;
407 } else {
408 return CVMX_PKO_INVALID_QUEUE;
409 }
410}
411
412/**
413 * Complete packet output. cvmx_pko_send_packet_prepare() must be
414 * called exactly once before this, and the same parameters must be
415 * passed to both cvmx_pko_send_packet_prepare() and
416 * cvmx_pko_send_packet_finish().
417 *
418 * @port: Port to send it on
419 * @queue: Queue to use
420 * @pko_command:
421 * PKO HW command word
422 * @packet: Packet to send
423 * @addr: Plysical address of a work queue entry or physical address
424 * to zero on complete.
425 * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
426 * CVMX_PKO_LOCK_CMD_QUEUE
427 *
428 * Returns returns CVMX_PKO_SUCCESS on success, or error code on
429 * failure of output
430 */
431static inline cvmx_pko_status_t cvmx_pko_send_packet_finish3(
432 uint64_t port,
433 uint64_t queue,
434 cvmx_pko_command_word0_t pko_command,
435 union cvmx_buf_ptr packet,
436 uint64_t addr,
437 cvmx_pko_lock_t use_locking)
438{
439 cvmx_cmd_queue_result_t result;
440 if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
441 cvmx_pow_tag_sw_wait();
442 result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
443 (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
444 pko_command.u64, packet.u64, addr);
445 if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
446 cvmx_pko_doorbell(port, queue, 3);
447 return CVMX_PKO_SUCCESS;
448 } else if ((result == CVMX_CMD_QUEUE_NO_MEMORY)
449 || (result == CVMX_CMD_QUEUE_FULL)) {
450 return CVMX_PKO_NO_MEMORY;
451 } else {
452 return CVMX_PKO_INVALID_QUEUE;
453 }
454}
455
/**
 * Return the pko output queue associated with a port and a specific core.
 * In normal mode (PKO lockless operation is disabled), the value returned
 * is the base queue.
 *
 * Queues are laid out linearly: interface 0 ports (0-15) first, then
 * interface 1 ports (16-31), then PCI ports (32-35), then loopback
 * ports (36-39). Any other port gets CVMX_PKO_ILLEGAL_QUEUE.
 *
 * @port: Port number
 * @core: Core to get queue for
 *
 * Returns Core-specific output queue
 */
static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
{
/*
 * NOTE(review): these fallback defines use the CVMX_HELPER_ prefix,
 * but the code below reads CVMX_PKO_MAX_PORTS_INTERFACE0/1, so the
 * fallbacks are never consumed here -- confirm whether the prefix is
 * a typo or the macros are used elsewhere.
 */
#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0 16
#endif
#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1 16
#endif

	/* Interface 0: per-core queue offset within the port's range */
	if (port < CVMX_PKO_MAX_PORTS_INTERFACE0)
		return port * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + core;
	/* Interface 1: skip all interface 0 queues */
	else if (port >= 16 && port < 16 + CVMX_PKO_MAX_PORTS_INTERFACE1)
		return CVMX_PKO_MAX_PORTS_INTERFACE0 *
		    CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + (port -
							   16) *
		    CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + core;
	/* PCI ports 32-35: note no per-core offset is added */
	else if ((port >= 32) && (port < 36))
		return CVMX_PKO_MAX_PORTS_INTERFACE0 *
		    CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
		    CVMX_PKO_MAX_PORTS_INTERFACE1 *
		    CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + (port -
							   32) *
		    CVMX_PKO_QUEUES_PER_PORT_PCI;
	/* Loopback ports 36-39: note no per-core offset is added */
	else if ((port >= 36) && (port < 40))
		return CVMX_PKO_MAX_PORTS_INTERFACE0 *
		    CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
		    CVMX_PKO_MAX_PORTS_INTERFACE1 *
		    CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 +
		    4 * CVMX_PKO_QUEUES_PER_PORT_PCI + (port -
							36) *
		    CVMX_PKO_QUEUES_PER_PORT_LOOP;
	else
		/* Given the limit on the number of ports we can map to
		 * CVMX_MAX_OUTPUT_QUEUES_STATIC queues (currently 256,
		 * divided among all cores), the remaining unmapped ports
		 * are assigned an illegal queue number */
		return CVMX_PKO_ILLEGAL_QUEUE;
}
504
/**
 * For a given port number, return the base pko output queue
 * for the port.
 *
 * @port: Port number
 * Returns Base output queue
 */
static inline int cvmx_pko_get_base_queue(int port)
{
	/* Core 0 always holds the first (base) queue of a port */
	return cvmx_pko_get_base_queue_per_core(port, 0);
}
516
517/**
518 * For a given port number, return the number of pko output queues.
519 *
520 * @port: Port number
521 * Returns Number of output queues
522 */
523static inline int cvmx_pko_get_num_queues(int port)
524{
525 if (port < 16)
526 return CVMX_PKO_QUEUES_PER_PORT_INTERFACE0;
527 else if (port < 32)
528 return CVMX_PKO_QUEUES_PER_PORT_INTERFACE1;
529 else if (port < 36)
530 return CVMX_PKO_QUEUES_PER_PORT_PCI;
531 else if (port < 40)
532 return CVMX_PKO_QUEUES_PER_PORT_LOOP;
533 else
534 return 0;
535}
536
/**
 * Get the status counters for a port.
 *
 * @port_num: Port number to get statistics for.
 * @clear: Set to 1 to clear the counters after they are read
 * @status: Where to put the results.
 */
static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
					    cvmx_pko_port_status_t *status)
{
	union cvmx_pko_reg_read_idx pko_reg_read_idx;
	union cvmx_pko_mem_count0 pko_mem_count0;
	union cvmx_pko_mem_count1 pko_mem_count1;

	/* Select the port for the following indirect COUNT0/COUNT1 reads */
	pko_reg_read_idx.u64 = 0;
	pko_reg_read_idx.s.index = port_num;
	cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);

	pko_mem_count0.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT0);
	status->packets = pko_mem_count0.s.count;
	if (clear) {
		/*
		 * NOTE(review): port_num is loaded into the count field
		 * before the write-back; presumably the write clears the
		 * counter and the field selects the port -- confirm
		 * against the PKO register documentation.
		 */
		pko_mem_count0.s.count = port_num;
		cvmx_write_csr(CVMX_PKO_MEM_COUNT0, pko_mem_count0.u64);
	}

	pko_mem_count1.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT1);
	status->octets = pko_mem_count1.s.count;
	if (clear) {
		pko_mem_count1.s.count = port_num;
		cvmx_write_csr(CVMX_PKO_MEM_COUNT1, pko_mem_count1.u64);
	}

	/* The doorbell depth lives in a model-specific debug register */
	if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_pko_mem_debug9 debug9;
		pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
		debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
		status->doorbell = debug9.cn38xx.doorbell;
	} else {
		union cvmx_pko_mem_debug8 debug8;
		pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
		debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
		status->doorbell = debug8.cn58xx.doorbell;
	}
}
583
584/**
585 * Rate limit a PKO port to a max packets/sec. This function is only
586 * supported on CN57XX, CN56XX, CN55XX, and CN54XX.
587 *
588 * @port: Port to rate limit
589 * @packets_s: Maximum packet/sec
590 * @burst: Maximum number of packets to burst in a row before rate
591 * limiting cuts in.
592 *
593 * Returns Zero on success, negative on failure
594 */
595extern int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);
596
597/**
598 * Rate limit a PKO port to a max bits/sec. This function is only
599 * supported on CN57XX, CN56XX, CN55XX, and CN54XX.
600 *
601 * @port: Port to rate limit
602 * @bits_s: PKO rate limit in bits/sec
603 * @burst: Maximum number of bits to burst before rate
604 * limiting cuts in.
605 *
606 * Returns Zero on success, negative on failure
607 */
608extern int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst);
609
610#endif /* __CVMX_PKO_H__ */
diff --git a/drivers/staging/octeon/cvmx-pow.h b/drivers/staging/octeon/cvmx-pow.h
new file mode 100644
index 000000000000..c5d66f272b0d
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-pow.h
@@ -0,0 +1,1982 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 * Interface to the hardware Packet Order / Work unit.
30 *
31 * New, starting with SDK 1.7.0, cvmx-pow supports a number of
32 * extended consistency checks. The define
33 * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
34 * internal state checks to find common programming errors. If
35 * CVMX_ENABLE_POW_CHECKS is not defined, checks are by default
36 * enabled. For example, cvmx-pow will check for the following
37 * program errors or POW state inconsistency.
38 * - Requesting a POW operation with an active tag switch in
39 * progress.
40 * - Waiting for a tag switch to complete for an excessively
41 * long period. This is normally a sign of an error in locking
42 * causing deadlock.
43 * - Illegal tag switches from NULL_NULL.
44 * - Illegal tag switches from NULL.
45 * - Illegal deschedule request.
46 * - WQE pointer not matching the one attached to the core by
47 * the POW.
48 *
49 */
50
51#ifndef __CVMX_POW_H__
52#define __CVMX_POW_H__
53
54#include <asm/octeon/cvmx-pow-defs.h>
55
56#include "cvmx-scratch.h"
57#include "cvmx-wqe.h"
58
/* Default to having all POW consistency checks turned on */
60#ifndef CVMX_ENABLE_POW_CHECKS
61#define CVMX_ENABLE_POW_CHECKS 1
62#endif
63
/* Tag types understood by the POW unit */
enum cvmx_pow_tag_type {
	/* Tag ordering is maintained */
	CVMX_POW_TAG_TYPE_ORDERED = 0L,
	/* Tag ordering is maintained, and at most one PP has the tag */
	CVMX_POW_TAG_TYPE_ATOMIC = 1L,
	/*
	 * The work queue entry from the order - NEVER tag switch from
	 * NULL to NULL
	 */
	CVMX_POW_TAG_TYPE_NULL = 2L,
	/* A tag switch to NULL, and there is no space reserved in POW
	 * - NEVER tag switch to NULL_NULL
	 * - NEVER tag switch from NULL_NULL
	 * - NULL_NULL is entered at the beginning of time and on a deschedule.
	 * - NULL_NULL can be exited by a new work request. A NULL_SWITCH
	 *   load can also switch the state to NULL
	 */
	CVMX_POW_TAG_TYPE_NULL_NULL = 3L
};
83
/**
 * Wait flag values for pow functions.
 */
typedef enum {
	CVMX_POW_WAIT = 1,
	CVMX_POW_NO_WAIT = 0,
} cvmx_pow_wait_t;
91
/**
 * POW tag operations. These are used in the data stored to the POW.
 */
typedef enum {
	/*
	 * switch the tag (only) for this PP
	 * - the previous tag should be non-NULL in this case
	 * - tag switch response required
	 * - fields used: op, type, tag
	 */
	CVMX_POW_TAG_OP_SWTAG = 0L,
	/*
	 * switch the tag for this PP, with full information
	 * - this should be used when the previous tag is NULL
	 * - tag switch response required
	 * - fields used: address, op, grp, type, tag
	 */
	CVMX_POW_TAG_OP_SWTAG_FULL = 1L,
	/*
	 * switch the tag (and/or group) for this PP and de-schedule
	 * - OK to keep the tag the same and only change the group
	 * - fields used: op, no_sched, grp, type, tag
	 */
	CVMX_POW_TAG_OP_SWTAG_DESCH = 2L,
	/*
	 * just de-schedule
	 * - fields used: op, no_sched
	 */
	CVMX_POW_TAG_OP_DESCH = 3L,
	/*
	 * create an entirely new work queue entry
	 * - fields used: address, op, qos, grp, type, tag
	 */
	CVMX_POW_TAG_OP_ADDWQ = 4L,
	/*
	 * just update the work queue pointer and grp for this PP
	 * - fields used: address, op, grp
	 */
	CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,
	/*
	 * set the no_sched bit on the de-schedule list
	 *
	 * - does nothing if the selected entry is not on the
	 *   de-schedule list
	 *
	 * - does nothing if the stored work queue pointer does not
	 *   match the address field
	 *
	 * - fields used: address, index, op
	 *
	 * Before issuing a *_NSCHED operation, SW must guarantee
	 * that all prior deschedules and set/clr NSCHED operations
	 * are complete and all prior switches are complete. The
	 * hardware provides the opsdone bit and swdone bit for SW
	 * polling. After issuing a *_NSCHED operation, SW must
	 * guarantee that the set/clr NSCHED is complete before any
	 * subsequent operations.
	 */
	CVMX_POW_TAG_OP_SET_NSCHED = 6L,
	/*
	 * clears the no_sched bit on the de-schedule list
	 *
	 * - does nothing if the selected entry is not on the
	 *   de-schedule list
	 *
	 * - does nothing if the stored work queue pointer does not
	 *   match the address field
	 *
	 * - fields used: address, index, op
	 *
	 * Before issuing a *_NSCHED operation, SW must guarantee that
	 * all prior deschedules and set/clr NSCHED operations are
	 * complete and all prior switches are complete. The hardware
	 * provides the opsdone bit and swdone bit for SW
	 * polling. After issuing a *_NSCHED operation, SW must
	 * guarantee that the set/clr NSCHED is complete before any
	 * subsequent operations.
	 */
	CVMX_POW_TAG_OP_CLR_NSCHED = 7L,
	/* do nothing */
	CVMX_POW_TAG_OP_NOP = 15L
} cvmx_pow_tag_op_t;
174
/**
 * This structure defines the store data on a store to POW
 */
typedef union {
	uint64_t u64;
	struct {
		/*
		 * Don't reschedule this entry. no_sched is used for
		 * CVMX_POW_TAG_OP_SWTAG_DESCH and
		 * CVMX_POW_TAG_OP_DESCH
		 */
		uint64_t no_sched:1;
		uint64_t unused:2;
		/* Contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
		uint64_t index:13;
		/* The operation to perform */
		cvmx_pow_tag_op_t op:4;
		uint64_t unused2:2;
		/*
		 * The QOS level for the packet. qos is only used for
		 * CVMX_POW_TAG_OP_ADDWQ
		 */
		uint64_t qos:3;
		/*
		 * The group that the work queue entry will be
		 * scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ,
		 * CVMX_POW_TAG_OP_SWTAG_FULL,
		 * CVMX_POW_TAG_OP_SWTAG_DESCH, and
		 * CVMX_POW_TAG_OP_UPDATE_WQP_GRP
		 */
		uint64_t grp:4;
		/*
		 * The type of the tag. type is used for everything
		 * except CVMX_POW_TAG_OP_DESCH,
		 * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
		 * CVMX_POW_TAG_OP_*_NSCHED
		 */
		uint64_t type:3;
		/*
		 * The actual tag. tag is used for everything except
		 * CVMX_POW_TAG_OP_DESCH,
		 * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
		 * CVMX_POW_TAG_OP_*_NSCHED
		 */
		uint64_t tag:32;
	} s;
} cvmx_pow_tag_req_t;
222
/**
 * This structure describes the address to load stuff from POW.
 * The did<2:0> sub-field selects which request type the load
 * performs (and therefore which member layout applies).
 */
typedef union {
	uint64_t u64;

	/**
	 * Address for new work request loads (did<2:0> == 0)
	 */
	struct {
		/* Mips64 address region. Should be CVMX_IO_SEG */
		uint64_t mem_region:2;
		/* Must be zero */
		uint64_t reserved_49_61:13;
		/* Must be one */
		uint64_t is_io:1;
		/* the ID of POW -- did<2:0> == 0 in this case */
		uint64_t did:8;
		/* Must be zero */
		uint64_t reserved_4_39:36;
		/*
		 * If set, don't return load response until work is
		 * available.
		 */
		uint64_t wait:1;
		/* Must be zero */
		uint64_t reserved_0_2:3;
	} swork;

	/**
	 * Address for loads to get POW internal status
	 */
	struct {
		/* Mips64 address region. Should be CVMX_IO_SEG */
		uint64_t mem_region:2;
		/* Must be zero */
		uint64_t reserved_49_61:13;
		/* Must be one */
		uint64_t is_io:1;
		/* the ID of POW -- did<2:0> == 1 in this case */
		uint64_t did:8;
		/* Must be zero */
		uint64_t reserved_10_39:30;
		/* The core id to get status for */
		uint64_t coreid:4;
		/*
		 * If set and get_cur is set, return reverse tag-list
		 * pointer rather than forward tag-list pointer.
		 */
		uint64_t get_rev:1;
		/*
		 * If set, return current status rather than pending
		 * status.
		 */
		uint64_t get_cur:1;
		/*
		 * If set, get the work-queue pointer rather than
		 * tag/type.
		 */
		uint64_t get_wqp:1;
		/* Must be zero */
		uint64_t reserved_0_2:3;
	} sstatus;

	/**
	 * Address for memory loads to get POW internal state
	 */
	struct {
		/* Mips64 address region. Should be CVMX_IO_SEG */
		uint64_t mem_region:2;
		/* Must be zero */
		uint64_t reserved_49_61:13;
		/* Must be one */
		uint64_t is_io:1;
		/* the ID of POW -- did<2:0> == 2 in this case */
		uint64_t did:8;
		/* Must be zero */
		uint64_t reserved_16_39:24;
		/* POW memory index */
		uint64_t index:11;
		/*
		 * If set, return deschedule information rather than
		 * the standard response for work-queue index (invalid
		 * if the work-queue entry is not on the deschedule
		 * list).
		 */
		uint64_t get_des:1;
		/*
		 * If set, get the work-queue pointer rather than
		 * tag/type (no effect when get_des set).
		 */
		uint64_t get_wqp:1;
		/* Must be zero */
		uint64_t reserved_0_2:3;
	} smemload;

	/**
	 * Address for index/pointer loads
	 */
	struct {
		/* Mips64 address region. Should be CVMX_IO_SEG */
		uint64_t mem_region:2;
		/* Must be zero */
		uint64_t reserved_49_61:13;
		/* Must be one */
		uint64_t is_io:1;
		/* the ID of POW -- did<2:0> == 3 in this case */
		uint64_t did:8;
		/* Must be zero */
		uint64_t reserved_9_39:31;
		/*
		 * when {get_rmt ==0 AND get_des_get_tail == 0}, this
		 * field selects one of eight POW internal-input
		 * queues (0-7), one per QOS level; values 8-15 are
		 * illegal in this case; when {get_rmt ==0 AND
		 * get_des_get_tail == 1}, this field selects one of
		 * 16 deschedule lists (per group); when get_rmt ==1,
		 * this field selects one of 16 memory-input queue
		 * lists. The two memory-input queue lists associated
		 * with each QOS level are:
		 *
		 * - qosgrp = 0, qosgrp = 8: QOS0
		 * - qosgrp = 1, qosgrp = 9: QOS1
		 * - qosgrp = 2, qosgrp = 10: QOS2
		 * - qosgrp = 3, qosgrp = 11: QOS3
		 * - qosgrp = 4, qosgrp = 12: QOS4
		 * - qosgrp = 5, qosgrp = 13: QOS5
		 * - qosgrp = 6, qosgrp = 14: QOS6
		 * - qosgrp = 7, qosgrp = 15: QOS7
		 */
		uint64_t qosgrp:4;
		/*
		 * If set and get_rmt is clear, return deschedule list
		 * indexes rather than indexes for the specified qos
		 * level; if set and get_rmt is set, return the tail
		 * pointer rather than the head pointer for the
		 * specified qos level.
		 */
		uint64_t get_des_get_tail:1;
		/*
		 * If set, return remote pointers rather than the
		 * local indexes for the specified qos level.
		 */
		uint64_t get_rmt:1;
		/* Must be zero */
		uint64_t reserved_0_2:3;
	} sindexload;

	/**
	 * address for NULL_RD request (did<2:0> == 4) when this is read,
	 * HW attempts to change the state to NULL if it is NULL_NULL (the
	 * hardware cannot switch from NULL_NULL to NULL if a POW entry is
	 * not available - software may need to recover by finishing
	 * another piece of work before a POW entry can ever become
	 * available.)
	 */
	struct {
		/* Mips64 address region. Should be CVMX_IO_SEG */
		uint64_t mem_region:2;
		/* Must be zero */
		uint64_t reserved_49_61:13;
		/* Must be one */
		uint64_t is_io:1;
		/* the ID of POW -- did<2:0> == 4 in this case */
		uint64_t did:8;
		/* Must be zero */
		uint64_t reserved_0_39:40;
	} snull_rd;
} cvmx_pow_load_addr_t;
392
/**
 * This structure defines the response to a load/SENDSINGLE to POW
 * (except CSR reads). Which member applies depends on the request
 * that produced the response (see cvmx_pow_load_addr_t).
 */
typedef union {
	uint64_t u64;

	/**
	 * Response to new work request loads
	 */
	struct {
		/*
		 * Set when no new work queue entry was returned. If
		 * there was de-scheduled work, the HW will
		 * definitely return it. When this bit is set, it
		 * could mean either:
		 *
		 * - There was no work, or
		 *
		 * - There was no work that the HW could find. This
		 * case can happen, regardless of the wait bit value
		 * in the original request, when there is work in
		 * the IQ's that is too deep down the list.
		 */
		uint64_t no_work:1;
		/* Must be zero */
		uint64_t reserved_40_62:23;
		/* 36 in O1 -- the work queue pointer */
		uint64_t addr:40;
	} s_work;

	/**
	 * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
	 */
	struct {
		uint64_t reserved_62_63:2;
		/* Set when there is a pending non-NULL SWTAG or
		 * SWTAG_FULL, and the POW entry has not left the list
		 * for the original tag. */
		uint64_t pend_switch:1;
		/* Set when SWTAG_FULL and pend_switch is set. */
		uint64_t pend_switch_full:1;
		/*
		 * Set when there is a pending NULL SWTAG, or an
		 * implicit switch to NULL.
		 */
		uint64_t pend_switch_null:1;
		/* Set when there is a pending DESCHED or SWTAG_DESCHED. */
		uint64_t pend_desched:1;
		/*
		 * Set when there is a pending SWTAG_DESCHED and
		 * pend_desched is set.
		 */
		uint64_t pend_desched_switch:1;
		/* Set when nosched is desired and pend_desched is set. */
		uint64_t pend_nosched:1;
		/* Set when there is a pending GET_WORK. */
		uint64_t pend_new_work:1;
		/*
		 * When pend_new_work is set, this bit indicates that
		 * the wait bit was set.
		 */
		uint64_t pend_new_work_wait:1;
		/* Set when there is a pending NULL_RD. */
		uint64_t pend_null_rd:1;
		/* Set when there is a pending CLR_NSCHED. */
		uint64_t pend_nosched_clr:1;
		uint64_t reserved_51:1;
		/* This is the index when pend_nosched_clr is set. */
		uint64_t pend_index:11;
		/*
		 * This is the new_grp when (pend_desched AND
		 * pend_desched_switch) is set.
		 */
		uint64_t pend_grp:4;
		uint64_t reserved_34_35:2;
		/*
		 * This is the tag type when pend_switch or
		 * (pend_desched AND pend_desched_switch) are set.
		 */
		uint64_t pend_type:2;
		/*
		 * This is the tag when pend_switch or (pend_desched
		 * AND pend_desched_switch) are set.
		 */
		uint64_t pend_tag:32;
	} s_sstatus0;

	/**
	 * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
	 */
	struct {
		uint64_t reserved_62_63:2;
		/*
		 * Set when there is a pending non-NULL SWTAG or
		 * SWTAG_FULL, and the POW entry has not left the list
		 * for the original tag.
		 */
		uint64_t pend_switch:1;
		/* Set when SWTAG_FULL and pend_switch is set. */
		uint64_t pend_switch_full:1;
		/*
		 * Set when there is a pending NULL SWTAG, or an
		 * implicit switch to NULL.
		 */
		uint64_t pend_switch_null:1;
		/*
		 * Set when there is a pending DESCHED or
		 * SWTAG_DESCHED.
		 */
		uint64_t pend_desched:1;
		/*
		 * Set when there is a pending SWTAG_DESCHED and
		 * pend_desched is set.
		 */
		uint64_t pend_desched_switch:1;
		/* Set when nosched is desired and pend_desched is set. */
		uint64_t pend_nosched:1;
		/* Set when there is a pending GET_WORK. */
		uint64_t pend_new_work:1;
		/*
		 * When pend_new_work is set, this bit indicates that
		 * the wait bit was set.
		 */
		uint64_t pend_new_work_wait:1;
		/* Set when there is a pending NULL_RD. */
		uint64_t pend_null_rd:1;
		/* Set when there is a pending CLR_NSCHED. */
		uint64_t pend_nosched_clr:1;
		uint64_t reserved_51:1;
		/* This is the index when pend_nosched_clr is set. */
		uint64_t pend_index:11;
		/*
		 * This is the new_grp when (pend_desched AND
		 * pend_desched_switch) is set.
		 */
		uint64_t pend_grp:4;
		/* This is the wqp when pend_nosched_clr is set. */
		uint64_t pend_wqp:36;
	} s_sstatus1;

	/**
	 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and
	 * get_rev==0)
	 */
	struct {
		uint64_t reserved_62_63:2;
		/*
		 * Points to the next POW entry in the tag list when
		 * tail == 0 (and tag_type is not NULL or NULL_NULL).
		 */
		uint64_t link_index:11;
		/* The POW entry attached to the core. */
		uint64_t index:11;
		/*
		 * The group attached to the core (updated when new
		 * tag list entered on SWTAG_FULL).
		 */
		uint64_t grp:4;
		/*
		 * Set when this POW entry is at the head of its tag
		 * list (also set when in the NULL or NULL_NULL
		 * state).
		 */
		uint64_t head:1;
		/*
		 * Set when this POW entry is at the tail of its tag
		 * list (also set when in the NULL or NULL_NULL
		 * state).
		 */
		uint64_t tail:1;
		/*
		 * The tag type attached to the core (updated when new
		 * tag list entered on SWTAG, SWTAG_FULL, or
		 * SWTAG_DESCHED).
		 */
		uint64_t tag_type:2;
		/*
		 * The tag attached to the core (updated when new tag
		 * list entered on SWTAG, SWTAG_FULL, or
		 * SWTAG_DESCHED).
		 */
		uint64_t tag:32;
	} s_sstatus2;

	/**
	 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
	 */
	struct {
		uint64_t reserved_62_63:2;
		/*
		 * Points to the prior POW entry in the tag list when
		 * head == 0 (and tag_type is not NULL or
		 * NULL_NULL). This field is unpredictable when the
		 * core's state is NULL or NULL_NULL.
		 */
		uint64_t revlink_index:11;
		/* The POW entry attached to the core. */
		uint64_t index:11;
		/*
		 * The group attached to the core (updated when new
		 * tag list entered on SWTAG_FULL).
		 */
		uint64_t grp:4;
		/* Set when this POW entry is at the head of its tag
		 * list (also set when in the NULL or NULL_NULL
		 * state).
		 */
		uint64_t head:1;
		/*
		 * Set when this POW entry is at the tail of its tag
		 * list (also set when in the NULL or NULL_NULL
		 * state).
		 */
		uint64_t tail:1;
		/*
		 * The tag type attached to the core (updated when new
		 * tag list entered on SWTAG, SWTAG_FULL, or
		 * SWTAG_DESCHED).
		 */
		uint64_t tag_type:2;
		/*
		 * The tag attached to the core (updated when new tag
		 * list entered on SWTAG, SWTAG_FULL, or
		 * SWTAG_DESCHED).
		 */
		uint64_t tag:32;
	} s_sstatus3;

	/**
	 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
	 * get_rev==0)
	 */
	struct {
		uint64_t reserved_62_63:2;
		/*
		 * Points to the next POW entry in the tag list when
		 * tail == 0 (and tag_type is not NULL or NULL_NULL).
		 */
		uint64_t link_index:11;
		/* The POW entry attached to the core. */
		uint64_t index:11;
		/*
		 * The group attached to the core (updated when new
		 * tag list entered on SWTAG_FULL).
		 */
		uint64_t grp:4;
		/*
		 * The wqp attached to the core (updated when new tag
		 * list entered on SWTAG_FULL).
		 */
		uint64_t wqp:36;
	} s_sstatus4;

	/**
	 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
	 * get_rev==1)
	 */
	struct {
		uint64_t reserved_62_63:2;
		/*
		 * Points to the prior POW entry in the tag list when
		 * head == 0 (and tag_type is not NULL or
		 * NULL_NULL). This field is unpredictable when the
		 * core's state is NULL or NULL_NULL.
		 */
		uint64_t revlink_index:11;
		/* The POW entry attached to the core. */
		uint64_t index:11;
		/*
		 * The group attached to the core (updated when new
		 * tag list entered on SWTAG_FULL).
		 */
		uint64_t grp:4;
		/*
		 * The wqp attached to the core (updated when new tag
		 * list entered on SWTAG_FULL).
		 */
		uint64_t wqp:36;
	} s_sstatus5;

	/**
	 * Result For POW Memory Load (get_des == 0 and get_wqp == 0)
	 */
	struct {
		uint64_t reserved_51_63:13;
		/*
		 * The next entry in the input, free, descheduled_head
		 * list (unpredictable if entry is the tail of the
		 * list).
		 */
		uint64_t next_index:11;
		/* The group of the POW entry. */
		uint64_t grp:4;
		uint64_t reserved_35:1;
		/*
		 * Set when this POW entry is at the tail of its tag
		 * list (also set when in the NULL or NULL_NULL
		 * state).
		 */
		uint64_t tail:1;
		/* The tag type of the POW entry. */
		uint64_t tag_type:2;
		/* The tag of the POW entry. */
		uint64_t tag:32;
	} s_smemload0;

	/**
	 * Result For POW Memory Load (get_des == 0 and get_wqp == 1)
	 */
	struct {
		uint64_t reserved_51_63:13;
		/*
		 * The next entry in the input, free, descheduled_head
		 * list (unpredictable if entry is the tail of the
		 * list).
		 */
		uint64_t next_index:11;
		/* The group of the POW entry. */
		uint64_t grp:4;
		/* The WQP held in the POW entry. */
		uint64_t wqp:36;
	} s_smemload1;

	/**
	 * Result For POW Memory Load (get_des == 1)
	 */
	struct {
		uint64_t reserved_51_63:13;
		/*
		 * The next entry in the tag list connected to the
		 * descheduled head.
		 */
		uint64_t fwd_index:11;
		/* The group of the POW entry. */
		uint64_t grp:4;
		/* The nosched bit for the POW entry. */
		uint64_t nosched:1;
		/* There is a pending tag switch */
		uint64_t pend_switch:1;
		/*
		 * The next tag type for the new tag list when
		 * pend_switch is set.
		 */
		uint64_t pend_type:2;
		/*
		 * The next tag for the new tag list when pend_switch
		 * is set.
		 */
		uint64_t pend_tag:32;
	} s_smemload2;

	/**
	 * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
	 */
	struct {
		uint64_t reserved_52_63:12;
		/*
		 * set when there is one or more POW entries on the
		 * free list.
		 */
		uint64_t free_val:1;
		/*
		 * set when there is exactly one POW entry on the free
		 * list.
		 */
		uint64_t free_one:1;
		uint64_t reserved_49:1;
		/*
		 * when free_val is set, indicates the first entry on
		 * the free list.
		 */
		uint64_t free_head:11;
		uint64_t reserved_37:1;
		/*
		 * when free_val is set, indicates the last entry on
		 * the free list.
		 */
		uint64_t free_tail:11;
		/*
		 * set when there is one or more POW entries on the
		 * input Q list selected by qosgrp.
		 */
		uint64_t loc_val:1;
		/*
		 * set when there is exactly one POW entry on the
		 * input Q list selected by qosgrp.
		 */
		uint64_t loc_one:1;
		uint64_t reserved_23:1;
		/*
		 * when loc_val is set, indicates the first entry on
		 * the input Q list selected by qosgrp.
		 */
		uint64_t loc_head:11;
		uint64_t reserved_11:1;
		/*
		 * when loc_val is set, indicates the last entry on
		 * the input Q list selected by qosgrp.
		 */
		uint64_t loc_tail:11;
	} sindexload0;

	/**
	 * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
	 */
	struct {
		uint64_t reserved_52_63:12;
		/*
		 * set when there is one or more POW entries on the
		 * nosched list.
		 */
		uint64_t nosched_val:1;
		/*
		 * set when there is exactly one POW entry on the
		 * nosched list.
		 */
		uint64_t nosched_one:1;
		uint64_t reserved_49:1;
		/*
		 * when nosched_val is set, indicates the first entry
		 * on the nosched list.
		 */
		uint64_t nosched_head:11;
		uint64_t reserved_37:1;
		/*
		 * when nosched_val is set, indicates the last entry
		 * on the nosched list.
		 */
		uint64_t nosched_tail:11;
		/*
		 * set when there is one or more descheduled heads on
		 * the descheduled list selected by qosgrp.
		 */
		uint64_t des_val:1;
		/*
		 * set when there is exactly one descheduled head on
		 * the descheduled list selected by qosgrp.
		 */
		uint64_t des_one:1;
		uint64_t reserved_23:1;
		/*
		 * when des_val is set, indicates the first
		 * descheduled head on the descheduled list selected
		 * by qosgrp.
		 */
		uint64_t des_head:11;
		uint64_t reserved_11:1;
		/*
		 * when des_val is set, indicates the last descheduled
		 * head on the descheduled list selected by qosgrp.
		 */
		uint64_t des_tail:11;
	} sindexload1;

	/**
	 * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
	 */
	struct {
		uint64_t reserved_39_63:25;
		/*
		 * Set when this DRAM list is the current head
		 * (i.e. is the next to be reloaded when the POW
		 * hardware reloads a POW entry from DRAM). The POW
		 * hardware alternates between the two DRAM lists
		 * associated with a QOS level when it reloads work
		 * from DRAM into the POW unit.
		 */
		uint64_t rmt_is_head:1;
		/*
		 * Set when the DRAM portion of the input Q list
		 * selected by qosgrp contains one or more pieces of
		 * work.
		 */
		uint64_t rmt_val:1;
		/*
		 * Set when the DRAM portion of the input Q list
		 * selected by qosgrp contains exactly one piece of
		 * work.
		 */
		uint64_t rmt_one:1;
		/*
		 * When rmt_val is set, indicates the first piece of
		 * work on the DRAM input Q list selected by
		 * qosgrp.
		 */
		uint64_t rmt_head:36;
	} sindexload2;

	/**
	 * Result For POW Index/Pointer Load (get_rmt ==
	 * 1/get_des_get_tail == 1)
	 */
	struct {
		uint64_t reserved_39_63:25;
		/*
		 * set when this DRAM list is the current head
		 * (i.e. is the next to be reloaded when the POW
		 * hardware reloads a POW entry from DRAM). The POW
		 * hardware alternates between the two DRAM lists
		 * associated with a QOS level when it reloads work
		 * from DRAM into the POW unit.
		 */
		uint64_t rmt_is_head:1;
		/*
		 * set when the DRAM portion of the input Q list
		 * selected by qosgrp contains one or more pieces of
		 * work.
		 */
		uint64_t rmt_val:1;
		/*
		 * set when the DRAM portion of the input Q list
		 * selected by qosgrp contains exactly one piece of
		 * work.
		 */
		uint64_t rmt_one:1;
		/*
		 * when rmt_val is set, indicates the last piece of
		 * work on the DRAM input Q list selected by
		 * qosgrp.
		 */
		uint64_t rmt_tail:36;
	} sindexload3;

	/**
	 * Response to NULL_RD request loads
	 */
	struct {
		uint64_t unused:62;
		/* of type cvmx_pow_tag_type_t. state is one of the
		 * following:
		 *
		 * - CVMX_POW_TAG_TYPE_ORDERED
		 * - CVMX_POW_TAG_TYPE_ATOMIC
		 * - CVMX_POW_TAG_TYPE_NULL
		 * - CVMX_POW_TAG_TYPE_NULL_NULL
		 */
		uint64_t state:2;
	} s_null_rd;

} cvmx_pow_tag_load_resp_t;
934
/**
 * This structure describes the address used for stores to the POW.
 * The store address is meaningful on stores to the POW. The
 * hardware assumes that an aligned 64-bit store was used for all
 * these stores. Note the assumption that the work queue entry is
 * aligned on an 8-byte boundary (since the low-order 3 address bits
 * must be zero). Note that not all fields are used by all
 * operations.
 *
 * NOTE: The following is the behavior of the pending switch bit at the PP
 * for POW stores (i.e. when did<7:3> == 0xc)
 * - did<2:0> == 0 => pending switch bit is set
 * - did<2:0> == 1 => no effect on the pending switch bit
 * - did<2:0> == 3 => pending switch bit is cleared
 * - did<2:0> == 7 => no effect on the pending switch bit
 * - did<2:0> == others => must not be used
 * - No other loads/stores have an effect on the pending switch bit
 * - The switch bus from POW can clear the pending switch bit
 *
 * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle
 * ADDWQ command that only contains the pointer). SW must never use
 * did<2:0> == 2.
 */
typedef union {
	/**
	 * Unsigned 64 bit integer representation of store address
	 */
	uint64_t u64;

	struct {
		/* Memory region. Should be CVMX_IO_SEG in most cases */
		uint64_t mem_reg:2;
		uint64_t reserved_49_61:13;	/* Must be zero */
		uint64_t is_io:1;	/* Must be one */
		/* Device ID of POW. Note that different sub-dids are used. */
		uint64_t did:8;
		uint64_t reserved_36_39:4;	/* Must be zero */
		/* Address field. addr<2:0> must be zero */
		uint64_t addr:36;
	} stag;
} cvmx_pow_tag_store_addr_t;
976
/**
 * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
 */
typedef union {
	uint64_t u64;

	struct {
		/*
		 * the (64-bit word) location in scratchpad to write
		 * to (if len != 0)
		 */
		uint64_t scraddr:8;
		/* the number of words in the response (0 => no response) */
		uint64_t len:8;
		/* the ID of the device on the non-coherent bus */
		uint64_t did:8;
		/* not interpreted here; presumably should be written
		 * as zero -- TODO confirm against the HW manual */
		uint64_t unused:36;
		/* if set, don't return load response until work is available */
		uint64_t wait:1;
		/* not interpreted here; presumably should be written
		 * as zero -- TODO confirm against the HW manual */
		uint64_t unused2:3;
	} s;

} cvmx_pow_iobdma_store_t;
1000
1001/* CSR typedefs have been moved to cvmx-csr-*.h */
1002
1003/**
1004 * Get the POW tag for this core. This returns the current
1005 * tag type, tag, group, and POW entry index associated with
1006 * this core. Index is only valid if the tag type isn't NULL_NULL.
1007 * If a tag switch is pending this routine returns the tag before
1008 * the tag switch, not after.
1009 *
1010 * Returns Current tag
1011 */
1012static inline cvmx_pow_tag_req_t cvmx_pow_get_current_tag(void)
1013{
1014 cvmx_pow_load_addr_t load_addr;
1015 cvmx_pow_tag_load_resp_t load_resp;
1016 cvmx_pow_tag_req_t result;
1017
1018 load_addr.u64 = 0;
1019 load_addr.sstatus.mem_region = CVMX_IO_SEG;
1020 load_addr.sstatus.is_io = 1;
1021 load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
1022 load_addr.sstatus.coreid = cvmx_get_core_num();
1023 load_addr.sstatus.get_cur = 1;
1024 load_resp.u64 = cvmx_read_csr(load_addr.u64);
1025 result.u64 = 0;
1026 result.s.grp = load_resp.s_sstatus2.grp;
1027 result.s.index = load_resp.s_sstatus2.index;
1028 result.s.type = load_resp.s_sstatus2.tag_type;
1029 result.s.tag = load_resp.s_sstatus2.tag;
1030 return result;
1031}
1032
1033/**
1034 * Get the POW WQE for this core. This returns the work queue
1035 * entry currently associated with this core.
1036 *
1037 * Returns WQE pointer
1038 */
1039static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
1040{
1041 cvmx_pow_load_addr_t load_addr;
1042 cvmx_pow_tag_load_resp_t load_resp;
1043
1044 load_addr.u64 = 0;
1045 load_addr.sstatus.mem_region = CVMX_IO_SEG;
1046 load_addr.sstatus.is_io = 1;
1047 load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
1048 load_addr.sstatus.coreid = cvmx_get_core_num();
1049 load_addr.sstatus.get_cur = 1;
1050 load_addr.sstatus.get_wqp = 1;
1051 load_resp.u64 = cvmx_read_csr(load_addr.u64);
1052 return (cvmx_wqe_t *) cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
1053}
1054
#ifndef CVMX_MF_CHORD
/*
 * Reads hardware register 30 via RDHWR; used below as the
 * per-core "tag switch complete" flag (nonzero => complete).
 */
#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
#endif
1058
/**
 * Emit a warning when a tag switch is still in flight on this core.
 *
 * @function: caller's name, included in the warning text
 */
static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
{
	uint64_t done;

	CVMX_MF_CHORD(done);
	if (done == 0)
		pr_warning("%s called with tag switch in progress\n", function);
}
1071
/**
 * Poll the completion bit until the pending tag switch for this
 * core has finished. Switches to NULL complete immediately and
 * never need this wait.
 */
static inline void cvmx_pow_tag_sw_wait(void)
{
	const uint64_t MAX_CYCLES = 1ull << 31;
	uint64_t start_cycle = cvmx_get_cycle();
	uint64_t switch_complete;

	for (;;) {
		CVMX_MF_CHORD(switch_complete);
		if (unlikely(switch_complete))
			return;
		if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) {
			pr_warning("Tag switch is taking a long time, "
				   "possible deadlock\n");
			/*
			 * start_cycle + MAX_CYCLES now wraps to
			 * UINT64_MAX, so the warning fires only once
			 * while this poll keeps spinning.
			 */
			start_cycle = -MAX_CYCLES - 1;
		}
	}
}
1093
1094/**
1095 * Synchronous work request. Requests work from the POW.
1096 * This function does NOT wait for previous tag switches to complete,
1097 * so the caller must ensure that there is not a pending tag switch.
1098 *
1099 * @wait: When set, call stalls until work becomes avaiable, or times out.
1100 * If not set, returns immediately.
1101 *
1102 * Returns Returns the WQE pointer from POW. Returns NULL if no work
1103 * was available.
1104 */
1105static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t
1106 wait)
1107{
1108 cvmx_pow_load_addr_t ptr;
1109 cvmx_pow_tag_load_resp_t result;
1110
1111 if (CVMX_ENABLE_POW_CHECKS)
1112 __cvmx_pow_warn_if_pending_switch(__func__);
1113
1114 ptr.u64 = 0;
1115 ptr.swork.mem_region = CVMX_IO_SEG;
1116 ptr.swork.is_io = 1;
1117 ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
1118 ptr.swork.wait = wait;
1119
1120 result.u64 = cvmx_read_csr(ptr.u64);
1121
1122 if (result.s_work.no_work)
1123 return NULL;
1124 else
1125 return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.s_work.addr);
1126}
1127
1128/**
1129 * Synchronous work request. Requests work from the POW.
1130 * This function waits for any previous tag switch to complete before
1131 * requesting the new work.
1132 *
1133 * @wait: When set, call stalls until work becomes avaiable, or times out.
1134 * If not set, returns immediately.
1135 *
1136 * Returns Returns the WQE pointer from POW. Returns NULL if no work
1137 * was available.
1138 */
1139static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
1140{
1141 if (CVMX_ENABLE_POW_CHECKS)
1142 __cvmx_pow_warn_if_pending_switch(__func__);
1143
1144 /* Must not have a switch pending when requesting work */
1145 cvmx_pow_tag_sw_wait();
1146 return cvmx_pow_work_request_sync_nocheck(wait);
1147
1148}
1149
1150/**
1151 * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
1152 * This function waits for any previous tag switch to complete before
1153 * requesting the null_rd.
1154 *
1155 * Returns Returns the POW state of type cvmx_pow_tag_type_t.
1156 */
1157static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void)
1158{
1159 cvmx_pow_load_addr_t ptr;
1160 cvmx_pow_tag_load_resp_t result;
1161
1162 if (CVMX_ENABLE_POW_CHECKS)
1163 __cvmx_pow_warn_if_pending_switch(__func__);
1164
1165 /* Must not have a switch pending when requesting work */
1166 cvmx_pow_tag_sw_wait();
1167
1168 ptr.u64 = 0;
1169 ptr.snull_rd.mem_region = CVMX_IO_SEG;
1170 ptr.snull_rd.is_io = 1;
1171 ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
1172
1173 result.u64 = cvmx_read_csr(ptr.u64);
1174
1175 return (enum cvmx_pow_tag_type) result.s_null_rd.state;
1176}
1177
1178/**
1179 * Asynchronous work request. Work is requested from the POW unit,
1180 * and should later be checked with function
1181 * cvmx_pow_work_response_async. This function does NOT wait for
1182 * previous tag switches to complete, so the caller must ensure that
1183 * there is not a pending tag switch.
1184 *
1185 * @scr_addr: Scratch memory address that response will be returned
1186 * to, which is either a valid WQE, or a response with the
1187 * invalid bit set. Byte address, must be 8 byte aligned.
1188 *
1189 * @wait: 1 to cause response to wait for work to become available (or
1190 * timeout), 0 to cause response to return immediately
1191 */
1192static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
1193 cvmx_pow_wait_t wait)
1194{
1195 cvmx_pow_iobdma_store_t data;
1196
1197 if (CVMX_ENABLE_POW_CHECKS)
1198 __cvmx_pow_warn_if_pending_switch(__func__);
1199
1200 /* scr_addr must be 8 byte aligned */
1201 data.s.scraddr = scr_addr >> 3;
1202 data.s.len = 1;
1203 data.s.did = CVMX_OCT_DID_TAG_SWTAG;
1204 data.s.wait = wait;
1205 cvmx_send_single(data.u64);
1206}
1207
1208/**
1209 * Asynchronous work request. Work is requested from the POW unit,
1210 * and should later be checked with function
1211 * cvmx_pow_work_response_async. This function waits for any previous
1212 * tag switch to complete before requesting the new work.
1213 *
1214 * @scr_addr: Scratch memory address that response will be returned
1215 * to, which is either a valid WQE, or a response with the
1216 * invalid bit set. Byte address, must be 8 byte aligned.
1217 *
1218 * @wait: 1 to cause response to wait for work to become available (or
1219 * timeout), 0 to cause response to return immediately
1220 */
1221static inline void cvmx_pow_work_request_async(int scr_addr,
1222 cvmx_pow_wait_t wait)
1223{
1224 if (CVMX_ENABLE_POW_CHECKS)
1225 __cvmx_pow_warn_if_pending_switch(__func__);
1226
1227 /* Must not have a switch pending when requesting work */
1228 cvmx_pow_tag_sw_wait();
1229 cvmx_pow_work_request_async_nocheck(scr_addr, wait);
1230}
1231
1232/**
1233 * Gets result of asynchronous work request. Performs a IOBDMA sync
1234 * to wait for the response.
1235 *
1236 * @scr_addr: Scratch memory address to get result from Byte address,
1237 * must be 8 byte aligned.
1238 *
1239 * Returns Returns the WQE from the scratch register, or NULL if no
1240 * work was available.
1241 */
1242static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
1243{
1244 cvmx_pow_tag_load_resp_t result;
1245
1246 CVMX_SYNCIOBDMA;
1247 result.u64 = cvmx_scratch_read64(scr_addr);
1248
1249 if (result.s_work.no_work)
1250 return NULL;
1251 else
1252 return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.s_work.addr);
1253}
1254
1255/**
1256 * Checks if a work queue entry pointer returned by a work
1257 * request is valid. It may be invalid due to no work
1258 * being available or due to a timeout.
1259 *
1260 * @wqe_ptr: pointer to a work queue entry returned by the POW
1261 *
1262 * Returns 0 if pointer is valid
1263 * 1 if invalid (no work was returned)
1264 */
1265static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
1266{
1267 return wqe_ptr == NULL;
1268}
1269
/**
 * Starts a tag switch to the provided tag value and tag type.
 * Completion for the tag switch must be checked for separately. This
 * function does NOT update the work queue entry in dram to match tag
 * value and type, so the application must keep track of these if they
 * are important to the application. This tag switch command must not
 * be used for switches to NULL, as the tag switch pending bit will be
 * set by the switch request, but never cleared by the hardware.
 *
 * NOTE: This should not be used when switching from a NULL tag. Use
 * cvmx_pow_tag_sw_full() instead.
 *
 * This function does no checks, so the caller must ensure that any
 * previous tag switch has completed.
 *
 * @tag: new tag value
 * @tag_type: new tag type (ordered or atomic)
 */
static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
					   enum cvmx_pow_tag_type tag_type)
{
	cvmx_addr_t ptr;
	cvmx_pow_tag_req_t tag_req;

	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_req_t current_tag;
		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		/*
		 * Diagnose likely caller errors: switching away from a
		 * NULL/NULL_NULL state, switching to the identical tag,
		 * or switching to NULL with the wrong primitive. These
		 * only warn; the request below is still issued.
		 */
		if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
			pr_warning("%s called with NULL_NULL tag\n",
				   __func__);
		if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
			pr_warning("%s called with NULL tag\n", __func__);
		if ((current_tag.s.type == tag_type)
		    && (current_tag.s.tag == tag))
			pr_warning("%s called to perform a tag switch to the "
				   "same tag\n",
				   __func__);
		if (tag_type == CVMX_POW_TAG_TYPE_NULL)
			pr_warning("%s called to perform a tag switch to "
				   "NULL. Use cvmx_pow_tag_sw_null() instead\n",
				   __func__);
	}

	/*
	 * Note that WQE in DRAM is not updated here, as the POW does
	 * not read from DRAM once the WQE is in flight. See hardware
	 * manual for complete details. It is the application's
	 * responsibility to keep track of the current tag value if
	 * that is important.
	 */

	/* Build the SWTAG command word. */
	tag_req.u64 = 0;
	tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
	tag_req.s.tag = tag;
	tag_req.s.type = tag_type;

	/* Build the I/O address that routes this store to the POW. */
	ptr.u64 = 0;
	ptr.sio.mem_region = CVMX_IO_SEG;
	ptr.sio.is_io = 1;
	ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;

	/* once this store arrives at POW, it will attempt the switch
	   software must wait for the switch to complete separately */
	cvmx_write_io(ptr.u64, tag_req.u64);
}
1336
1337/**
1338 * Starts a tag switch to the provided tag value and tag type.
1339 * Completion for the tag switch must be checked for separately. This
1340 * function does NOT update the work queue entry in dram to match tag
1341 * value and type, so the application must keep track of these if they
1342 * are important to the application. This tag switch command must not
1343 * be used for switches to NULL, as the tag switch pending bit will be
1344 * set by the switch request, but never cleared by the hardware.
1345 *
1346 * NOTE: This should not be used when switching from a NULL tag. Use
1347 * cvmx_pow_tag_sw_full() instead.
1348 *
1349 * This function waits for any previous tag switch to complete, and also
1350 * displays an error on tag switches to NULL.
1351 *
1352 * @tag: new tag value
1353 * @tag_type: new tag type (ordered or atomic)
1354 */
1355static inline void cvmx_pow_tag_sw(uint32_t tag,
1356 enum cvmx_pow_tag_type tag_type)
1357{
1358 if (CVMX_ENABLE_POW_CHECKS)
1359 __cvmx_pow_warn_if_pending_switch(__func__);
1360
1361 /*
1362 * Note that WQE in DRAM is not updated here, as the POW does
1363 * not read from DRAM once the WQE is in flight. See hardware
1364 * manual for complete details. It is the application's
1365 * responsibility to keep track of the current tag value if
1366 * that is important.
1367 */
1368
1369 /*
1370 * Ensure that there is not a pending tag switch, as a tag
1371 * switch cannot be started if a previous switch is still
1372 * pending.
1373 */
1374 cvmx_pow_tag_sw_wait();
1375 cvmx_pow_tag_sw_nocheck(tag, tag_type);
1376}
1377
/**
 * Starts a tag switch to the provided tag value and tag type.
 * Completion for the tag switch must be checked for separately. This
 * function does NOT update the work queue entry in dram to match tag
 * value and type, so the application must keep track of these if they
 * are important to the application. This tag switch command must not
 * be used for switches to NULL, as the tag switch pending bit will be
 * set by the switch request, but never cleared by the hardware.
 *
 * This function must be used for tag switches from NULL.
 *
 * This function does no checks, so the caller must ensure that any
 * previous tag switch has completed.
 *
 * @wqp: pointer to work queue entry to submit. This entry is
 * updated to match the other parameters
 * @tag: tag value to be assigned to work queue entry
 * @tag_type: type of tag
 * @group: group value for the work queue entry.
 */
static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
						enum cvmx_pow_tag_type tag_type,
						uint64_t group)
{
	cvmx_addr_t ptr;
	cvmx_pow_tag_req_t tag_req;

	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_req_t current_tag;
		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
			pr_warning("%s called with NULL_NULL tag\n",
				   __func__);
		if ((current_tag.s.type == tag_type)
		    && (current_tag.s.tag == tag))
			pr_warning("%s called to perform a tag switch to "
				   "the same tag\n",
				   __func__);
		if (tag_type == CVMX_POW_TAG_TYPE_NULL)
			pr_warning("%s called to perform a tag switch to "
				   "NULL. Use cvmx_pow_tag_sw_null() instead\n",
				   __func__);
		/*
		 * NOTE(review): 0x80 appears to be a sentinel physical
		 * address meaning "WQE unknown/not tracked" — confirm
		 * against the SDK before relying on this.
		 */
		if (wqp != cvmx_phys_to_ptr(0x80))
			if (wqp != cvmx_pow_get_current_wqp())
				pr_warning("%s passed WQE(%p) doesn't match "
					   "the address in the POW(%p)\n",
					   __func__, wqp,
					   cvmx_pow_get_current_wqp());
	}

	/*
	 * Note that WQE in DRAM is not updated here, as the POW does
	 * not read from DRAM once the WQE is in flight. See hardware
	 * manual for complete details. It is the application's
	 * responsibility to keep track of the current tag value if
	 * that is important.
	 */

	/* Build the SWTAG_FULL command word. */
	tag_req.u64 = 0;
	tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_FULL;
	tag_req.s.tag = tag;
	tag_req.s.type = tag_type;
	tag_req.s.grp = group;

	/* The WQE address rides in the I/O address offset field. */
	ptr.u64 = 0;
	ptr.sio.mem_region = CVMX_IO_SEG;
	ptr.sio.is_io = 1;
	ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
	ptr.sio.offset = CAST64(wqp);

	/*
	 * once this store arrives at POW, it will attempt the switch
	 * software must wait for the switch to complete separately.
	 */
	cvmx_write_io(ptr.u64, tag_req.u64);
}
1455
1456/**
1457 * Starts a tag switch to the provided tag value and tag type.
1458 * Completion for the tag switch must be checked for separately. This
1459 * function does NOT update the work queue entry in dram to match tag
1460 * value and type, so the application must keep track of these if they
1461 * are important to the application. This tag switch command must not
1462 * be used for switches to NULL, as the tag switch pending bit will be
1463 * set by the switch request, but never cleared by the hardware.
1464 *
1465 * This function must be used for tag switches from NULL.
1466 *
1467 * This function waits for any pending tag switches to complete
1468 * before requesting the tag switch.
1469 *
1470 * @wqp: pointer to work queue entry to submit. This entry is updated
1471 * to match the other parameters
1472 * @tag: tag value to be assigned to work queue entry
1473 * @tag_type: type of tag
1474 * @group: group value for the work queue entry.
1475 */
1476static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag,
1477 enum cvmx_pow_tag_type tag_type,
1478 uint64_t group)
1479{
1480 if (CVMX_ENABLE_POW_CHECKS)
1481 __cvmx_pow_warn_if_pending_switch(__func__);
1482
1483 /*
1484 * Ensure that there is not a pending tag switch, as a tag
1485 * switch cannot be started if a previous switch is still
1486 * pending.
1487 */
1488 cvmx_pow_tag_sw_wait();
1489 cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
1490}
1491
/**
 * Switch to a NULL tag, which ends any ordering or
 * synchronization provided by the POW for the current
 * work queue entry. This operation completes immediately,
 * so completion should not be waited for.
 * This function does NOT wait for previous tag switches to complete,
 * so the caller must ensure that any previous tag switches have completed.
 */
static inline void cvmx_pow_tag_sw_null_nocheck(void)
{
	cvmx_addr_t ptr;
	cvmx_pow_tag_req_t tag_req;

	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_req_t current_tag;
		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		/* Warn-only diagnostics; the request is still issued. */
		if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
			pr_warning("%s called with NULL_NULL tag\n",
				   __func__);
		if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
			pr_warning("%s called when we already have a "
				   "NULL tag\n",
				   __func__);
	}

	/* Build a SWTAG to the NULL tag type. */
	tag_req.u64 = 0;
	tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
	tag_req.s.type = CVMX_POW_TAG_TYPE_NULL;

	/* Route the store to the POW via the TAG1 device ID. */
	ptr.u64 = 0;
	ptr.sio.mem_region = CVMX_IO_SEG;
	ptr.sio.is_io = 1;
	ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;

	cvmx_write_io(ptr.u64, tag_req.u64);

	/* switch to NULL completes immediately */
}
1531
1532/**
1533 * Switch to a NULL tag, which ends any ordering or
1534 * synchronization provided by the POW for the current
1535 * work queue entry. This operation completes immediatly,
1536 * so completetion should not be waited for.
1537 * This function waits for any pending tag switches to complete
1538 * before requesting the switch to NULL.
1539 */
1540static inline void cvmx_pow_tag_sw_null(void)
1541{
1542 if (CVMX_ENABLE_POW_CHECKS)
1543 __cvmx_pow_warn_if_pending_switch(__func__);
1544
1545 /*
1546 * Ensure that there is not a pending tag switch, as a tag
1547 * switch cannot be started if a previous switch is still
1548 * pending.
1549 */
1550 cvmx_pow_tag_sw_wait();
1551 cvmx_pow_tag_sw_null_nocheck();
1552
1553 /* switch to NULL completes immediately */
1554}
1555
/**
 * Submits work to an input queue. This function updates the work
 * queue entry in DRAM to match the arguments given. Note that the
 * tag provided is for the work queue entry submitted, and is
 * unrelated to the tag that the core currently holds.
 *
 * @wqp: pointer to work queue entry to submit. This entry is
 * updated to match the other parameters
 * @tag: tag value to be assigned to work queue entry
 * @tag_type: type of tag
 * @qos: Input queue to add to.
 * @grp: group value for the work queue entry.
 */
static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
					enum cvmx_pow_tag_type tag_type,
					uint64_t qos, uint64_t grp)
{
	cvmx_addr_t ptr;
	cvmx_pow_tag_req_t tag_req;

	/* Mirror the submit parameters into the WQE itself, since the
	   POW may read the entry from DRAM for ADDWQ. */
	wqp->qos = qos;
	wqp->tag = tag;
	wqp->tag_type = tag_type;
	wqp->grp = grp;

	/* Build the ADDWQ command word. */
	tag_req.u64 = 0;
	tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ;
	tag_req.s.type = tag_type;
	tag_req.s.tag = tag;
	tag_req.s.qos = qos;
	tag_req.s.grp = grp;

	/* The WQE physical address rides in the I/O offset field. */
	ptr.u64 = 0;
	ptr.sio.mem_region = CVMX_IO_SEG;
	ptr.sio.is_io = 1;
	ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
	ptr.sio.offset = cvmx_ptr_to_phys(wqp);

	/*
	 * SYNC write to memory before the work submit. This is
	 * necessary as POW may read values from DRAM at this time.
	 */
	CVMX_SYNCWS;
	cvmx_write_io(ptr.u64, tag_req.u64);
}
1601
1602/**
1603 * This function sets the group mask for a core. The group mask
1604 * indicates which groups each core will accept work from. There are
1605 * 16 groups.
1606 *
1607 * @core_num: core to apply mask to
1608 * @mask: Group mask. There are 16 groups, so only bits 0-15 are valid,
1609 * representing groups 0-15.
1610 * Each 1 bit in the mask enables the core to accept work from
1611 * the corresponding group.
1612 */
1613static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
1614{
1615 union cvmx_pow_pp_grp_mskx grp_msk;
1616
1617 grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
1618 grp_msk.s.grp_msk = mask;
1619 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
1620}
1621
1622/**
1623 * This function sets POW static priorities for a core. Each input queue has
1624 * an associated priority value.
1625 *
1626 * @core_num: core to apply priorities to
1627 * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
1628 * Highest priority is 0 and lowest is 7. A priority value
1629 * of 0xF instructs POW to skip the Input Queue when
1630 * scheduling to this specific core.
1631 * NOTE: priorities should not have gaps in values, meaning
1632 * {0,1,1,1,1,1,1,1} is a valid configuration while
1633 * {0,2,2,2,2,2,2,2} is not.
1634 */
1635static inline void cvmx_pow_set_priority(uint64_t core_num,
1636 const uint8_t priority[])
1637{
1638 /* POW priorities are supported on CN5xxx and later */
1639 if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
1640 union cvmx_pow_pp_grp_mskx grp_msk;
1641
1642 grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
1643 grp_msk.s.qos0_pri = priority[0];
1644 grp_msk.s.qos1_pri = priority[1];
1645 grp_msk.s.qos2_pri = priority[2];
1646 grp_msk.s.qos3_pri = priority[3];
1647 grp_msk.s.qos4_pri = priority[4];
1648 grp_msk.s.qos5_pri = priority[5];
1649 grp_msk.s.qos6_pri = priority[6];
1650 grp_msk.s.qos7_pri = priority[7];
1651
1652 /* Detect gaps between priorities and flag error */
1653 {
1654 int i;
1655 uint32_t prio_mask = 0;
1656
1657 for (i = 0; i < 8; i++)
1658 if (priority[i] != 0xF)
1659 prio_mask |= 1 << priority[i];
1660
1661 if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {
1662 pr_err("POW static priorities should be "
1663 "contiguous (0x%llx)\n",
1664 (unsigned long long)prio_mask);
1665 return;
1666 }
1667 }
1668
1669 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
1670 }
1671}
1672
/**
 * Performs a tag switch and then an immediate deschedule. This completes
 * immediately, so completion must not be waited for. This function does NOT
 * update the wqe in DRAM to match arguments.
 *
 * This function does NOT wait for any prior tag switches to complete, so the
 * calling code must do this.
 *
 * Note the following CAVEAT of the Octeon HW behavior when
 * re-scheduling DE-SCHEDULEd items whose (next) state is
 * ORDERED:
 * - If there are no switches pending at the time that the
 * HW executes the de-schedule, the HW will only re-schedule
 * the head of the FIFO associated with the given tag. This
 * means that in many respects, the HW treats this ORDERED
 * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
 * case (to an ORDERED tag), the HW will do the switch
 * before the deschedule whenever it is possible to do
 * the switch immediately, so it may often look like
 * this case.
 * - If there is a pending switch to ORDERED at the time
 * the HW executes the de-schedule, the HW will perform
 * the switch at the time it re-schedules, and will be
 * able to reschedule any/all of the entries with the
 * same tag.
 * Due to this behavior, the RECOMMENDATION to software is
 * that they have a (next) state of ATOMIC when they
 * DE-SCHEDULE. If an ORDERED tag is what was really desired,
 * SW can choose to immediately switch to an ORDERED tag
 * after the work (that has an ATOMIC tag) is re-scheduled.
 * Note that since there are never any tag switches pending
 * when the HW re-schedules, this switch can be IMMEDIATE upon
 * the reception of the pointer during the re-schedule.
 *
 * @tag: New tag value
 * @tag_type: New tag type
 * @group: New group value
 * @no_sched: Control whether this work queue entry will be rescheduled.
 * - 1 : don't schedule this work
 * - 0 : allow this work to be scheduled.
 */
static inline void cvmx_pow_tag_sw_desched_nocheck(
	uint32_t tag,
	enum cvmx_pow_tag_type tag_type,
	uint64_t group,
	uint64_t no_sched)
{
	cvmx_addr_t ptr;
	cvmx_pow_tag_req_t tag_req;

	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_req_t current_tag;
		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		/* Warn-only diagnostics; the request is still issued. */
		if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
			pr_warning("%s called with NULL_NULL tag\n",
				   __func__);
		if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
			pr_warning("%s called with NULL tag. Deschedule not "
				   "allowed from NULL state\n",
				   __func__);
		/* See the CAVEAT above: ATOMIC should be involved on at
		   least one side of the switch. */
		if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC)
		    && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC))
			pr_warning("%s called where neither the before or "
				   "after tag is ATOMIC\n",
				   __func__);
	}

	/* Build the SWTAG_DESCH command word. */
	tag_req.u64 = 0;
	tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
	tag_req.s.tag = tag;
	tag_req.s.type = tag_type;
	tag_req.s.grp = group;
	tag_req.s.no_sched = no_sched;

	ptr.u64 = 0;
	ptr.sio.mem_region = CVMX_IO_SEG;
	ptr.sio.is_io = 1;
	ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
	/*
	 * since TAG3 is used, this store will clear the local pending
	 * switch bit.
	 */
	cvmx_write_io(ptr.u64, tag_req.u64);
}
1758
1759/**
1760 * Performs a tag switch and then an immediate deschedule. This completes
1761 * immediatly, so completion must not be waited for. This function does NOT
1762 * update the wqe in DRAM to match arguments.
1763 *
1764 * This function waits for any prior tag switches to complete, so the
1765 * calling code may call this function with a pending tag switch.
1766 *
1767 * Note the following CAVEAT of the Octeon HW behavior when
1768 * re-scheduling DE-SCHEDULEd items whose (next) state is
1769 * ORDERED:
1770 * - If there are no switches pending at the time that the
1771 * HW executes the de-schedule, the HW will only re-schedule
1772 * the head of the FIFO associated with the given tag. This
1773 * means that in many respects, the HW treats this ORDERED
1774 * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
1775 * case (to an ORDERED tag), the HW will do the switch
1776 * before the deschedule whenever it is possible to do
1777 * the switch immediately, so it may often look like
1778 * this case.
1779 * - If there is a pending switch to ORDERED at the time
1780 * the HW executes the de-schedule, the HW will perform
1781 * the switch at the time it re-schedules, and will be
1782 * able to reschedule any/all of the entries with the
1783 * same tag.
1784 * Due to this behavior, the RECOMMENDATION to software is
1785 * that they have a (next) state of ATOMIC when they
1786 * DE-SCHEDULE. If an ORDERED tag is what was really desired,
1787 * SW can choose to immediately switch to an ORDERED tag
1788 * after the work (that has an ATOMIC tag) is re-scheduled.
1789 * Note that since there are never any tag switches pending
1790 * when the HW re-schedules, this switch can be IMMEDIATE upon
1791 * the reception of the pointer during the re-schedule.
1792 *
1793 * @tag: New tag value
1794 * @tag_type: New tag type
1795 * @group: New group value
1796 * @no_sched: Control whether this work queue entry will be rescheduled.
1797 * - 1 : don't schedule this work
1798 * - 0 : allow this work to be scheduled.
1799 */
1800static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
1801 enum cvmx_pow_tag_type tag_type,
1802 uint64_t group, uint64_t no_sched)
1803{
1804 if (CVMX_ENABLE_POW_CHECKS)
1805 __cvmx_pow_warn_if_pending_switch(__func__);
1806
1807 /* Need to make sure any writes to the work queue entry are complete */
1808 CVMX_SYNCWS;
1809 /*
1810 * Ensure that there is not a pending tag switch, as a tag
1811 * switch cannot be started if a previous switch is still
1812 * pending.
1813 */
1814 cvmx_pow_tag_sw_wait();
1815 cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
1816}
1817
/**
 * Deschedules the current work queue entry.
 *
 * @no_sched: no schedule flag value to be set on the work queue
 * entry. If this is set the entry will not be
 * rescheduled.
 */
static inline void cvmx_pow_desched(uint64_t no_sched)
{
	cvmx_addr_t ptr;
	cvmx_pow_tag_req_t tag_req;

	if (CVMX_ENABLE_POW_CHECKS) {
		cvmx_pow_tag_req_t current_tag;
		__cvmx_pow_warn_if_pending_switch(__func__);
		current_tag = cvmx_pow_get_current_tag();
		/* Warn-only diagnostics; the request is still issued. */
		if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
			pr_warning("%s called with NULL_NULL tag\n",
				   __func__);
		if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
			pr_warning("%s called with NULL tag. Deschedule not "
				   "expected from NULL state\n",
				   __func__);
	}

	/* Need to make sure any writes to the work queue entry are complete */
	CVMX_SYNCWS;

	/* Build the DESCH command word. */
	tag_req.u64 = 0;
	tag_req.s.op = CVMX_POW_TAG_OP_DESCH;
	tag_req.s.no_sched = no_sched;

	ptr.u64 = 0;
	ptr.sio.mem_region = CVMX_IO_SEG;
	ptr.sio.is_io = 1;
	ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
	/*
	 * since TAG3 is used, this store will clear the local pending
	 * switch bit.
	 */
	cvmx_write_io(ptr.u64, tag_req.u64);
}
1860
1861/****************************************************
1862* Define usage of bits within the 32 bit tag values.
1863*****************************************************/
1864
1865/*
1866 * Number of bits of the tag used by software. The SW bits are always
1867 * a contiguous block of the high starting at bit 31. The hardware
1868 * bits are always the low bits. By default, the top 8 bits of the
1869 * tag are reserved for software, and the low 24 are set by the IPD
1870 * unit.
1871 */
1872#define CVMX_TAG_SW_BITS (8)
1873#define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)
1874
1875/* Below is the list of values for the top 8 bits of the tag. */
1876/*
1877 * Tag values with top byte of this value are reserved for internal
1878 * executive uses.
1879 */
1880#define CVMX_TAG_SW_BITS_INTERNAL 0x1
1881/* The executive divides the remaining 24 bits as follows:
1882 * - the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
1883 *
 * - the lower 16 bits (bits 15 - 0 of the tag) are the value
 *   within the subgroup
1886 *
1887 * Note that this section describes the format of tags generated by
1888 * software - refer to the hardware documentation for a description of
1889 * the tags values generated by the packet input hardware. Subgroups
1890 * are defined here.
1891 */
1892/* Mask for the value portion of the tag */
1893#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
1894#define CVMX_TAG_SUBGROUP_SHIFT 16
1895#define CVMX_TAG_SUBGROUP_PKO 0x1
1896
1897/* End of executive tag subgroup definitions */
1898
1899/*
1900 * The remaining values software bit values 0x2 - 0xff are available
1901 * for application use.
1902 */
1903
1904/**
1905 * This function creates a 32 bit tag value from the two values provided.
1906 *
1907 * @sw_bits: The upper bits (number depends on configuration) are set
1908 * to this value. The remainder of bits are set by the
1909 * hw_bits parameter.
1910 *
1911 * @hw_bits: The lower bits (number depends on configuration) are set
1912 * to this value. The remainder of bits are set by the
1913 * sw_bits parameter.
1914 *
1915 * Returns 32 bit value of the combined hw and sw bits.
1916 */
1917static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
1918{
1919 return ((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) <<
1920 CVMX_TAG_SW_SHIFT) |
1921 (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
1922}
1923
1924/**
1925 * Extracts the bits allocated for software use from the tag
1926 *
1927 * @tag: 32 bit tag value
1928 *
1929 * Returns N bit software tag value, where N is configurable with the
1930 * CVMX_TAG_SW_BITS define
1931 */
1932static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
1933{
1934 return (tag >> (32 - CVMX_TAG_SW_BITS)) &
1935 cvmx_build_mask(CVMX_TAG_SW_BITS);
1936}
1937
1938/**
1939 *
1940 * Extracts the bits allocated for hardware use from the tag
1941 *
1942 * @tag: 32 bit tag value
1943 *
1944 * Returns (32 - N) bit software tag value, where N is configurable
1945 * with the CVMX_TAG_SW_BITS define
1946 */
1947static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
1948{
1949 return tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS);
1950}
1951
1952/**
1953 * Store the current POW internal state into the supplied
1954 * buffer. It is recommended that you pass a buffer of at least
1955 * 128KB. The format of the capture may change based on SDK
1956 * version and Octeon chip.
1957 *
1958 * @buffer: Buffer to store capture into
1959 * @buffer_size:
1960 * The size of the supplied buffer
1961 *
 * Returns Zero on success, negative on failure
1963 */
1964extern int cvmx_pow_capture(void *buffer, int buffer_size);
1965
1966/**
1967 * Dump a POW capture to the console in a human readable format.
1968 *
1969 * @buffer: POW capture from cvmx_pow_capture()
1970 * @buffer_size:
1971 * Size of the buffer
1972 */
1973extern void cvmx_pow_display(void *buffer, int buffer_size);
1974
1975/**
1976 * Return the number of POW entries supported by this chip
1977 *
1978 * Returns Number of POW entries
1979 */
1980extern int cvmx_pow_get_num_entries(void);
1981
1982#endif /* __CVMX_POW_H__ */
diff --git a/drivers/staging/octeon/cvmx-scratch.h b/drivers/staging/octeon/cvmx-scratch.h
new file mode 100644
index 000000000000..96b70cfd6245
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-scratch.h
@@ -0,0 +1,139 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 *
30 * This file provides support for the processor local scratch memory.
31 * Scratch memory is byte addressable - all addresses are byte addresses.
32 *
33 */
34
35#ifndef __CVMX_SCRATCH_H__
36#define __CVMX_SCRATCH_H__
37
38/*
39 * Note: This define must be a long, not a long long in order to
40 * compile without warnings for both 32bit and 64bit.
41 */
42#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
43
44/**
45 * Reads an 8 bit value from the processor local scratchpad memory.
46 *
47 * @address: byte address to read from
48 *
49 * Returns value read
50 */
51static inline uint8_t cvmx_scratch_read8(uint64_t address)
52{
53 return *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address);
54}
55
56/**
57 * Reads a 16 bit value from the processor local scratchpad memory.
58 *
59 * @address: byte address to read from
60 *
61 * Returns value read
62 */
63static inline uint16_t cvmx_scratch_read16(uint64_t address)
64{
65 return *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address);
66}
67
68/**
69 * Reads a 32 bit value from the processor local scratchpad memory.
70 *
71 * @address: byte address to read from
72 *
73 * Returns value read
74 */
75static inline uint32_t cvmx_scratch_read32(uint64_t address)
76{
77 return *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address);
78}
79
80/**
81 * Reads a 64 bit value from the processor local scratchpad memory.
82 *
83 * @address: byte address to read from
84 *
85 * Returns value read
86 */
87static inline uint64_t cvmx_scratch_read64(uint64_t address)
88{
89 return *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address);
90}
91
92/**
93 * Writes an 8 bit value to the processor local scratchpad memory.
94 *
95 * @address: byte address to write to
96 * @value: value to write
97 */
98static inline void cvmx_scratch_write8(uint64_t address, uint64_t value)
99{
100 *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address) =
101 (uint8_t) value;
102}
103
/**
 * Writes a 16 bit value to the processor local scratchpad memory.
 * (Doc previously said "32 bit" — the code writes 16 bits.)
 *
 * @address: byte address to write to
 * @value: value to write (truncated to 16 bits)
 */
static inline void cvmx_scratch_write16(uint64_t address, uint64_t value)
{
	*CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address) =
	    (uint16_t) value;
}
115
/**
 * Writes a 32 bit value to the processor local scratchpad memory.
 * (Doc previously said "16 bit" — the code writes 32 bits.)
 *
 * @address: byte address to write to
 * @value: value to write (truncated to 32 bits)
 */
static inline void cvmx_scratch_write32(uint64_t address, uint64_t value)
{
	*CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address) =
	    (uint32_t) value;
}
127
128/**
129 * Writes a 64 bit value to the processor local scratchpad memory.
130 *
131 * @address: byte address to write to
132 * @value: value to write
133 */
134static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
135{
136 *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address) = value;
137}
138
139#endif /* __CVMX_SCRATCH_H__ */
diff --git a/drivers/staging/octeon/cvmx-smix-defs.h b/drivers/staging/octeon/cvmx-smix-defs.h
new file mode 100644
index 000000000000..9ae45fcbe3e3
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-smix-defs.h
@@ -0,0 +1,178 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_SMIX_DEFS_H__
29#define __CVMX_SMIX_DEFS_H__
30
/*
 * CSR addresses for the SMI/MDIO management interface.  @offset selects
 * one of two SMI buses (masked to 1 bit); each bus's register block is
 * 256 bytes apart in IO space.
 */
#define CVMX_SMIX_CLK(offset) \
	 CVMX_ADD_IO_SEG(0x0001180000001818ull + (((offset) & 1) * 256))
#define CVMX_SMIX_CMD(offset) \
	 CVMX_ADD_IO_SEG(0x0001180000001800ull + (((offset) & 1) * 256))
#define CVMX_SMIX_EN(offset) \
	 CVMX_ADD_IO_SEG(0x0001180000001820ull + (((offset) & 1) * 256))
#define CVMX_SMIX_RD_DAT(offset) \
	 CVMX_ADD_IO_SEG(0x0001180000001810ull + (((offset) & 1) * 256))
#define CVMX_SMIX_WR_DAT(offset) \
	 CVMX_ADD_IO_SEG(0x0001180000001808ull + (((offset) & 1) * 256))
41
/*
 * SMI(X)_CLK: clock/timing configuration for the SMI/MDIO bus.
 * Field layout varies by Octeon model; the per-model structs below
 * overlay the same 64 bit register.  Field semantics (sample timing,
 * preamble, clock phase) are per the Octeon HRM — not verifiable from
 * this file alone.
 */
union cvmx_smix_clk {
	uint64_t u64;
	/* Superset layout (CN52XX/CN56XX and later) */
	struct cvmx_smix_clk_s {
		uint64_t reserved_25_63:39;
		uint64_t mode:1;
		uint64_t reserved_21_23:3;
		uint64_t sample_hi:5;
		uint64_t sample_mode:1;
		uint64_t reserved_14_14:1;
		uint64_t clk_idle:1;
		uint64_t preamble:1;
		uint64_t sample:4;
		uint64_t phase:8;
	} s;
	/* CN30XX family: no mode/sample_mode bits */
	struct cvmx_smix_clk_cn30xx {
		uint64_t reserved_21_63:43;
		uint64_t sample_hi:5;
		uint64_t reserved_14_15:2;
		uint64_t clk_idle:1;
		uint64_t preamble:1;
		uint64_t sample:4;
		uint64_t phase:8;
	} cn30xx;
	struct cvmx_smix_clk_cn30xx cn31xx;
	struct cvmx_smix_clk_cn30xx cn38xx;
	struct cvmx_smix_clk_cn30xx cn38xxp2;
	/* CN50XX family: has mode bit but no sample_mode */
	struct cvmx_smix_clk_cn50xx {
		uint64_t reserved_25_63:39;
		uint64_t mode:1;
		uint64_t reserved_21_23:3;
		uint64_t sample_hi:5;
		uint64_t reserved_14_15:2;
		uint64_t clk_idle:1;
		uint64_t preamble:1;
		uint64_t sample:4;
		uint64_t phase:8;
	} cn50xx;
	struct cvmx_smix_clk_s cn52xx;
	struct cvmx_smix_clk_cn50xx cn52xxp1;
	struct cvmx_smix_clk_s cn56xx;
	struct cvmx_smix_clk_cn50xx cn56xxp1;
	struct cvmx_smix_clk_cn30xx cn58xx;
	struct cvmx_smix_clk_cn30xx cn58xxp1;
};
86
/*
 * SMI(X)_CMD: launch an MDIO transaction.  phy_op selects the
 * operation (2 bits on CN50XX+, 1 bit on older models), phy_adr the
 * PHY address, reg_adr the register within the PHY.
 */
union cvmx_smix_cmd {
	uint64_t u64;
	struct cvmx_smix_cmd_s {
		uint64_t reserved_18_63:46;
		uint64_t phy_op:2;
		uint64_t reserved_13_15:3;
		uint64_t phy_adr:5;
		uint64_t reserved_5_7:3;
		uint64_t reg_adr:5;
	} s;
	/* Older models: single-bit phy_op */
	struct cvmx_smix_cmd_cn30xx {
		uint64_t reserved_17_63:47;
		uint64_t phy_op:1;
		uint64_t reserved_13_15:3;
		uint64_t phy_adr:5;
		uint64_t reserved_5_7:3;
		uint64_t reg_adr:5;
	} cn30xx;
	struct cvmx_smix_cmd_cn30xx cn31xx;
	struct cvmx_smix_cmd_cn30xx cn38xx;
	struct cvmx_smix_cmd_cn30xx cn38xxp2;
	struct cvmx_smix_cmd_s cn50xx;
	struct cvmx_smix_cmd_s cn52xx;
	struct cvmx_smix_cmd_s cn52xxp1;
	struct cvmx_smix_cmd_s cn56xx;
	struct cvmx_smix_cmd_s cn56xxp1;
	struct cvmx_smix_cmd_cn30xx cn58xx;
	struct cvmx_smix_cmd_cn30xx cn58xxp1;
};
116
/*
 * SMI(X)_EN: interface enable.  Single 'en' bit; layout is identical
 * on all supported models.
 */
union cvmx_smix_en {
	uint64_t u64;
	struct cvmx_smix_en_s {
		uint64_t reserved_1_63:63;
		uint64_t en:1;
	} s;
	struct cvmx_smix_en_s cn30xx;
	struct cvmx_smix_en_s cn31xx;
	struct cvmx_smix_en_s cn38xx;
	struct cvmx_smix_en_s cn38xxp2;
	struct cvmx_smix_en_s cn50xx;
	struct cvmx_smix_en_s cn52xx;
	struct cvmx_smix_en_s cn52xxp1;
	struct cvmx_smix_en_s cn56xx;
	struct cvmx_smix_en_s cn56xxp1;
	struct cvmx_smix_en_s cn58xx;
	struct cvmx_smix_en_s cn58xxp1;
};
135
/*
 * SMI(X)_RD_DAT: result of an MDIO read.  'pending' indicates the
 * transaction is still in flight, 'val' that 'dat' holds valid read
 * data.  Layout is identical on all supported models.
 */
union cvmx_smix_rd_dat {
	uint64_t u64;
	struct cvmx_smix_rd_dat_s {
		uint64_t reserved_18_63:46;
		uint64_t pending:1;
		uint64_t val:1;
		uint64_t dat:16;
	} s;
	struct cvmx_smix_rd_dat_s cn30xx;
	struct cvmx_smix_rd_dat_s cn31xx;
	struct cvmx_smix_rd_dat_s cn38xx;
	struct cvmx_smix_rd_dat_s cn38xxp2;
	struct cvmx_smix_rd_dat_s cn50xx;
	struct cvmx_smix_rd_dat_s cn52xx;
	struct cvmx_smix_rd_dat_s cn52xxp1;
	struct cvmx_smix_rd_dat_s cn56xx;
	struct cvmx_smix_rd_dat_s cn56xxp1;
	struct cvmx_smix_rd_dat_s cn58xx;
	struct cvmx_smix_rd_dat_s cn58xxp1;
};
156
/*
 * SMI(X)_WR_DAT: data for an MDIO write; mirrors SMI(X)_RD_DAT with
 * the same pending/val status bits.  Layout is identical on all
 * supported models.
 */
union cvmx_smix_wr_dat {
	uint64_t u64;
	struct cvmx_smix_wr_dat_s {
		uint64_t reserved_18_63:46;
		uint64_t pending:1;
		uint64_t val:1;
		uint64_t dat:16;
	} s;
	struct cvmx_smix_wr_dat_s cn30xx;
	struct cvmx_smix_wr_dat_s cn31xx;
	struct cvmx_smix_wr_dat_s cn38xx;
	struct cvmx_smix_wr_dat_s cn38xxp2;
	struct cvmx_smix_wr_dat_s cn50xx;
	struct cvmx_smix_wr_dat_s cn52xx;
	struct cvmx_smix_wr_dat_s cn52xxp1;
	struct cvmx_smix_wr_dat_s cn56xx;
	struct cvmx_smix_wr_dat_s cn56xxp1;
	struct cvmx_smix_wr_dat_s cn58xx;
	struct cvmx_smix_wr_dat_s cn58xxp1;
};
177
178#endif
diff --git a/drivers/staging/octeon/cvmx-spi.c b/drivers/staging/octeon/cvmx-spi.c
new file mode 100644
index 000000000000..82794d920cec
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-spi.c
@@ -0,0 +1,667 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 *
30 * Support library for the SPI
31 */
32#include <asm/octeon/octeon.h>
33
34#include "cvmx-config.h"
35
36#include "cvmx-pko.h"
37#include "cvmx-spi.h"
38
39#include "cvmx-spxx-defs.h"
40#include "cvmx-stxx-defs.h"
41#include "cvmx-srxx-defs.h"
42
/*
 * Invoke an optional SPI init callback and propagate errors.
 *
 * NOTE: this macro relies on an 'int res' variable being in scope at
 * the expansion site, and performs a non-local 'return res' from the
 * enclosing function when the callback reports failure (non-zero).
 * A NULL callback pointer is silently skipped.
 */
#define INVOKE_CB(function_p, args...) \
	do { \
		if (function_p) { \
			res = function_p(args); \
			if (res) \
				return res; \
		} \
	} while (0)
51
#if CVMX_ENABLE_DEBUG_PRINTS
/* Human-readable names for cvmx_spi_mode_t values, indexed by mode;
 * only referenced from debug prints. */
static const char *modes[] =
    { "UNKNOWN", "TX Halfplex", "Rx Halfplex", "Duplex" };
#endif

/* Default callbacks, can be overridden
 * using cvmx_spi_get_callbacks/cvmx_spi_set_callbacks
 */
static cvmx_spi_callbacks_t cvmx_spi_callbacks = {
	.reset_cb = cvmx_spi_reset_cb,
	.calendar_setup_cb = cvmx_spi_calendar_setup_cb,
	.clock_detect_cb = cvmx_spi_clock_detect_cb,
	.training_cb = cvmx_spi_training_cb,
	.calendar_sync_cb = cvmx_spi_calendar_sync_cb,
	.interface_up_cb = cvmx_spi_interface_up_cb
};
68
69/**
70 * Get current SPI4 initialization callbacks
71 *
72 * @callbacks: Pointer to the callbacks structure.to fill
73 *
74 * Returns Pointer to cvmx_spi_callbacks_t structure.
75 */
76void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks)
77{
78 memcpy(callbacks, &cvmx_spi_callbacks, sizeof(cvmx_spi_callbacks));
79}
80
81/**
82 * Set new SPI4 initialization callbacks
83 *
84 * @new_callbacks: Pointer to an updated callbacks structure.
85 */
86void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks)
87{
88 memcpy(&cvmx_spi_callbacks, new_callbacks, sizeof(cvmx_spi_callbacks));
89}
90
/**
 * Initialize and start the SPI interface.
 *
 * Runs the configurable callback sequence: reset, calendar setup,
 * clock detection, link training, calendar sync, then interface up.
 * The first callback returning non-zero aborts the sequence and its
 * error code is returned (via the non-local return in INVOKE_CB).
 *
 * @interface: The identifier of the packet interface to configure and
 *             use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *             can operate as a full duplex (both Tx and Rx data paths
 *             active) or as a halfplex (either the Tx data path is
 *             active or the Rx data path is active, but not both).
 * @timeout:   Timeout to wait for clock synchronization in seconds
 * @num_ports: Number of SPI ports to configure
 *
 * Returns Zero on success, negative on failure.  Returns -1 on
 * models other than CN38XX/CN58XX, which are the only ones with SPI.
 */
int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout,
			     int num_ports)
{
	int res = -1;	/* updated by INVOKE_CB; stays -1 if unsupported */

	if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
		return res;

	/* Callback to perform SPI4 reset */
	INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);

	/* Callback to perform calendar setup */
	INVOKE_CB(cvmx_spi_callbacks.calendar_setup_cb, interface, mode,
		  num_ports);

	/* Callback to perform clock detection */
	INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);

	/* Callback to perform SPI4 link training */
	INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);

	/* Callback to perform calendar sync */
	INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
		  timeout);

	/* Callback to handle interface coming up */
	INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);

	return res;
}
135
/**
 * This routine restarts the SPI interface after it has lost
 * synchronization with its correspondent system.
 *
 * Same callback sequence as cvmx_spi_start_interface() except that
 * calendar setup is skipped (the calendar programmed at start time is
 * retained).
 *
 * @interface: The identifier of the packet interface to configure and
 *             use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *             can operate as a full duplex (both Tx and Rx data paths
 *             active) or as a halfplex (either the Tx data path is
 *             active or the Rx data path is active, but not both).
 * @timeout:   Timeout to wait for clock synchronization in seconds
 *
 * Returns Zero on success, negative on failure.  Returns -1 on
 * models other than CN38XX/CN58XX.
 */
int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
{
	int res = -1;

	if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
		return res;

	cvmx_dprintf("SPI%d: Restart %s\n", interface, modes[mode]);

	/* Callback to perform SPI4 reset */
	INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);

	/* NOTE: Calendar setup is not performed during restart */
	/* Refer to cvmx_spi_start_interface() for the full sequence */

	/* Callback to perform clock detection */
	INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);

	/* Callback to perform SPI4 link training */
	INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);

	/* Callback to perform calendar sync */
	INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
		  timeout);

	/* Callback to handle interface coming up */
	INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);

	return res;
}
180
181/**
182 * Callback to perform SPI4 reset
183 *
184 * @interface: The identifier of the packet interface to configure and
185 * use as a SPI interface.
186 * @mode: The operating mode for the SPI interface. The interface
187 * can operate as a full duplex (both Tx and Rx data paths
188 * active) or as a halfplex (either the Tx data path is
189 * active or the Rx data path is active, but not both).
190 *
191 * Returns Zero on success, non-zero error code on failure (will cause
192 * SPI initialization to abort)
193 */
194int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
195{
196 union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
197 union cvmx_spxx_clk_ctl spxx_clk_ctl;
198 union cvmx_spxx_bist_stat spxx_bist_stat;
199 union cvmx_spxx_int_msk spxx_int_msk;
200 union cvmx_stxx_int_msk stxx_int_msk;
201 union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
202 int index;
203 uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
204
205 /* Disable SPI error events while we run BIST */
206 spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
207 cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
208 stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
209 cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
210
211 /* Run BIST in the SPI interface */
212 cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
213 cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
214 spxx_clk_ctl.u64 = 0;
215 spxx_clk_ctl.s.runbist = 1;
216 cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
217 cvmx_wait(10 * MS);
218 spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
219 if (spxx_bist_stat.s.stat0)
220 cvmx_dprintf
221 ("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
222 interface);
223 if (spxx_bist_stat.s.stat1)
224 cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
225 interface);
226 if (spxx_bist_stat.s.stat2)
227 cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
228 interface);
229
230 /* Clear the calendar table after BIST to fix parity errors */
231 for (index = 0; index < 32; index++) {
232 union cvmx_srxx_spi4_calx srxx_spi4_calx;
233 union cvmx_stxx_spi4_calx stxx_spi4_calx;
234
235 srxx_spi4_calx.u64 = 0;
236 srxx_spi4_calx.s.oddpar = 1;
237 cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
238 srxx_spi4_calx.u64);
239
240 stxx_spi4_calx.u64 = 0;
241 stxx_spi4_calx.s.oddpar = 1;
242 cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
243 stxx_spi4_calx.u64);
244 }
245
246 /* Re enable reporting of error interrupts */
247 cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
248 cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
249 cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
250 cvmx_write_csr(CVMX_STXX_INT_REG(interface),
251 cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
252 cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
253
254 /* Setup the CLKDLY right in the middle */
255 spxx_clk_ctl.u64 = 0;
256 spxx_clk_ctl.s.seetrn = 0;
257 spxx_clk_ctl.s.clkdly = 0x10;
258 spxx_clk_ctl.s.runbist = 0;
259 spxx_clk_ctl.s.statdrv = 0;
260 /* This should always be on the opposite edge as statdrv */
261 spxx_clk_ctl.s.statrcv = 1;
262 spxx_clk_ctl.s.sndtrn = 0;
263 spxx_clk_ctl.s.drptrn = 0;
264 spxx_clk_ctl.s.rcvtrn = 0;
265 spxx_clk_ctl.s.srxdlck = 0;
266 cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
267 cvmx_wait(100 * MS);
268
269 /* Reset SRX0 DLL */
270 spxx_clk_ctl.s.srxdlck = 1;
271 cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
272
273 /* Waiting for Inf0 Spi4 RX DLL to lock */
274 cvmx_wait(100 * MS);
275
276 /* Enable dynamic alignment */
277 spxx_trn4_ctl.s.trntest = 0;
278 spxx_trn4_ctl.s.jitter = 1;
279 spxx_trn4_ctl.s.clr_boot = 1;
280 spxx_trn4_ctl.s.set_boot = 0;
281 if (OCTEON_IS_MODEL(OCTEON_CN58XX))
282 spxx_trn4_ctl.s.maxdist = 3;
283 else
284 spxx_trn4_ctl.s.maxdist = 8;
285 spxx_trn4_ctl.s.macro_en = 1;
286 spxx_trn4_ctl.s.mux_en = 1;
287 cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
288
289 spxx_dbg_deskew_ctl.u64 = 0;
290 cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
291 spxx_dbg_deskew_ctl.u64);
292
293 return 0;
294}
295
/**
 * Callback to setup calendar and miscellaneous settings before clock detection
 *
 * @interface: The identifier of the packet interface to configure and
 *             use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *             can operate as a full duplex (both Tx and Rx data paths
 *             active) or as a halfplex (either the Tx data path is
 *             active or the Rx data path is active, but not both).
 * @num_ports: Number of ports to configure on SPI
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
			       int num_ports)
{
	int port;
	int index;
	if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
		union cvmx_srxx_com_ctl srxx_com_ctl;
		union cvmx_srxx_spi4_stat srxx_spi4_stat;

		/* SRX0 number of Ports */
		srxx_com_ctl.u64 = 0;
		srxx_com_ctl.s.prts = num_ports - 1;
		srxx_com_ctl.s.st_en = 0;
		srxx_com_ctl.s.inf_en = 0;
		cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);

		/* SRX0 Calendar Table. This round robbins through all ports.
		 * Each calendar entry holds 4 port numbers plus odd parity
		 * over the entry.
		 * NOTE(review): when num_ports is not a multiple of 4, the
		 * last entry's trailing slots receive port numbers beyond
		 * num_ports-1 — presumably ignored because SPI4_STAT.len is
		 * set to num_ports below; confirm against the HRM. */
		port = 0;
		index = 0;
		while (port < num_ports) {
			union cvmx_srxx_spi4_calx srxx_spi4_calx;
			srxx_spi4_calx.u64 = 0;
			srxx_spi4_calx.s.prt0 = port++;
			srxx_spi4_calx.s.prt1 = port++;
			srxx_spi4_calx.s.prt2 = port++;
			srxx_spi4_calx.s.prt3 = port++;
			srxx_spi4_calx.s.oddpar =
			    ~(cvmx_dpop(srxx_spi4_calx.u64) & 1);
			cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
				       srxx_spi4_calx.u64);
			index++;
		}
		srxx_spi4_stat.u64 = 0;
		srxx_spi4_stat.s.len = num_ports;
		srxx_spi4_stat.s.m = 1;
		cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface),
			       srxx_spi4_stat.u64);
	}

	if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
		union cvmx_stxx_arb_ctl stxx_arb_ctl;
		union cvmx_gmxx_tx_spi_max gmxx_tx_spi_max;
		union cvmx_gmxx_tx_spi_thresh gmxx_tx_spi_thresh;
		union cvmx_gmxx_tx_spi_ctl gmxx_tx_spi_ctl;
		union cvmx_stxx_spi4_stat stxx_spi4_stat;
		union cvmx_stxx_spi4_dat stxx_spi4_dat;

		/* STX0 Config */
		stxx_arb_ctl.u64 = 0;
		stxx_arb_ctl.s.igntpa = 0;
		stxx_arb_ctl.s.mintrn = 0;
		cvmx_write_csr(CVMX_STXX_ARB_CTL(interface), stxx_arb_ctl.u64);

		gmxx_tx_spi_max.u64 = 0;
		gmxx_tx_spi_max.s.max1 = 8;
		gmxx_tx_spi_max.s.max2 = 4;
		gmxx_tx_spi_max.s.slice = 0;
		cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface),
			       gmxx_tx_spi_max.u64);

		gmxx_tx_spi_thresh.u64 = 0;
		gmxx_tx_spi_thresh.s.thresh = 4;
		cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface),
			       gmxx_tx_spi_thresh.u64);

		gmxx_tx_spi_ctl.u64 = 0;
		gmxx_tx_spi_ctl.s.tpa_clr = 0;
		gmxx_tx_spi_ctl.s.cont_pkt = 0;
		cvmx_write_csr(CVMX_GMXX_TX_SPI_CTL(interface),
			       gmxx_tx_spi_ctl.u64);

		/* STX0 Training Control */
		stxx_spi4_dat.u64 = 0;
		/* Minimum needed by dynamic alignment */
		stxx_spi4_dat.s.alpha = 32;
		stxx_spi4_dat.s.max_t = 0xFFFF;	/* Minimum interval is 0x20 */
		cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface),
			       stxx_spi4_dat.u64);

		/* STX0 Calendar Table. This round robbins through all ports
		 * (same 4-ports-per-entry layout as the SRX table above). */
		port = 0;
		index = 0;
		while (port < num_ports) {
			union cvmx_stxx_spi4_calx stxx_spi4_calx;
			stxx_spi4_calx.u64 = 0;
			stxx_spi4_calx.s.prt0 = port++;
			stxx_spi4_calx.s.prt1 = port++;
			stxx_spi4_calx.s.prt2 = port++;
			stxx_spi4_calx.s.prt3 = port++;
			stxx_spi4_calx.s.oddpar =
			    ~(cvmx_dpop(stxx_spi4_calx.u64) & 1);
			cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
				       stxx_spi4_calx.u64);
			index++;
		}
		stxx_spi4_stat.u64 = 0;
		stxx_spi4_stat.s.len = num_ports;
		stxx_spi4_stat.s.m = 1;
		cvmx_write_csr(CVMX_STXX_SPI4_STAT(interface),
			       stxx_spi4_stat.u64);
	}

	return 0;
}
414
/**
 * Callback to perform clock detection
 *
 * @interface: The identifier of the packet interface to configure and
 *             use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *             can operate as a full duplex (both Tx and Rx data paths
 *             active) or as a halfplex (either the Tx data path is
 *             active or the Rx data path is active, but not both).
 * @timeout:   Timeout to wait for clock synchronization in seconds
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
	int clock_transitions;
	union cvmx_spxx_clk_stat stat;
	uint64_t timeout_time;
	/* Core clock cycles per millisecond */
	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;

	/*
	 * Regardless of operating mode, both Tx and Rx clocks must be
	 * present for the SPI interface to operate.
	 */
	cvmx_dprintf("SPI%d: Waiting to see TsClk...\n", interface);
	timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
	/*
	 * Require 100 clock transitions in order to avoid any noise
	 * in the beginning.
	 */
	clock_transitions = 100;
	do {
		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
		if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions) {
			/*
			 * We've seen a clock transition, so decrement
			 * the number we still need.  The write-1-to-clear
			 * below rearms the edge-detect bits; the local
			 * copies are zeroed so the loop keeps running
			 * until 100 transitions have been counted.
			 */
			clock_transitions--;
			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
			stat.s.s4clk0 = 0;
			stat.s.s4clk1 = 0;
		}
		if (cvmx_get_cycle() > timeout_time) {
			cvmx_dprintf("SPI%d: Timeout\n", interface);
			return -1;
		}
	} while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);

	cvmx_dprintf("SPI%d: Waiting to see RsClk...\n", interface);
	timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
	/*
	 * Require 100 clock transitions in order to avoid any noise in the
	 * beginning.
	 */
	clock_transitions = 100;
	do {
		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
		if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions) {
			/*
			 * We've seen a clock transition, so decrement
			 * the number we still need
			 */
			clock_transitions--;
			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
			stat.s.d4clk0 = 0;
			stat.s.d4clk1 = 0;
		}
		if (cvmx_get_cycle() > timeout_time) {
			cvmx_dprintf("SPI%d: Timeout\n", interface);
			return -1;
		}
	} while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);

	return 0;
}
492
/**
 * Callback to perform link training
 *
 * @interface: The identifier of the packet interface to configure and
 *             use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *             can operate as a full duplex (both Tx and Rx data paths
 *             active) or as a halfplex (either the Tx data path is
 *             active or the Rx data path is active, but not both).
 * @timeout:   Timeout to wait for link to be trained (in seconds)
 *
 * NOTE(review): the @timeout argument is effectively unused — the
 * timeout_time computed from it is overwritten below with a fixed
 * 600-second window; confirm whether this is intentional.
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
	union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
	union cvmx_spxx_clk_stat stat;
	/* Core clock cycles per millisecond */
	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
	uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
	int rx_training_needed;

	/* SRX0 & STX0 Inf0 Links are configured - begin training */
	union cvmx_spxx_clk_ctl spxx_clk_ctl;
	spxx_clk_ctl.u64 = 0;
	spxx_clk_ctl.s.seetrn = 0;
	spxx_clk_ctl.s.clkdly = 0x10;
	spxx_clk_ctl.s.runbist = 0;
	spxx_clk_ctl.s.statdrv = 0;
	/* This should always be on the opposite edge as statdrv */
	spxx_clk_ctl.s.statrcv = 1;
	spxx_clk_ctl.s.sndtrn = 1;
	spxx_clk_ctl.s.drptrn = 1;
	spxx_clk_ctl.s.rcvtrn = 1;
	spxx_clk_ctl.s.srxdlck = 1;
	cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
	cvmx_wait(1000 * MS);

	/* SRX0 clear the boot bit (read-modify-write preserves the rest
	 * of TRN4_CTL) */
	spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
	spxx_trn4_ctl.s.clr_boot = 1;
	cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);

	/* Wait for the training sequence to complete */
	cvmx_dprintf("SPI%d: Waiting for training\n", interface);
	cvmx_wait(1000 * MS);
	/* Wait a really long time here */
	timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
	/*
	 * The HRM says we must wait for 34 + 16 * MAXDIST training sequences.
	 * We'll be pessimistic and wait for a lot more.
	 */
	rx_training_needed = 500;
	do {
		stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
		if (stat.s.srxtrn && rx_training_needed) {
			rx_training_needed--;
			cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
			stat.s.srxtrn = 0;
		}
		if (cvmx_get_cycle() > timeout_time) {
			cvmx_dprintf("SPI%d: Timeout\n", interface);
			return -1;
		}
	} while (stat.s.srxtrn == 0);

	return 0;
}
561
/**
 * Callback to perform calendar data synchronization
 *
 * @interface: The identifier of the packet interface to configure and
 *             use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *             can operate as a full duplex (both Tx and Rx data paths
 *             active) or as a halfplex (either the Tx data path is
 *             active or the Rx data path is active, but not both).
 * @timeout:   Timeout to wait for calendar data in seconds
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
	/* Core clock cycles per millisecond */
	uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
	if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
		/* SRX0 interface should be good, send calendar data */
		union cvmx_srxx_com_ctl srxx_com_ctl;
		cvmx_dprintf
		    ("SPI%d: Rx is synchronized, start sending calendar data\n",
		     interface);
		srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
		srxx_com_ctl.s.inf_en = 1;
		srxx_com_ctl.s.st_en = 1;
		cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
	}

	if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
		/* STX0 has achieved sync */
		/* The correspondent board should be sending calendar data */
		/* Enable the STX0 STAT receiver. */
		union cvmx_spxx_clk_stat stat;
		uint64_t timeout_time;
		union cvmx_stxx_com_ctl stxx_com_ctl;
		stxx_com_ctl.u64 = 0;
		stxx_com_ctl.s.st_en = 1;
		cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);

		/* Waiting for calendar sync on STX0 STAT */
		cvmx_dprintf("SPI%d: Waiting to sync on STX[%d] STAT\n",
			     interface, interface);
		timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
		/* SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10) */
		do {
			stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
			if (cvmx_get_cycle() > timeout_time) {
				cvmx_dprintf("SPI%d: Timeout\n", interface);
				return -1;
			}
		} while (stat.s.stxcal == 0);
	}

	return 0;
}
618
/**
 * Callback to handle interface up
 *
 * Enables the Rx/Tx datapaths as selected by @mode, then programs the
 * GMX frame-length limits (min 64 bytes, max/jabber 64 KiB - 4) for
 * the interface.
 *
 * @interface: The identifier of the packet interface to configure and
 *             use as a SPI interface.
 * @mode:      The operating mode for the SPI interface. The interface
 *             can operate as a full duplex (both Tx and Rx data paths
 *             active) or as a halfplex (either the Tx data path is
 *             active or the Rx data path is active, but not both).
 *
 * Returns Zero on success, non-zero error code on failure (will cause
 * SPI initialization to abort)
 */
int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode)
{
	union cvmx_gmxx_rxx_frm_min gmxx_rxx_frm_min;
	union cvmx_gmxx_rxx_frm_max gmxx_rxx_frm_max;
	union cvmx_gmxx_rxx_jabber gmxx_rxx_jabber;

	if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
		union cvmx_srxx_com_ctl srxx_com_ctl;
		srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
		srxx_com_ctl.s.inf_en = 1;
		cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
		cvmx_dprintf("SPI%d: Rx is now up\n", interface);
	}

	if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
		union cvmx_stxx_com_ctl stxx_com_ctl;
		stxx_com_ctl.u64 = cvmx_read_csr(CVMX_STXX_COM_CTL(interface));
		stxx_com_ctl.s.inf_en = 1;
		cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
		cvmx_dprintf("SPI%d: Tx is now up\n", interface);
	}

	/* Minimum legal frame length */
	gmxx_rxx_frm_min.u64 = 0;
	gmxx_rxx_frm_min.s.len = 64;
	cvmx_write_csr(CVMX_GMXX_RXX_FRM_MIN(0, interface),
		       gmxx_rxx_frm_min.u64);
	/* Maximum legal frame length: 64 KiB minus 4 bytes */
	gmxx_rxx_frm_max.u64 = 0;
	gmxx_rxx_frm_max.s.len = 64 * 1024 - 4;
	cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(0, interface),
		       gmxx_rxx_frm_max.u64);
	/* Jabber threshold matches the maximum frame length */
	gmxx_rxx_jabber.u64 = 0;
	gmxx_rxx_jabber.s.cnt = 64 * 1024 - 4;
	cvmx_write_csr(CVMX_GMXX_RXX_JABBER(0, interface), gmxx_rxx_jabber.u64);

	return 0;
}
diff --git a/drivers/staging/octeon/cvmx-spi.h b/drivers/staging/octeon/cvmx-spi.h
new file mode 100644
index 000000000000..e814648953a5
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-spi.h
@@ -0,0 +1,269 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/*
29 *
30 * This file contains defines for the SPI interface
31 */
32#ifndef __CVMX_SPI_H__
33#define __CVMX_SPI_H__
34
35#include "cvmx-gmxx-defs.h"
36
37/* CSR typedefs have been moved to cvmx-csr-*.h */
38
 39typedef enum {
 40 CVMX_SPI_MODE_UNKNOWN = 0,
	/* Tx-only and Rx-only are distinct single bits, so DUPLEX (3) is
	 * their bitwise OR and callers can probe each path with '&'
	 * (see the mode tests in cvmx_spi_interface_up_cb). */
 41 CVMX_SPI_MODE_TX_HALFPLEX = 1,
 42 CVMX_SPI_MODE_RX_HALFPLEX = 2,
 43 CVMX_SPI_MODE_DUPLEX = 3
 44} cvmx_spi_mode_t;
45
 46/** Callbacks structure to customize SPI4 initialization sequence */
	/*
	 * NOTE(review): the member order (reset, calendar setup, clock
	 * detect, training, calendar sync, interface up) presumably mirrors
	 * the order the SPI4 bring-up sequence invokes them -- confirm
	 * against cvmx_spi_start_interface(). Each callback returns zero on
	 * success; non-zero aborts initialization (see the per-callback
	 * prototypes documented later in this header).
	 */
 47typedef struct {
 48 /** Called to reset SPI4 DLL */
 49 int (*reset_cb) (int interface, cvmx_spi_mode_t mode);
 50
 51 /** Called to setup calendar */
 52 int (*calendar_setup_cb) (int interface, cvmx_spi_mode_t mode,
 53 int num_ports);
 54
 55 /** Called for Tx and Rx clock detection */
 56 int (*clock_detect_cb) (int interface, cvmx_spi_mode_t mode,
 57 int timeout);
 58
 59 /** Called to perform link training */
 60 int (*training_cb) (int interface, cvmx_spi_mode_t mode, int timeout);
 61
 62 /** Called for calendar data synchronization */
 63 int (*calendar_sync_cb) (int interface, cvmx_spi_mode_t mode,
 64 int timeout);
 65
 66 /** Called when interface is up */
 67 int (*interface_up_cb) (int interface, cvmx_spi_mode_t mode);
 68
 69} cvmx_spi_callbacks_t;
70
71/**
72 * Return true if the supplied interface is configured for SPI
73 *
74 * @interface: Interface to check
75 * Returns True if interface is SPI
76 */
77static inline int cvmx_spi_is_spi_interface(int interface)
78{
79 uint64_t gmxState = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
80 return (gmxState & 0x2) && (gmxState & 0x1);
81}
82
83/**
84 * Initialize and start the SPI interface.
85 *
86 * @interface: The identifier of the packet interface to configure and
87 * use as a SPI interface.
88 * @mode: The operating mode for the SPI interface. The interface
89 * can operate as a full duplex (both Tx and Rx data paths
90 * active) or as a halfplex (either the Tx data path is
91 * active or the Rx data path is active, but not both).
92 * @timeout: Timeout to wait for clock synchronization in seconds
93 * @num_ports: Number of SPI ports to configure
94 *
95 * Returns Zero on success, negative of failure.
96 */
97extern int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode,
98 int timeout, int num_ports);
99
100/**
101 * This routine restarts the SPI interface after it has lost synchronization
102 * with its corespondant system.
103 *
104 * @interface: The identifier of the packet interface to configure and
105 * use as a SPI interface.
106 * @mode: The operating mode for the SPI interface. The interface
107 * can operate as a full duplex (both Tx and Rx data paths
108 * active) or as a halfplex (either the Tx data path is
109 * active or the Rx data path is active, but not both).
110 * @timeout: Timeout to wait for clock synchronization in seconds
111 * Returns Zero on success, negative of failure.
112 */
113extern int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode,
114 int timeout);
115
116/**
117 * Return non-zero if the SPI interface has a SPI4000 attached
118 *
119 * @interface: SPI interface the SPI4000 is connected to
120 *
121 * Returns
122 */
 123static inline int cvmx_spi4000_is_present(int interface)
 124{
	/* Stub: SPI4000 support is not built into this driver, so a
	 * SPI4000 is always reported as absent. */
 125 return 0;
 126}
127
128/**
129 * Initialize the SPI4000 for use
130 *
131 * @interface: SPI interface the SPI4000 is connected to
132 */
 133static inline int cvmx_spi4000_initialize(int interface)
 134{
	/* Stub: nothing to initialize without SPI4000 support; report
	 * success so generic bring-up code can proceed unchanged. */
 135 return 0;
 136}
137
138/**
139 * Poll all the SPI4000 port and check its speed
140 *
141 * @interface: Interface the SPI4000 is on
142 * @port: Port to poll (0-9)
143 * Returns Status of the port. 0=down. All other values the port is up.
144 */
 145static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
 146 int interface,
 147 int port)
 148{
 149 union cvmx_gmxx_rxx_rx_inbnd r;
	/* Stub: no SPI4000 support, so return an all-zero status, which
	 * per the doc comment above means "port down". */
 150 r.u64 = 0;
 151 return r;
 152}
153
154/**
155 * Get current SPI4 initialization callbacks
156 *
157 * @callbacks: Pointer to the callbacks structure.to fill
158 *
159 * Returns Pointer to cvmx_spi_callbacks_t structure.
160 */
161extern void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks);
162
163/**
164 * Set new SPI4 initialization callbacks
165 *
166 * @new_callbacks: Pointer to an updated callbacks structure.
167 */
168extern void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks);
169
170/**
171 * Callback to perform SPI4 reset
172 *
173 * @interface: The identifier of the packet interface to configure and
174 * use as a SPI interface.
175 * @mode: The operating mode for the SPI interface. The interface
176 * can operate as a full duplex (both Tx and Rx data paths
177 * active) or as a halfplex (either the Tx data path is
178 * active or the Rx data path is active, but not both).
179 *
180 * Returns Zero on success, non-zero error code on failure (will cause
181 * SPI initialization to abort)
182 */
183extern int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode);
184
185/**
186 * Callback to setup calendar and miscellaneous settings before clock
187 * detection
188 *
189 * @interface: The identifier of the packet interface to configure and
190 * use as a SPI interface.
191 * @mode: The operating mode for the SPI interface. The interface
192 * can operate as a full duplex (both Tx and Rx data paths
193 * active) or as a halfplex (either the Tx data path is
194 * active or the Rx data path is active, but not both).
195 * @num_ports: Number of ports to configure on SPI
196 *
197 * Returns Zero on success, non-zero error code on failure (will cause
198 * SPI initialization to abort)
199 */
200extern int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
201 int num_ports);
202
203/**
204 * Callback to perform clock detection
205 *
206 * @interface: The identifier of the packet interface to configure and
207 * use as a SPI interface.
208 * @mode: The operating mode for the SPI interface. The interface
209 * can operate as a full duplex (both Tx and Rx data paths
210 * active) or as a halfplex (either the Tx data path is
211 * active or the Rx data path is active, but not both).
212 * @timeout: Timeout to wait for clock synchronization in seconds
213 *
214 * Returns Zero on success, non-zero error code on failure (will cause
215 * SPI initialization to abort)
216 */
217extern int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode,
218 int timeout);
219
220/**
221 * Callback to perform link training
222 *
223 * @interface: The identifier of the packet interface to configure and
224 * use as a SPI interface.
225 * @mode: The operating mode for the SPI interface. The interface
226 * can operate as a full duplex (both Tx and Rx data paths
227 * active) or as a halfplex (either the Tx data path is
228 * active or the Rx data path is active, but not both).
229 * @timeout: Timeout to wait for link to be trained (in seconds)
230 *
231 * Returns Zero on success, non-zero error code on failure (will cause
232 * SPI initialization to abort)
233 */
234extern int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode,
235 int timeout);
236
237/**
238 * Callback to perform calendar data synchronization
239 *
240 * @interface: The identifier of the packet interface to configure and
241 * use as a SPI interface.
242 * @mode: The operating mode for the SPI interface. The interface
243 * can operate as a full duplex (both Tx and Rx data paths
244 * active) or as a halfplex (either the Tx data path is
245 * active or the Rx data path is active, but not both).
246 * @timeout: Timeout to wait for calendar data in seconds
247 *
248 * Returns Zero on success, non-zero error code on failure (will cause
249 * SPI initialization to abort)
250 */
251extern int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode,
252 int timeout);
253
254/**
255 * Callback to handle interface up
256 *
257 * @interface: The identifier of the packet interface to configure and
258 * use as a SPI interface.
259 * @mode: The operating mode for the SPI interface. The interface
260 * can operate as a full duplex (both Tx and Rx data paths
261 * active) or as a halfplex (either the Tx data path is
262 * active or the Rx data path is active, but not both).
263 *
264 * Returns Zero on success, non-zero error code on failure (will cause
265 * SPI initialization to abort)
266 */
267extern int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode);
268
269#endif /* __CVMX_SPI_H__ */
diff --git a/drivers/staging/octeon/cvmx-spxx-defs.h b/drivers/staging/octeon/cvmx-spxx-defs.h
new file mode 100644
index 000000000000..b16940e32c83
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-spxx-defs.h
@@ -0,0 +1,347 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_SPXX_DEFS_H__
29#define __CVMX_SPXX_DEFS_H__
30
31#define CVMX_SPXX_BCKPRS_CNT(block_id) \
32 CVMX_ADD_IO_SEG(0x0001180090000340ull + (((block_id) & 1) * 0x8000000ull))
33#define CVMX_SPXX_BIST_STAT(block_id) \
34 CVMX_ADD_IO_SEG(0x00011800900007F8ull + (((block_id) & 1) * 0x8000000ull))
35#define CVMX_SPXX_CLK_CTL(block_id) \
36 CVMX_ADD_IO_SEG(0x0001180090000348ull + (((block_id) & 1) * 0x8000000ull))
37#define CVMX_SPXX_CLK_STAT(block_id) \
38 CVMX_ADD_IO_SEG(0x0001180090000350ull + (((block_id) & 1) * 0x8000000ull))
39#define CVMX_SPXX_DBG_DESKEW_CTL(block_id) \
40 CVMX_ADD_IO_SEG(0x0001180090000368ull + (((block_id) & 1) * 0x8000000ull))
41#define CVMX_SPXX_DBG_DESKEW_STATE(block_id) \
42 CVMX_ADD_IO_SEG(0x0001180090000370ull + (((block_id) & 1) * 0x8000000ull))
43#define CVMX_SPXX_DRV_CTL(block_id) \
44 CVMX_ADD_IO_SEG(0x0001180090000358ull + (((block_id) & 1) * 0x8000000ull))
45#define CVMX_SPXX_ERR_CTL(block_id) \
46 CVMX_ADD_IO_SEG(0x0001180090000320ull + (((block_id) & 1) * 0x8000000ull))
47#define CVMX_SPXX_INT_DAT(block_id) \
48 CVMX_ADD_IO_SEG(0x0001180090000318ull + (((block_id) & 1) * 0x8000000ull))
49#define CVMX_SPXX_INT_MSK(block_id) \
50 CVMX_ADD_IO_SEG(0x0001180090000308ull + (((block_id) & 1) * 0x8000000ull))
51#define CVMX_SPXX_INT_REG(block_id) \
52 CVMX_ADD_IO_SEG(0x0001180090000300ull + (((block_id) & 1) * 0x8000000ull))
53#define CVMX_SPXX_INT_SYNC(block_id) \
54 CVMX_ADD_IO_SEG(0x0001180090000310ull + (((block_id) & 1) * 0x8000000ull))
55#define CVMX_SPXX_TPA_ACC(block_id) \
56 CVMX_ADD_IO_SEG(0x0001180090000338ull + (((block_id) & 1) * 0x8000000ull))
57#define CVMX_SPXX_TPA_MAX(block_id) \
58 CVMX_ADD_IO_SEG(0x0001180090000330ull + (((block_id) & 1) * 0x8000000ull))
59#define CVMX_SPXX_TPA_SEL(block_id) \
60 CVMX_ADD_IO_SEG(0x0001180090000328ull + (((block_id) & 1) * 0x8000000ull))
61#define CVMX_SPXX_TRN4_CTL(block_id) \
62 CVMX_ADD_IO_SEG(0x0001180090000360ull + (((block_id) & 1) * 0x8000000ull))
63
64union cvmx_spxx_bckprs_cnt {
65 uint64_t u64;
66 struct cvmx_spxx_bckprs_cnt_s {
67 uint64_t reserved_32_63:32;
68 uint64_t cnt:32;
69 } s;
70 struct cvmx_spxx_bckprs_cnt_s cn38xx;
71 struct cvmx_spxx_bckprs_cnt_s cn38xxp2;
72 struct cvmx_spxx_bckprs_cnt_s cn58xx;
73 struct cvmx_spxx_bckprs_cnt_s cn58xxp1;
74};
75
76union cvmx_spxx_bist_stat {
77 uint64_t u64;
78 struct cvmx_spxx_bist_stat_s {
79 uint64_t reserved_3_63:61;
80 uint64_t stat2:1;
81 uint64_t stat1:1;
82 uint64_t stat0:1;
83 } s;
84 struct cvmx_spxx_bist_stat_s cn38xx;
85 struct cvmx_spxx_bist_stat_s cn38xxp2;
86 struct cvmx_spxx_bist_stat_s cn58xx;
87 struct cvmx_spxx_bist_stat_s cn58xxp1;
88};
89
90union cvmx_spxx_clk_ctl {
91 uint64_t u64;
92 struct cvmx_spxx_clk_ctl_s {
93 uint64_t reserved_17_63:47;
94 uint64_t seetrn:1;
95 uint64_t reserved_12_15:4;
96 uint64_t clkdly:5;
97 uint64_t runbist:1;
98 uint64_t statdrv:1;
99 uint64_t statrcv:1;
100 uint64_t sndtrn:1;
101 uint64_t drptrn:1;
102 uint64_t rcvtrn:1;
103 uint64_t srxdlck:1;
104 } s;
105 struct cvmx_spxx_clk_ctl_s cn38xx;
106 struct cvmx_spxx_clk_ctl_s cn38xxp2;
107 struct cvmx_spxx_clk_ctl_s cn58xx;
108 struct cvmx_spxx_clk_ctl_s cn58xxp1;
109};
110
111union cvmx_spxx_clk_stat {
112 uint64_t u64;
113 struct cvmx_spxx_clk_stat_s {
114 uint64_t reserved_11_63:53;
115 uint64_t stxcal:1;
116 uint64_t reserved_9_9:1;
117 uint64_t srxtrn:1;
118 uint64_t s4clk1:1;
119 uint64_t s4clk0:1;
120 uint64_t d4clk1:1;
121 uint64_t d4clk0:1;
122 uint64_t reserved_0_3:4;
123 } s;
124 struct cvmx_spxx_clk_stat_s cn38xx;
125 struct cvmx_spxx_clk_stat_s cn38xxp2;
126 struct cvmx_spxx_clk_stat_s cn58xx;
127 struct cvmx_spxx_clk_stat_s cn58xxp1;
128};
129
130union cvmx_spxx_dbg_deskew_ctl {
131 uint64_t u64;
132 struct cvmx_spxx_dbg_deskew_ctl_s {
133 uint64_t reserved_30_63:34;
134 uint64_t fallnop:1;
135 uint64_t fall8:1;
136 uint64_t reserved_26_27:2;
137 uint64_t sstep_go:1;
138 uint64_t sstep:1;
139 uint64_t reserved_22_23:2;
140 uint64_t clrdly:1;
141 uint64_t dec:1;
142 uint64_t inc:1;
143 uint64_t mux:1;
144 uint64_t offset:5;
145 uint64_t bitsel:5;
146 uint64_t offdly:6;
147 uint64_t dllfrc:1;
148 uint64_t dlldis:1;
149 } s;
150 struct cvmx_spxx_dbg_deskew_ctl_s cn38xx;
151 struct cvmx_spxx_dbg_deskew_ctl_s cn38xxp2;
152 struct cvmx_spxx_dbg_deskew_ctl_s cn58xx;
153 struct cvmx_spxx_dbg_deskew_ctl_s cn58xxp1;
154};
155
156union cvmx_spxx_dbg_deskew_state {
157 uint64_t u64;
158 struct cvmx_spxx_dbg_deskew_state_s {
159 uint64_t reserved_9_63:55;
160 uint64_t testres:1;
161 uint64_t unxterm:1;
162 uint64_t muxsel:2;
163 uint64_t offset:5;
164 } s;
165 struct cvmx_spxx_dbg_deskew_state_s cn38xx;
166 struct cvmx_spxx_dbg_deskew_state_s cn38xxp2;
167 struct cvmx_spxx_dbg_deskew_state_s cn58xx;
168 struct cvmx_spxx_dbg_deskew_state_s cn58xxp1;
169};
170
171union cvmx_spxx_drv_ctl {
172 uint64_t u64;
173 struct cvmx_spxx_drv_ctl_s {
174 uint64_t reserved_0_63:64;
175 } s;
176 struct cvmx_spxx_drv_ctl_cn38xx {
177 uint64_t reserved_16_63:48;
178 uint64_t stx4ncmp:4;
179 uint64_t stx4pcmp:4;
180 uint64_t srx4cmp:8;
181 } cn38xx;
182 struct cvmx_spxx_drv_ctl_cn38xx cn38xxp2;
183 struct cvmx_spxx_drv_ctl_cn58xx {
184 uint64_t reserved_24_63:40;
185 uint64_t stx4ncmp:4;
186 uint64_t stx4pcmp:4;
187 uint64_t reserved_10_15:6;
188 uint64_t srx4cmp:10;
189 } cn58xx;
190 struct cvmx_spxx_drv_ctl_cn58xx cn58xxp1;
191};
192
193union cvmx_spxx_err_ctl {
194 uint64_t u64;
195 struct cvmx_spxx_err_ctl_s {
196 uint64_t reserved_9_63:55;
197 uint64_t prtnxa:1;
198 uint64_t dipcls:1;
199 uint64_t dippay:1;
200 uint64_t reserved_4_5:2;
201 uint64_t errcnt:4;
202 } s;
203 struct cvmx_spxx_err_ctl_s cn38xx;
204 struct cvmx_spxx_err_ctl_s cn38xxp2;
205 struct cvmx_spxx_err_ctl_s cn58xx;
206 struct cvmx_spxx_err_ctl_s cn58xxp1;
207};
208
209union cvmx_spxx_int_dat {
210 uint64_t u64;
211 struct cvmx_spxx_int_dat_s {
212 uint64_t reserved_32_63:32;
213 uint64_t mul:1;
214 uint64_t reserved_14_30:17;
215 uint64_t calbnk:2;
216 uint64_t rsvop:4;
217 uint64_t prt:8;
218 } s;
219 struct cvmx_spxx_int_dat_s cn38xx;
220 struct cvmx_spxx_int_dat_s cn38xxp2;
221 struct cvmx_spxx_int_dat_s cn58xx;
222 struct cvmx_spxx_int_dat_s cn58xxp1;
223};
224
225union cvmx_spxx_int_msk {
226 uint64_t u64;
227 struct cvmx_spxx_int_msk_s {
228 uint64_t reserved_12_63:52;
229 uint64_t calerr:1;
230 uint64_t syncerr:1;
231 uint64_t diperr:1;
232 uint64_t tpaovr:1;
233 uint64_t rsverr:1;
234 uint64_t drwnng:1;
235 uint64_t clserr:1;
236 uint64_t spiovr:1;
237 uint64_t reserved_2_3:2;
238 uint64_t abnorm:1;
239 uint64_t prtnxa:1;
240 } s;
241 struct cvmx_spxx_int_msk_s cn38xx;
242 struct cvmx_spxx_int_msk_s cn38xxp2;
243 struct cvmx_spxx_int_msk_s cn58xx;
244 struct cvmx_spxx_int_msk_s cn58xxp1;
245};
246
247union cvmx_spxx_int_reg {
248 uint64_t u64;
249 struct cvmx_spxx_int_reg_s {
250 uint64_t reserved_32_63:32;
251 uint64_t spf:1;
252 uint64_t reserved_12_30:19;
253 uint64_t calerr:1;
254 uint64_t syncerr:1;
255 uint64_t diperr:1;
256 uint64_t tpaovr:1;
257 uint64_t rsverr:1;
258 uint64_t drwnng:1;
259 uint64_t clserr:1;
260 uint64_t spiovr:1;
261 uint64_t reserved_2_3:2;
262 uint64_t abnorm:1;
263 uint64_t prtnxa:1;
264 } s;
265 struct cvmx_spxx_int_reg_s cn38xx;
266 struct cvmx_spxx_int_reg_s cn38xxp2;
267 struct cvmx_spxx_int_reg_s cn58xx;
268 struct cvmx_spxx_int_reg_s cn58xxp1;
269};
270
271union cvmx_spxx_int_sync {
272 uint64_t u64;
273 struct cvmx_spxx_int_sync_s {
274 uint64_t reserved_12_63:52;
275 uint64_t calerr:1;
276 uint64_t syncerr:1;
277 uint64_t diperr:1;
278 uint64_t tpaovr:1;
279 uint64_t rsverr:1;
280 uint64_t drwnng:1;
281 uint64_t clserr:1;
282 uint64_t spiovr:1;
283 uint64_t reserved_2_3:2;
284 uint64_t abnorm:1;
285 uint64_t prtnxa:1;
286 } s;
287 struct cvmx_spxx_int_sync_s cn38xx;
288 struct cvmx_spxx_int_sync_s cn38xxp2;
289 struct cvmx_spxx_int_sync_s cn58xx;
290 struct cvmx_spxx_int_sync_s cn58xxp1;
291};
292
293union cvmx_spxx_tpa_acc {
294 uint64_t u64;
295 struct cvmx_spxx_tpa_acc_s {
296 uint64_t reserved_32_63:32;
297 uint64_t cnt:32;
298 } s;
299 struct cvmx_spxx_tpa_acc_s cn38xx;
300 struct cvmx_spxx_tpa_acc_s cn38xxp2;
301 struct cvmx_spxx_tpa_acc_s cn58xx;
302 struct cvmx_spxx_tpa_acc_s cn58xxp1;
303};
304
305union cvmx_spxx_tpa_max {
306 uint64_t u64;
307 struct cvmx_spxx_tpa_max_s {
308 uint64_t reserved_32_63:32;
309 uint64_t max:32;
310 } s;
311 struct cvmx_spxx_tpa_max_s cn38xx;
312 struct cvmx_spxx_tpa_max_s cn38xxp2;
313 struct cvmx_spxx_tpa_max_s cn58xx;
314 struct cvmx_spxx_tpa_max_s cn58xxp1;
315};
316
317union cvmx_spxx_tpa_sel {
318 uint64_t u64;
319 struct cvmx_spxx_tpa_sel_s {
320 uint64_t reserved_4_63:60;
321 uint64_t prtsel:4;
322 } s;
323 struct cvmx_spxx_tpa_sel_s cn38xx;
324 struct cvmx_spxx_tpa_sel_s cn38xxp2;
325 struct cvmx_spxx_tpa_sel_s cn58xx;
326 struct cvmx_spxx_tpa_sel_s cn58xxp1;
327};
328
329union cvmx_spxx_trn4_ctl {
330 uint64_t u64;
331 struct cvmx_spxx_trn4_ctl_s {
332 uint64_t reserved_13_63:51;
333 uint64_t trntest:1;
334 uint64_t jitter:3;
335 uint64_t clr_boot:1;
336 uint64_t set_boot:1;
337 uint64_t maxdist:5;
338 uint64_t macro_en:1;
339 uint64_t mux_en:1;
340 } s;
341 struct cvmx_spxx_trn4_ctl_s cn38xx;
342 struct cvmx_spxx_trn4_ctl_s cn38xxp2;
343 struct cvmx_spxx_trn4_ctl_s cn58xx;
344 struct cvmx_spxx_trn4_ctl_s cn58xxp1;
345};
346
347#endif
diff --git a/drivers/staging/octeon/cvmx-srxx-defs.h b/drivers/staging/octeon/cvmx-srxx-defs.h
new file mode 100644
index 000000000000..d82b366c279f
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-srxx-defs.h
@@ -0,0 +1,126 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_SRXX_DEFS_H__
29#define __CVMX_SRXX_DEFS_H__
30
31#define CVMX_SRXX_COM_CTL(block_id) \
32 CVMX_ADD_IO_SEG(0x0001180090000200ull + (((block_id) & 1) * 0x8000000ull))
33#define CVMX_SRXX_IGN_RX_FULL(block_id) \
34 CVMX_ADD_IO_SEG(0x0001180090000218ull + (((block_id) & 1) * 0x8000000ull))
35#define CVMX_SRXX_SPI4_CALX(offset, block_id) \
36 CVMX_ADD_IO_SEG(0x0001180090000000ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull))
37#define CVMX_SRXX_SPI4_STAT(block_id) \
38 CVMX_ADD_IO_SEG(0x0001180090000208ull + (((block_id) & 1) * 0x8000000ull))
39#define CVMX_SRXX_SW_TICK_CTL(block_id) \
40 CVMX_ADD_IO_SEG(0x0001180090000220ull + (((block_id) & 1) * 0x8000000ull))
41#define CVMX_SRXX_SW_TICK_DAT(block_id) \
42 CVMX_ADD_IO_SEG(0x0001180090000228ull + (((block_id) & 1) * 0x8000000ull))
43
 44union cvmx_srxx_com_ctl {
 45 uint64_t u64;
 46 struct cvmx_srxx_com_ctl_s {
 47 uint64_t reserved_8_63:56;
 48 uint64_t prts:4;
 49 uint64_t st_en:1;
 50 uint64_t reserved_1_2:2;
	/* inf_en: set by cvmx_spi_interface_up_cb() to enable the SPI
	 * receive path for the interface. */
 51 uint64_t inf_en:1;
 52 } s;
 53 struct cvmx_srxx_com_ctl_s cn38xx;
 54 struct cvmx_srxx_com_ctl_s cn38xxp2;
 55 struct cvmx_srxx_com_ctl_s cn58xx;
 56 struct cvmx_srxx_com_ctl_s cn58xxp1;
 57};
58
59union cvmx_srxx_ign_rx_full {
60 uint64_t u64;
61 struct cvmx_srxx_ign_rx_full_s {
62 uint64_t reserved_16_63:48;
63 uint64_t ignore:16;
64 } s;
65 struct cvmx_srxx_ign_rx_full_s cn38xx;
66 struct cvmx_srxx_ign_rx_full_s cn38xxp2;
67 struct cvmx_srxx_ign_rx_full_s cn58xx;
68 struct cvmx_srxx_ign_rx_full_s cn58xxp1;
69};
70
71union cvmx_srxx_spi4_calx {
72 uint64_t u64;
73 struct cvmx_srxx_spi4_calx_s {
74 uint64_t reserved_17_63:47;
75 uint64_t oddpar:1;
76 uint64_t prt3:4;
77 uint64_t prt2:4;
78 uint64_t prt1:4;
79 uint64_t prt0:4;
80 } s;
81 struct cvmx_srxx_spi4_calx_s cn38xx;
82 struct cvmx_srxx_spi4_calx_s cn38xxp2;
83 struct cvmx_srxx_spi4_calx_s cn58xx;
84 struct cvmx_srxx_spi4_calx_s cn58xxp1;
85};
86
87union cvmx_srxx_spi4_stat {
88 uint64_t u64;
89 struct cvmx_srxx_spi4_stat_s {
90 uint64_t reserved_16_63:48;
91 uint64_t m:8;
92 uint64_t reserved_7_7:1;
93 uint64_t len:7;
94 } s;
95 struct cvmx_srxx_spi4_stat_s cn38xx;
96 struct cvmx_srxx_spi4_stat_s cn38xxp2;
97 struct cvmx_srxx_spi4_stat_s cn58xx;
98 struct cvmx_srxx_spi4_stat_s cn58xxp1;
99};
100
101union cvmx_srxx_sw_tick_ctl {
102 uint64_t u64;
103 struct cvmx_srxx_sw_tick_ctl_s {
104 uint64_t reserved_14_63:50;
105 uint64_t eop:1;
106 uint64_t sop:1;
107 uint64_t mod:4;
108 uint64_t opc:4;
109 uint64_t adr:4;
110 } s;
111 struct cvmx_srxx_sw_tick_ctl_s cn38xx;
112 struct cvmx_srxx_sw_tick_ctl_s cn58xx;
113 struct cvmx_srxx_sw_tick_ctl_s cn58xxp1;
114};
115
116union cvmx_srxx_sw_tick_dat {
117 uint64_t u64;
118 struct cvmx_srxx_sw_tick_dat_s {
119 uint64_t dat:64;
120 } s;
121 struct cvmx_srxx_sw_tick_dat_s cn38xx;
122 struct cvmx_srxx_sw_tick_dat_s cn58xx;
123 struct cvmx_srxx_sw_tick_dat_s cn58xxp1;
124};
125
126#endif
diff --git a/drivers/staging/octeon/cvmx-stxx-defs.h b/drivers/staging/octeon/cvmx-stxx-defs.h
new file mode 100644
index 000000000000..4f209b62cae1
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-stxx-defs.h
@@ -0,0 +1,292 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_STXX_DEFS_H__
29#define __CVMX_STXX_DEFS_H__
30
31#define CVMX_STXX_ARB_CTL(block_id) \
32 CVMX_ADD_IO_SEG(0x0001180090000608ull + (((block_id) & 1) * 0x8000000ull))
33#define CVMX_STXX_BCKPRS_CNT(block_id) \
34 CVMX_ADD_IO_SEG(0x0001180090000688ull + (((block_id) & 1) * 0x8000000ull))
35#define CVMX_STXX_COM_CTL(block_id) \
36 CVMX_ADD_IO_SEG(0x0001180090000600ull + (((block_id) & 1) * 0x8000000ull))
37#define CVMX_STXX_DIP_CNT(block_id) \
38 CVMX_ADD_IO_SEG(0x0001180090000690ull + (((block_id) & 1) * 0x8000000ull))
39#define CVMX_STXX_IGN_CAL(block_id) \
40 CVMX_ADD_IO_SEG(0x0001180090000610ull + (((block_id) & 1) * 0x8000000ull))
41#define CVMX_STXX_INT_MSK(block_id) \
42 CVMX_ADD_IO_SEG(0x00011800900006A0ull + (((block_id) & 1) * 0x8000000ull))
43#define CVMX_STXX_INT_REG(block_id) \
44 CVMX_ADD_IO_SEG(0x0001180090000698ull + (((block_id) & 1) * 0x8000000ull))
45#define CVMX_STXX_INT_SYNC(block_id) \
46 CVMX_ADD_IO_SEG(0x00011800900006A8ull + (((block_id) & 1) * 0x8000000ull))
47#define CVMX_STXX_MIN_BST(block_id) \
48 CVMX_ADD_IO_SEG(0x0001180090000618ull + (((block_id) & 1) * 0x8000000ull))
49#define CVMX_STXX_SPI4_CALX(offset, block_id) \
50 CVMX_ADD_IO_SEG(0x0001180090000400ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull))
51#define CVMX_STXX_SPI4_DAT(block_id) \
52 CVMX_ADD_IO_SEG(0x0001180090000628ull + (((block_id) & 1) * 0x8000000ull))
53#define CVMX_STXX_SPI4_STAT(block_id) \
54 CVMX_ADD_IO_SEG(0x0001180090000630ull + (((block_id) & 1) * 0x8000000ull))
55#define CVMX_STXX_STAT_BYTES_HI(block_id) \
56 CVMX_ADD_IO_SEG(0x0001180090000648ull + (((block_id) & 1) * 0x8000000ull))
57#define CVMX_STXX_STAT_BYTES_LO(block_id) \
58 CVMX_ADD_IO_SEG(0x0001180090000680ull + (((block_id) & 1) * 0x8000000ull))
59#define CVMX_STXX_STAT_CTL(block_id) \
60 CVMX_ADD_IO_SEG(0x0001180090000638ull + (((block_id) & 1) * 0x8000000ull))
61#define CVMX_STXX_STAT_PKT_XMT(block_id) \
62 CVMX_ADD_IO_SEG(0x0001180090000640ull + (((block_id) & 1) * 0x8000000ull))
63
64union cvmx_stxx_arb_ctl {
65 uint64_t u64;
66 struct cvmx_stxx_arb_ctl_s {
67 uint64_t reserved_6_63:58;
68 uint64_t mintrn:1;
69 uint64_t reserved_4_4:1;
70 uint64_t igntpa:1;
71 uint64_t reserved_0_2:3;
72 } s;
73 struct cvmx_stxx_arb_ctl_s cn38xx;
74 struct cvmx_stxx_arb_ctl_s cn38xxp2;
75 struct cvmx_stxx_arb_ctl_s cn58xx;
76 struct cvmx_stxx_arb_ctl_s cn58xxp1;
77};
78
79union cvmx_stxx_bckprs_cnt {
80 uint64_t u64;
81 struct cvmx_stxx_bckprs_cnt_s {
82 uint64_t reserved_32_63:32;
83 uint64_t cnt:32;
84 } s;
85 struct cvmx_stxx_bckprs_cnt_s cn38xx;
86 struct cvmx_stxx_bckprs_cnt_s cn38xxp2;
87 struct cvmx_stxx_bckprs_cnt_s cn58xx;
88 struct cvmx_stxx_bckprs_cnt_s cn58xxp1;
89};
90
 91union cvmx_stxx_com_ctl {
 92 uint64_t u64;
 93 struct cvmx_stxx_com_ctl_s {
 94 uint64_t reserved_4_63:60;
 95 uint64_t st_en:1;
 96 uint64_t reserved_1_2:2;
	/* inf_en: set by cvmx_spi_interface_up_cb() to enable the SPI
	 * transmit path for the interface. */
 97 uint64_t inf_en:1;
 98 } s;
 99 struct cvmx_stxx_com_ctl_s cn38xx;
 100 struct cvmx_stxx_com_ctl_s cn38xxp2;
 101 struct cvmx_stxx_com_ctl_s cn58xx;
 102 struct cvmx_stxx_com_ctl_s cn58xxp1;
 103};
104
105union cvmx_stxx_dip_cnt {
106 uint64_t u64;
107 struct cvmx_stxx_dip_cnt_s {
108 uint64_t reserved_8_63:56;
109 uint64_t frmmax:4;
110 uint64_t dipmax:4;
111 } s;
112 struct cvmx_stxx_dip_cnt_s cn38xx;
113 struct cvmx_stxx_dip_cnt_s cn38xxp2;
114 struct cvmx_stxx_dip_cnt_s cn58xx;
115 struct cvmx_stxx_dip_cnt_s cn58xxp1;
116};
117
118union cvmx_stxx_ign_cal {
119 uint64_t u64;
120 struct cvmx_stxx_ign_cal_s {
121 uint64_t reserved_16_63:48;
122 uint64_t igntpa:16;
123 } s;
124 struct cvmx_stxx_ign_cal_s cn38xx;
125 struct cvmx_stxx_ign_cal_s cn38xxp2;
126 struct cvmx_stxx_ign_cal_s cn58xx;
127 struct cvmx_stxx_ign_cal_s cn58xxp1;
128};
129
130union cvmx_stxx_int_msk {
131 uint64_t u64;
132 struct cvmx_stxx_int_msk_s {
133 uint64_t reserved_8_63:56;
134 uint64_t frmerr:1;
135 uint64_t unxfrm:1;
136 uint64_t nosync:1;
137 uint64_t diperr:1;
138 uint64_t datovr:1;
139 uint64_t ovrbst:1;
140 uint64_t calpar1:1;
141 uint64_t calpar0:1;
142 } s;
143 struct cvmx_stxx_int_msk_s cn38xx;
144 struct cvmx_stxx_int_msk_s cn38xxp2;
145 struct cvmx_stxx_int_msk_s cn58xx;
146 struct cvmx_stxx_int_msk_s cn58xxp1;
147};
148
149union cvmx_stxx_int_reg {
150 uint64_t u64;
151 struct cvmx_stxx_int_reg_s {
152 uint64_t reserved_9_63:55;
153 uint64_t syncerr:1;
154 uint64_t frmerr:1;
155 uint64_t unxfrm:1;
156 uint64_t nosync:1;
157 uint64_t diperr:1;
158 uint64_t datovr:1;
159 uint64_t ovrbst:1;
160 uint64_t calpar1:1;
161 uint64_t calpar0:1;
162 } s;
163 struct cvmx_stxx_int_reg_s cn38xx;
164 struct cvmx_stxx_int_reg_s cn38xxp2;
165 struct cvmx_stxx_int_reg_s cn58xx;
166 struct cvmx_stxx_int_reg_s cn58xxp1;
167};
168
169union cvmx_stxx_int_sync {
170 uint64_t u64;
171 struct cvmx_stxx_int_sync_s {
172 uint64_t reserved_8_63:56;
173 uint64_t frmerr:1;
174 uint64_t unxfrm:1;
175 uint64_t nosync:1;
176 uint64_t diperr:1;
177 uint64_t datovr:1;
178 uint64_t ovrbst:1;
179 uint64_t calpar1:1;
180 uint64_t calpar0:1;
181 } s;
182 struct cvmx_stxx_int_sync_s cn38xx;
183 struct cvmx_stxx_int_sync_s cn38xxp2;
184 struct cvmx_stxx_int_sync_s cn58xx;
185 struct cvmx_stxx_int_sync_s cn58xxp1;
186};
187
188union cvmx_stxx_min_bst {
189 uint64_t u64;
190 struct cvmx_stxx_min_bst_s {
191 uint64_t reserved_9_63:55;
192 uint64_t minb:9;
193 } s;
194 struct cvmx_stxx_min_bst_s cn38xx;
195 struct cvmx_stxx_min_bst_s cn38xxp2;
196 struct cvmx_stxx_min_bst_s cn58xx;
197 struct cvmx_stxx_min_bst_s cn58xxp1;
198};
199
200union cvmx_stxx_spi4_calx {
201 uint64_t u64;
202 struct cvmx_stxx_spi4_calx_s {
203 uint64_t reserved_17_63:47;
204 uint64_t oddpar:1;
205 uint64_t prt3:4;
206 uint64_t prt2:4;
207 uint64_t prt1:4;
208 uint64_t prt0:4;
209 } s;
210 struct cvmx_stxx_spi4_calx_s cn38xx;
211 struct cvmx_stxx_spi4_calx_s cn38xxp2;
212 struct cvmx_stxx_spi4_calx_s cn58xx;
213 struct cvmx_stxx_spi4_calx_s cn58xxp1;
214};
215
216union cvmx_stxx_spi4_dat {
217 uint64_t u64;
218 struct cvmx_stxx_spi4_dat_s {
219 uint64_t reserved_32_63:32;
220 uint64_t alpha:16;
221 uint64_t max_t:16;
222 } s;
223 struct cvmx_stxx_spi4_dat_s cn38xx;
224 struct cvmx_stxx_spi4_dat_s cn38xxp2;
225 struct cvmx_stxx_spi4_dat_s cn58xx;
226 struct cvmx_stxx_spi4_dat_s cn58xxp1;
227};
228
229union cvmx_stxx_spi4_stat {
230 uint64_t u64;
231 struct cvmx_stxx_spi4_stat_s {
232 uint64_t reserved_16_63:48;
233 uint64_t m:8;
234 uint64_t reserved_7_7:1;
235 uint64_t len:7;
236 } s;
237 struct cvmx_stxx_spi4_stat_s cn38xx;
238 struct cvmx_stxx_spi4_stat_s cn38xxp2;
239 struct cvmx_stxx_spi4_stat_s cn58xx;
240 struct cvmx_stxx_spi4_stat_s cn58xxp1;
241};
242
243union cvmx_stxx_stat_bytes_hi {
244 uint64_t u64;
245 struct cvmx_stxx_stat_bytes_hi_s {
246 uint64_t reserved_32_63:32;
247 uint64_t cnt:32;
248 } s;
249 struct cvmx_stxx_stat_bytes_hi_s cn38xx;
250 struct cvmx_stxx_stat_bytes_hi_s cn38xxp2;
251 struct cvmx_stxx_stat_bytes_hi_s cn58xx;
252 struct cvmx_stxx_stat_bytes_hi_s cn58xxp1;
253};
254
255union cvmx_stxx_stat_bytes_lo {
256 uint64_t u64;
257 struct cvmx_stxx_stat_bytes_lo_s {
258 uint64_t reserved_32_63:32;
259 uint64_t cnt:32;
260 } s;
261 struct cvmx_stxx_stat_bytes_lo_s cn38xx;
262 struct cvmx_stxx_stat_bytes_lo_s cn38xxp2;
263 struct cvmx_stxx_stat_bytes_lo_s cn58xx;
264 struct cvmx_stxx_stat_bytes_lo_s cn58xxp1;
265};
266
267union cvmx_stxx_stat_ctl {
268 uint64_t u64;
269 struct cvmx_stxx_stat_ctl_s {
270 uint64_t reserved_5_63:59;
271 uint64_t clr:1;
272 uint64_t bckprs:4;
273 } s;
274 struct cvmx_stxx_stat_ctl_s cn38xx;
275 struct cvmx_stxx_stat_ctl_s cn38xxp2;
276 struct cvmx_stxx_stat_ctl_s cn58xx;
277 struct cvmx_stxx_stat_ctl_s cn58xxp1;
278};
279
280union cvmx_stxx_stat_pkt_xmt {
281 uint64_t u64;
282 struct cvmx_stxx_stat_pkt_xmt_s {
283 uint64_t reserved_32_63:32;
284 uint64_t cnt:32;
285 } s;
286 struct cvmx_stxx_stat_pkt_xmt_s cn38xx;
287 struct cvmx_stxx_stat_pkt_xmt_s cn38xxp2;
288 struct cvmx_stxx_stat_pkt_xmt_s cn58xx;
289 struct cvmx_stxx_stat_pkt_xmt_s cn58xxp1;
290};
291
292#endif
diff --git a/drivers/staging/octeon/cvmx-wqe.h b/drivers/staging/octeon/cvmx-wqe.h
new file mode 100644
index 000000000000..653610953d28
--- /dev/null
+++ b/drivers/staging/octeon/cvmx-wqe.h
@@ -0,0 +1,397 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28/**
29 *
30 * This header file defines the work queue entry (wqe) data structure.
31 * Since this is a commonly used structure that depends on structures
32 * from several hardware blocks, those definitions have been placed
33 * in this file to create a single point of definition of the wqe
34 * format.
35 * Data structures are still named according to the block that they
36 * relate to.
37 *
38 */
39
40#ifndef __CVMX_WQE_H__
41#define __CVMX_WQE_H__
42
43#include "cvmx-packet.h"
44
45
/* Map a CVMX_POW_TAG_TYPE_* value to a printable name for debug output. */
46#define OCT_TAG_TYPE_STRING(x) \
47 (((x) == CVMX_POW_TAG_TYPE_ORDERED) ? "ORDERED" : \
48 (((x) == CVMX_POW_TAG_TYPE_ATOMIC) ? "ATOMIC" : \
49 (((x) == CVMX_POW_TAG_TYPE_NULL) ? "NULL" : \
50 "NULL_NULL")))
51
52/**
53 * HW decode / err_code in work queue entry
54 */
55typedef union {
56 uint64_t u64;
57
58 /* Use this struct if the hardware determines that the packet is IP */
59 struct {
60 /* HW sets this to the number of buffers used by this packet */
61 uint64_t bufs:8;
62 /* HW sets to the number of L2 bytes prior to the IP */
63 uint64_t ip_offset:8;
64 /* set to 1 if we found DSA/VLAN in the L2 */
65 uint64_t vlan_valid:1;
66 /* Set to 1 if the DSA/VLAN tag is stacked */
67 uint64_t vlan_stacked:1;
68 uint64_t unassigned:1;
69 /* HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */
70 uint64_t vlan_cfi:1;
71 /* HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */
72 uint64_t vlan_id:12;
73 /* Ring Identifier (if PCIe). Requires PIP_GBL_CTL[RING_EN]=1 */
74 uint64_t pr:4;
75 uint64_t unassigned2:8;
76 /* the packet needs to be decompressed */
77 uint64_t dec_ipcomp:1;
78 /* the packet is either TCP or UDP */
79 uint64_t tcp_or_udp:1;
80 /* the packet needs to be decrypted (ESP or AH) */
81 uint64_t dec_ipsec:1;
82 /* the packet is IPv6 */
83 uint64_t is_v6:1;
84
85 /*
86 * (rcv_error, not_IP, IP_exc, is_frag, L4_error,
87 * software, etc.).
88 */
89
90 /*
91 * reserved for software use, hardware will clear on
92 * packet creation.
93 */
94 uint64_t software:1;
95 /* exceptional conditions below */
96 /* the receive interface hardware detected an L4 error
97 * (only applies if !is_frag) (only applies if
98 * !rcv_error && !not_IP && !IP_exc && !is_frag)
99 * failure indicated in err_code below, decode:
100 *
101 * - 1 = Malformed L4
102 * - 2 = L4 Checksum Error: the L4 checksum value is
103 * incorrect.
103 * - 3 = UDP Length Error: The UDP length field would
104 * make the UDP data longer than what remains in
105 * the IP packet (as defined by the IP header
106 * length field).
107 * - 4 = Bad L4 Port: either the source or destination
108 * TCP/UDP port is 0.
109 * - 8 = TCP FIN Only: the packet is TCP and only the
110 * FIN flag set.
111 * - 9 = TCP No Flags: the packet is TCP and no flags
112 * are set.
113 * - 10 = TCP FIN RST: the packet is TCP and both FIN
114 * and RST are set.
115 * - 11 = TCP SYN URG: the packet is TCP and both SYN
116 * and URG are set.
117 * - 12 = TCP SYN RST: the packet is TCP and both SYN
118 * and RST are set.
119 * - 13 = TCP SYN FIN: the packet is TCP and both SYN
120 * and FIN are set.
121 */
122 uint64_t L4_error:1;
123 /* set if the packet is a fragment */
124 uint64_t is_frag:1;
125 /* the receive interface hardware detected an IP error
126 * / exception (only applies if !rcv_error && !not_IP)
127 * failure indicated in err_code below, decode:
128 *
129 * - 1 = Not IP: the IP version field is neither 4 nor
130 * 6.
131 * - 2 = IPv4 Header Checksum Error: the IPv4 header
132 * has a checksum violation.
133 * - 3 = IP Malformed Header: the packet is not long
134 * enough to contain the IP header.
135 * - 4 = IP Malformed: the packet is not long enough
136 * to contain the bytes indicated by the IP
137 * header. Pad is allowed.
138 * - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6
139 * Hop Count field are zero.
140 * - 6 = IP Options
141 */
142 uint64_t IP_exc:1;
143 /*
144 * Set if the hardware determined that the packet is a
145 * broadcast.
146 */
147 uint64_t is_bcast:1;
148 /*
149 * Set if the hardware determined that the packet is a
150 * multi-cast.
151 */
152 uint64_t is_mcast:1;
153 /*
154 * Set if the packet may not be IP (must be zero in
155 * this case).
156 */
157 uint64_t not_IP:1;
158 /*
159 * The receive interface hardware detected a receive
160 * error (must be zero in this case).
161 */
162 uint64_t rcv_error:1;
163 /* lower err_code = first-level descriptor of the
164 * work */
165 /* zero for packet submitted by hardware that isn't on
166 * the slow path */
167 /* type is cvmx_pip_err_t */
168 uint64_t err_code:8;
169 } s;
170
171 /* use this to get at the 16 vlan bits */
172 struct {
173 uint64_t unused1:16;
174 uint64_t vlan:16;
175 uint64_t unused2:32;
176 } svlan;
177
178 /*
179 * use this struct if the hardware could not determine that
180 * the packet is ip.
181 */
182 struct {
183 /*
184 * HW sets this to the number of buffers used by this
185 * packet.
186 */
187 uint64_t bufs:8;
188 uint64_t unused:8;
189 /* set to 1 if we found DSA/VLAN in the L2 */
190 uint64_t vlan_valid:1;
191 /* Set to 1 if the DSA/VLAN tag is stacked */
192 uint64_t vlan_stacked:1;
193 uint64_t unassigned:1;
194 /*
195 * HW sets to the DSA/VLAN CFI flag (valid when
196 * vlan_valid)
197 */
198 uint64_t vlan_cfi:1;
199 /*
200 * HW sets to the DSA/VLAN_ID field (valid when
201 * vlan_valid).
202 */
203 uint64_t vlan_id:12;
204 /*
205 * Ring Identifier (if PCIe). Requires
206 * PIP_GBL_CTL[RING_EN]=1
207 */
208 uint64_t pr:4;
209 uint64_t unassigned2:12;
210 /*
211 * reserved for software use, hardware will clear on
212 * packet creation.
213 */
214 uint64_t software:1;
215 uint64_t unassigned3:1;
216 /*
217 * set if the hardware determined that the packet is
218 * rarp.
219 */
220 uint64_t is_rarp:1;
221 /*
222 * set if the hardware determined that the packet is
223 * arp
224 */
225 uint64_t is_arp:1;
226 /*
227 * set if the hardware determined that the packet is a
228 * broadcast.
229 */
230 uint64_t is_bcast:1;
231 /*
232 * set if the hardware determined that the packet is a
233 * multi-cast
234 */
235 uint64_t is_mcast:1;
236 /*
237 * set if the packet may not be IP (must be one in
238 * this case)
239 */
240 uint64_t not_IP:1;
241 /* The receive interface hardware detected a receive
242 * error. Failure indicated in err_code below,
243 * decode:
244 *
245 * - 1 = partial error: a packet was partially
246 * received, but internal buffering / bandwidth
247 * was not adequate to receive the entire
248 * packet.
249 * - 2 = jabber error: the RGMII packet was too large
250 * and is truncated.
251 * - 3 = overrun error: the RGMII packet is longer
252 * than allowed and had an FCS error.
253 * - 4 = oversize error: the RGMII packet is longer
254 * than allowed.
255 * - 5 = alignment error: the RGMII packet is not an
256 * integer number of bytes
257 * and had an FCS error (100M and 10M only).
258 * - 6 = fragment error: the RGMII packet is shorter
259 * than allowed and had an FCS error.
260 * - 7 = GMX FCS error: the RGMII packet had an FCS
261 * error.
262 * - 8 = undersize error: the RGMII packet is shorter
263 * than allowed.
264 * - 9 = extend error: the RGMII packet had an extend
265 * error.
266 * - 10 = length mismatch error: the RGMII packet had
267 * a length that did not match the length field
268 * in the L2 HDR.
269 * - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII
270 * packet had one or more data reception errors
271 * (RXERR) or the SPI4 packet had one or more
272 * DIP4 errors.
273 * - 12 = RGMII skip error/SPI4 Abort Error: the RGMII
274 * packet was not large enough to cover the
275 * skipped bytes or the SPI4 packet was
276 * terminated with an Abort EOPS.
277 * - 13 = RGMII nibble error/SPI4 Port NXA Error: the
278 * RGMII packet had a stutter error (data not
279 * repeated - 10/100M only) or the SPI4 packet
280 * was sent to an NXA.
281 * - 16 = FCS error: a SPI4.2 packet had an FCS error.
282 * - 17 = Skip error: a packet was not large enough to
283 * cover the skipped bytes.
284 * - 18 = L2 header malformed: the packet is not long
285 * enough to contain the L2.
286 */
287
288 uint64_t rcv_error:1;
289 /*
290 * lower err_code = first-level descriptor of the
291 * work
292 */
293 /*
294 * zero for packet submitted by hardware that isn't on
295 * the slow path
296 */
297 /* type is cvmx_pip_err_t (union, so can't use directly) */
298 uint64_t err_code:8;
299 } snoip;
300
301} cvmx_pip_wqe_word2;
302
303/**
304 * Work queue entry format
305 *
306 * must be 8-byte aligned
307 */
308typedef struct {
309
310 /*****************************************************************
311 * WORD 0
312 * HW WRITE: the following 64 bits are filled by HW when a packet arrives
313 */
314
315 /**
316 * raw chksum result generated by the HW
317 */
318 uint16_t hw_chksum;
319 /**
320 * Field unused by hardware - available for software
321 */
322 uint8_t unused;
323 /**
324 * Next pointer used by hardware for list maintenance.
325 * May be written/read by HW before the work queue
326 * entry is scheduled to a PP
327 * (Only 36 bits used in Octeon 1)
328 */
329 uint64_t next_ptr:40;
330
331 /*****************************************************************
332 * WORD 1
333 * HW WRITE: the following 64 bits are filled by HW when a packet arrives
334 */
335
336 /**
337 * HW sets to the total number of bytes in the packet
338 */
339 uint64_t len:16;
340 /**
341 * HW sets this to input physical port
342 */
343 uint64_t ipprt:6;
344
345 /**
346 * HW sets this to what it thought the priority of the input packet was
347 */
348 uint64_t qos:3;
349
350 /**
351 * the group that the work queue entry will be scheduled to
352 */
353 uint64_t grp:4;
354 /**
355 * the type of the tag (ORDERED, ATOMIC, NULL)
356 */
357 uint64_t tag_type:3;
358 /**
359 * the synchronization/ordering tag
360 */
361 uint64_t tag:32;
362
363 /**
364 * WORD 2 HW WRITE: the following 64-bits are filled in by
365 * hardware when a packet arrives. This indicates a variety of
366 * status and error conditions.
367 */
368 cvmx_pip_wqe_word2 word2;
369
370 /**
371 * Pointer to the first segment of the packet.
372 */
373 union cvmx_buf_ptr packet_ptr;
374
375 /**
376 * HW WRITE: octeon will fill in a programmable amount from the
377 * packet, up to (at most, but perhaps less) the amount
378 * needed to fill the work queue entry to 128 bytes
379 *
380 * If the packet is recognized to be IP, the hardware starts
381 * (except that the IPv4 header is padded for appropriate
382 * alignment) writing here where the IP header starts. If the
383 * packet is not recognized to be IP, the hardware starts
384 * writing the beginning of the packet here.
385 */
386 uint8_t packet_data[96];
387
388 /**
389 * If desired, SW can make the work Q entry any length. For the
390 * purposes of discussion here, assume 128B always, as this is all that
391 * the hardware deals with.
392 *
393 */
394
395} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t;
396
397#endif /* __CVMX_WQE_H__ */
diff --git a/drivers/staging/octeon/ethernet-common.c b/drivers/staging/octeon/ethernet-common.c
new file mode 100644
index 000000000000..3e6f5b8cc63d
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-common.c
@@ -0,0 +1,328 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/kernel.h>
28#include <linux/mii.h>
29#include <net/dst.h>
30
31#include <asm/atomic.h>
32#include <asm/octeon/octeon.h>
33
34#include "ethernet-defines.h"
35#include "ethernet-tx.h"
36#include "ethernet-mdio.h"
37#include "ethernet-util.h"
38#include "octeon-ethernet.h"
39#include "ethernet-common.h"
40
41#include "cvmx-pip.h"
42#include "cvmx-pko.h"
43#include "cvmx-fau.h"
44#include "cvmx-helper.h"
45
46#include "cvmx-gmxx-defs.h"
47
48/**
49 * Get the low level ethernet statistics
50 *
51 * @dev: Device to get the statistics from
52 * Returns Pointer to the statistics
53 */
54static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
55{
56 cvmx_pip_port_status_t rx_status;
57 cvmx_pko_port_status_t tx_status;
58 struct octeon_ethernet *priv = netdev_priv(dev);
59
60 if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
61 if (octeon_is_simulation()) {
62 /* The simulator doesn't support statistics */
63 memset(&rx_status, 0, sizeof(rx_status));
64 memset(&tx_status, 0, sizeof(tx_status));
65 } else {
/*
 * NOTE(review): the counters are accumulated with "+=" below, so the
 * second argument (1) presumably asks the cvmx helpers to clear the
 * hardware counters on read — confirm against the cvmx_pip/cvmx_pko
 * helper documentation.
 */
66 cvmx_pip_get_port_status(priv->port, 1, &rx_status);
67 cvmx_pko_get_port_status(priv->port, 1, &tx_status);
68 }
69
70 priv->stats.rx_packets += rx_status.inb_packets;
71 priv->stats.tx_packets += tx_status.packets;
72 priv->stats.rx_bytes += rx_status.inb_octets;
73 priv->stats.tx_bytes += tx_status.octets;
74 priv->stats.multicast += rx_status.multicast_packets;
75 priv->stats.rx_crc_errors += rx_status.inb_errors;
76 priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
77
78 /*
79 * The drop counter must be incremented atomically
80 * since the RX tasklet also increments it.
81 */
82#ifdef CONFIG_64BIT
83 atomic64_add(rx_status.dropped_packets,
84 (atomic64_t *)&priv->stats.rx_dropped);
85#else
86 atomic_add(rx_status.dropped_packets,
87 (atomic_t *)&priv->stats.rx_dropped);
88#endif
89 }
90
91 return &priv->stats;
92}
93
94/**
95 * Set the multicast/promiscuous receive filtering for a device.
96 *
97 * @dev: Device to work on
98 */
99static void cvm_oct_common_set_multicast_list(struct net_device *dev)
100{
101 union cvmx_gmxx_prtx_cfg gmx_cfg;
102 struct octeon_ethernet *priv = netdev_priv(dev);
103 int interface = INTERFACE(priv->port);
104 int index = INDEX(priv->port);
105
106 if ((interface < 2)
107 && (cvmx_helper_interface_get_mode(interface) !=
108 CVMX_HELPER_INTERFACE_MODE_SPI)) {
109 union cvmx_gmxx_rxx_adr_ctl control;
110 control.u64 = 0;
111 control.s.bcst = 1; /* Allow broadcast MAC addresses */
112
113 if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
114 (dev->flags & IFF_PROMISC))
115 /* Force accept multicast packets */
116 control.s.mcst = 2;
117 else
118 /* Force reject multicast packets */
119 control.s.mcst = 1;
120
121 if (dev->flags & IFF_PROMISC)
122 /*
123 * Reject matches if promisc. Since CAM is
124 * shut off, should accept everything.
125 */
126 control.s.cam_mode = 0;
127 else
128 /* Filter packets based on the CAM */
129 control.s.cam_mode = 1;
130
/*
 * NOTE(review): bit 0 of GMX_PRT_CFG is cleared here and restored at
 * the end, bracketing the filter update — presumably the port-enable
 * bit; confirm against the GMX register description.
 */
131 gmx_cfg.u64 =
132 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
133 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
134 gmx_cfg.u64 & ~1ull);
135
136 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
137 control.u64);
138 if (dev->flags & IFF_PROMISC)
139 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
140 (index, interface), 0);
141 else
142 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
143 (index, interface), 1);
144
145 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
146 gmx_cfg.u64);
147 }
148}
149
150/**
151 * Set the hardware MAC address for a device
152 *
153 * @dev: Device to change the MAC address for
154 * @addr: Address structure to change it to. MAC address is addr + 2.
155 * Returns Zero on success
156 */
157static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
158{
159 struct octeon_ethernet *priv = netdev_priv(dev);
160 union cvmx_gmxx_prtx_cfg gmx_cfg;
161 int interface = INTERFACE(priv->port);
162 int index = INDEX(priv->port);
163
164 memcpy(dev->dev_addr, addr + 2, 6);
165
166 if ((interface < 2)
167 && (cvmx_helper_interface_get_mode(interface) !=
168 CVMX_HELPER_INTERFACE_MODE_SPI)) {
169 int i;
170 uint8_t *ptr = addr;
171 uint64_t mac = 0;
/* Pack the six MAC bytes (starting at offset 2) into one 64-bit value
 * for the GMX_SMAC register, most significant byte first. */
172 for (i = 0; i < 6; i++)
173 mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
174
/*
 * NOTE(review): as in set_multicast_list, bit 0 of GMX_PRT_CFG is
 * cleared while the SMAC/CAM registers are rewritten and restored
 * afterwards — presumably the port-enable bit; confirm.
 */
175 gmx_cfg.u64 =
176 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
177 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
178 gmx_cfg.u64 & ~1ull);
179
180 cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
181 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
182 ptr[2]);
183 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
184 ptr[3]);
185 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
186 ptr[4]);
187 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
188 ptr[5]);
189 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
190 ptr[6]);
191 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
192 ptr[7]);
193 cvm_oct_common_set_multicast_list(dev);
194 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
195 gmx_cfg.u64);
196 }
197 return 0;
198}
199
200/**
201 * Change the link MTU and reprogram the hardware frame-length checks.
202 *
203 * @dev: Device to change
204 * @new_mtu: The new MTU
205 *
206 * Returns Zero on success
207 */
208static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
209{
210 struct octeon_ethernet *priv = netdev_priv(dev);
211 int interface = INTERFACE(priv->port);
212 int index = INDEX(priv->port);
213#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
214 int vlan_bytes = 4;
215#else
216 int vlan_bytes = 0;
217#endif
218
219 /*
220 * Limit the MTU to make sure the ethernet packets are between
221 * 64 bytes and 65392 bytes (14 = ethernet header, 4 = FCS).
222 */
223 if ((new_mtu + 14 + 4 + vlan_bytes < 64)
224 || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
225 pr_err("MTU must be between %d and %d.\n",
226 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
227 return -EINVAL;
228 }
229 dev->mtu = new_mtu;
230
231 if ((interface < 2)
232 && (cvmx_helper_interface_get_mode(interface) !=
233 CVMX_HELPER_INTERFACE_MODE_SPI)) {
234 /* Add ethernet header and FCS, and VLAN if configured. */
235 int max_packet = new_mtu + 14 + 4 + vlan_bytes;
236
237 if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
238 || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
239 /* Signal errors on packets larger than the MTU */
240 cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
241 max_packet);
242 } else {
243 /*
244 * Set the hardware to truncate packets larger
245 * than the MTU and smaller than 64 bytes.
246 */
247 union cvmx_pip_frm_len_chkx frm_len_chk;
248 frm_len_chk.u64 = 0;
249 frm_len_chk.s.minlen = 64;
250 frm_len_chk.s.maxlen = max_packet;
251 cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
252 frm_len_chk.u64);
253 }
254 /*
255 * Set the hardware to truncate packets larger than
256 * the MTU. The jabber register must be set to a
257 * multiple of 8 bytes, so round up.
258 */
259 cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
260 (max_packet + 7) & ~7u);
261 }
262 return 0;
263}
264
265/**
266 * Per network device initialization
267 *
268 * @dev: Device to initialize
269 * Returns Zero on success
270 */
271int cvm_oct_common_init(struct net_device *dev)
272{
/* Number of devices initialized so far; each new device gets a MAC
 * address offset from the bootloader-provided base by this count. */
273 static int count;
/* Laid out like the sockaddr set_mac_address expects: two bytes of
 * padding, then the six MAC bytes at offset 2. */
274 char mac[8] = { 0x00, 0x00,
275 octeon_bootinfo->mac_addr_base[0],
276 octeon_bootinfo->mac_addr_base[1],
277 octeon_bootinfo->mac_addr_base[2],
278 octeon_bootinfo->mac_addr_base[3],
279 octeon_bootinfo->mac_addr_base[4],
280 octeon_bootinfo->mac_addr_base[5] + count
281 };
282 struct octeon_ethernet *priv = netdev_priv(dev);
283
284 /*
285 * Force the interface to use the POW send if always_use_pow
286 * was specified or it is in the pow send list.
287 */
288 if ((pow_send_group != -1)
289 && (always_use_pow || strstr(pow_send_list, dev->name)))
290 priv->queue = -1;
291
292 if (priv->queue != -1) {
293 dev->hard_start_xmit = cvm_oct_xmit;
294 if (USE_HW_TCPUDP_CHECKSUM)
295 dev->features |= NETIF_F_IP_CSUM;
296 } else
297 dev->hard_start_xmit = cvm_oct_xmit_pow;
298 count++;
299
300 dev->get_stats = cvm_oct_common_get_stats;
301 dev->set_mac_address = cvm_oct_common_set_mac_address;
302 dev->set_multicast_list = cvm_oct_common_set_multicast_list;
303 dev->change_mtu = cvm_oct_common_change_mtu;
304 dev->do_ioctl = cvm_oct_ioctl;
305 /* We do our own locking, Linux doesn't need to */
306 dev->features |= NETIF_F_LLTX;
307 SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
308#ifdef CONFIG_NET_POLL_CONTROLLER
309 dev->poll_controller = cvm_oct_poll_controller;
310#endif
311
312 cvm_oct_mdio_setup_device(dev);
/* Program the generated MAC and apply the current MTU to hardware. */
313 dev->set_mac_address(dev, mac);
314 dev->change_mtu(dev, dev->mtu);
315
316 /*
317 * Zero out stats for port so we won't mistakenly show
318 * counters from the bootloader.
319 */
320 memset(dev->get_stats(dev), 0, sizeof(struct net_device_stats));
321
322 return 0;
323}
324
/**
 * Per network device cleanup — counterpart of cvm_oct_common_init().
 *
 * @dev: Device to clean up (currently unused)
 */
325void cvm_oct_common_uninit(struct net_device *dev)
326{
327 /* Currently nothing to do */
328}
diff --git a/drivers/staging/octeon/ethernet-common.h b/drivers/staging/octeon/ethernet-common.h
new file mode 100644
index 000000000000..2bd9cd76a398
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-common.h
@@ -0,0 +1,29 @@
1/*********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26*********************************************************************/
27
/* Shared per-device init/teardown used by all Octeon ethernet port types. */
28int cvm_oct_common_init(struct net_device *dev);
29void cvm_oct_common_uninit(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
new file mode 100644
index 000000000000..8f7374e7664c
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -0,0 +1,134 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27
28/*
29 * A few defines are used to control the operation of this driver:
30 * CONFIG_CAVIUM_RESERVE32
31 * This kernel config options controls the amount of memory configured
32 * in a wired TLB entry for all processes to share. If this is set, the
33 * driver will use this memory instead of kernel memory for pools. This
34 * allows 32bit userspace application to access the buffers, but also
35 * requires all received packets to be copied.
36 * CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
37 * This kernel config option allows the user to control the number of
38 * packet and work queue buffers allocated by the driver. If this is zero,
39 * the driver uses the default from below.
40 * USE_SKBUFFS_IN_HW
41 * Tells the driver to populate the packet buffers with kernel skbuffs.
42 * This allows the driver to receive packets without copying them. It also
43 * means that 32bit userspace can't access the packet buffers.
44 * USE_32BIT_SHARED
45 * This define tells the driver to allocate memory for buffers from the
46 * 32bit shared region instead of the kernel memory space.
47 * USE_HW_TCPUDP_CHECKSUM
48 * Controls if the Octeon TCP/UDP checksum engine is used for packet
49 * output. If this is zero, the kernel will perform the checksum in
50 * software.
51 * USE_MULTICORE_RECEIVE
52 * Process receive interrupts on multiple cores. This spreads the network
53 * load across the first 8 processors. If this is zero, only one core
54 * processes incoming packets.
55 * USE_ASYNC_IOBDMA
56 * Use asynchronous IO access to hardware. This uses Octeon's asynchronous
57 * IOBDMAs to issue IO accesses without stalling. Set this to zero
58 * to disable this. Note that IOBDMAs require CVMSEG.
59 * REUSE_SKBUFFS_WITHOUT_FREE
60 * Allows the TX path to free an skbuff into the FPA hardware pool. This
61 * can significantly improve performance for forwarding and bridging, but
62 * may be somewhat dangerous. Checks are made, but if any buffer is reused
63 * without the proper Linux cleanup, the networking stack may have very
64 * bizarre bugs.
65 */
66#ifndef __ETHERNET_DEFINES_H__
67#define __ETHERNET_DEFINES_H__
68
69#include "cvmx-config.h"
70
71
72#define OCTEON_ETHERNET_VERSION "1.9"
73
74#ifndef CONFIG_CAVIUM_RESERVE32
75#define CONFIG_CAVIUM_RESERVE32 0
76#endif
77
78#if CONFIG_CAVIUM_RESERVE32
79#define USE_32BIT_SHARED 1
80#define USE_SKBUFFS_IN_HW 0
81#define REUSE_SKBUFFS_WITHOUT_FREE 0
82#else
83#define USE_32BIT_SHARED 0
84#define USE_SKBUFFS_IN_HW 1
85#ifdef CONFIG_NETFILTER
86#define REUSE_SKBUFFS_WITHOUT_FREE 0
87#else
88#define REUSE_SKBUFFS_WITHOUT_FREE 1
89#endif
90#endif
91
92/* Max interrupts per second per core */
93#define INTERRUPT_LIMIT 10000
94
95/* Don't limit the number of interrupts */
96/*#define INTERRUPT_LIMIT 0 */
97#define USE_HW_TCPUDP_CHECKSUM 1
98
99#define USE_MULTICORE_RECEIVE 1
100
101/* Enable Random Early Dropping under load */
102#define USE_RED 1
103#define USE_ASYNC_IOBDMA (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
104
105/*
106 * Allow SW based preamble removal at 10Mbps to workaround PHYs giving
107 * us bad preambles.
108 */
109#define USE_10MBPS_PREAMBLE_WORKAROUND 1
110/*
111 * Use this to have all FPA frees also tell the L2 not to write data
112 * to memory.
113 */
114#define DONT_WRITEBACK(x) (x)
115/* Use this to not have FPA frees control L2 */
116/*#define DONT_WRITEBACK(x) 0 */
117
118/* Maximum number of packets to process per interrupt. */
119#define MAX_RX_PACKETS 120
120#define MAX_OUT_QUEUE_DEPTH 1000
121
122#ifndef CONFIG_SMP
123#undef USE_MULTICORE_RECEIVE
124#define USE_MULTICORE_RECEIVE 0
125#endif
126
/* IANA IP protocol numbers used when classifying packets. */
127#define IP_PROTOCOL_TCP 6
128#define IP_PROTOCOL_UDP 0x11
129
/* NOTE(review): carves the last 32-bit slot of the FAU register space
 * for the buffers-to-free counter — confirm no other FAU user overlaps. */
130#define FAU_NUM_PACKET_BUFFERS_TO_FREE (CVMX_FAU_REG_END - sizeof(uint32_t))
131#define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1)
132
133
134#endif /* __ETHERNET_DEFINES_H__ */
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
new file mode 100644
index 000000000000..93cab0a48925
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -0,0 +1,231 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/kernel.h>
28#include <linux/ethtool.h>
29#include <linux/mii.h>
30#include <net/dst.h>
31
32#include <asm/octeon/octeon.h>
33
34#include "ethernet-defines.h"
35#include "octeon-ethernet.h"
36#include "ethernet-mdio.h"
37
38#include "cvmx-helper-board.h"
39
40#include "cvmx-smix-defs.h"
41
/*
 * Serializes all SMI/MDIO bus accesses in this file.
 * NOTE(review): DECLARE_MUTEX actually declares a binary semaphore used
 * as a mutex; consider converting to DEFINE_MUTEX + mutex_lock/unlock.
 */
DECLARE_MUTEX(mdio_sem);
43
/**
 * Perform an MII read. Called by the generic MII routines
 *
 * @dev:      Device to perform read for (unused; SMI unit 0 is hardwired)
 * @phy_id:   The MII phy id
 * @location: Register location to read
 * Returns Result from the read or zero on failure
 */
static int cvm_oct_mdio_read(struct net_device *dev, int phy_id, int location)
{
	union cvmx_smix_cmd smi_cmd;
	union cvmx_smix_rd_dat smi_rd;

	/* Issue the read command (phy_op = 1 selects "read") */
	smi_cmd.u64 = 0;
	smi_cmd.s.phy_op = 1;
	smi_cmd.s.phy_adr = phy_id;
	smi_cmd.s.reg_adr = location;
	cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);

	/* Busy-wait for completion, yielding the CPU in process context */
	do {
		if (!in_interrupt())
			yield();
		smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(0));
	} while (smi_rd.s.pending);

	/* 'val' is set by hardware when the read data is valid */
	if (smi_rd.s.val)
		return smi_rd.s.dat;
	else
		return 0;
}
74
/*
 * Stand-in MDIO read used when a port has no PHY.  Returns all-ones,
 * the conventional MII "nothing responded" value.
 */
static int cvm_oct_mdio_dummy_read(struct net_device *dev, int phy_id,
				   int location)
{
	return 0xffff;
}
80
/**
 * Perform an MII write. Called by the generic MII routines
 *
 * @dev:      Device to perform write for (unused; SMI unit 0 is hardwired)
 * @phy_id:   The MII phy id
 * @location: Register location to write
 * @val:      Value to write
 */
static void cvm_oct_mdio_write(struct net_device *dev, int phy_id, int location,
			       int val)
{
	union cvmx_smix_cmd smi_cmd;
	union cvmx_smix_wr_dat smi_wr;

	/* Stage the data word before issuing the write command */
	smi_wr.u64 = 0;
	smi_wr.s.dat = val;
	cvmx_write_csr(CVMX_SMIX_WR_DAT(0), smi_wr.u64);

	/* Issue the write command (phy_op = 0 selects "write") */
	smi_cmd.u64 = 0;
	smi_cmd.s.phy_op = 0;
	smi_cmd.s.phy_adr = phy_id;
	smi_cmd.s.reg_adr = location;
	cvmx_write_csr(CVMX_SMIX_CMD(0), smi_cmd.u64);

	/* Busy-wait for completion, yielding the CPU in process context */
	do {
		if (!in_interrupt())
			yield();
		smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(0));
	} while (smi_wr.s.pending);
}
111
/* Stand-in MDIO write for PHY-less ports: silently discards the write. */
static void cvm_oct_mdio_dummy_write(struct net_device *dev, int phy_id,
				     int location, int val)
{
}
116
117static void cvm_oct_get_drvinfo(struct net_device *dev,
118 struct ethtool_drvinfo *info)
119{
120 strcpy(info->driver, "cavium-ethernet");
121 strcpy(info->version, OCTEON_ETHERNET_VERSION);
122 strcpy(info->bus_info, "Builtin");
123}
124
125static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
126{
127 struct octeon_ethernet *priv = netdev_priv(dev);
128 int ret;
129
130 down(&mdio_sem);
131 ret = mii_ethtool_gset(&priv->mii_info, cmd);
132 up(&mdio_sem);
133
134 return ret;
135}
136
137static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
138{
139 struct octeon_ethernet *priv = netdev_priv(dev);
140 int ret;
141
142 down(&mdio_sem);
143 ret = mii_ethtool_sset(&priv->mii_info, cmd);
144 up(&mdio_sem);
145
146 return ret;
147}
148
149static int cvm_oct_nway_reset(struct net_device *dev)
150{
151 struct octeon_ethernet *priv = netdev_priv(dev);
152 int ret;
153
154 down(&mdio_sem);
155 ret = mii_nway_restart(&priv->mii_info);
156 up(&mdio_sem);
157
158 return ret;
159}
160
161static u32 cvm_oct_get_link(struct net_device *dev)
162{
163 struct octeon_ethernet *priv = netdev_priv(dev);
164 u32 ret;
165
166 down(&mdio_sem);
167 ret = mii_link_ok(&priv->mii_info);
168 up(&mdio_sem);
169
170 return ret;
171}
172
/* ethtool entry points shared by all Octeon ethernet devices */
struct ethtool_ops cvm_oct_ethtool_ops = {
	.get_drvinfo = cvm_oct_get_drvinfo,
	.get_settings = cvm_oct_get_settings,
	.set_settings = cvm_oct_set_settings,
	.nway_reset = cvm_oct_nway_reset,
	.get_link = cvm_oct_get_link,
	.get_sg = ethtool_op_get_sg,
	.get_tx_csum = ethtool_op_get_tx_csum,
};
182
183/**
184 * IOCTL support for PHY control
185 *
186 * @dev: Device to change
187 * @rq: the request
188 * @cmd: the command
189 * Returns Zero on success
190 */
191int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
192{
193 struct octeon_ethernet *priv = netdev_priv(dev);
194 struct mii_ioctl_data *data = if_mii(rq);
195 unsigned int duplex_chg;
196 int ret;
197
198 down(&mdio_sem);
199 ret = generic_mii_ioctl(&priv->mii_info, data, cmd, &duplex_chg);
200 up(&mdio_sem);
201
202 return ret;
203}
204
205/**
206 * Setup the MDIO device structures
207 *
208 * @dev: Device to setup
209 *
210 * Returns Zero on success, negative on failure
211 */
212int cvm_oct_mdio_setup_device(struct net_device *dev)
213{
214 struct octeon_ethernet *priv = netdev_priv(dev);
215 int phy_id = cvmx_helper_board_get_mii_address(priv->port);
216 if (phy_id != -1) {
217 priv->mii_info.dev = dev;
218 priv->mii_info.phy_id = phy_id;
219 priv->mii_info.phy_id_mask = 0xff;
220 priv->mii_info.supports_gmii = 1;
221 priv->mii_info.reg_num_mask = 0x1f;
222 priv->mii_info.mdio_read = cvm_oct_mdio_read;
223 priv->mii_info.mdio_write = cvm_oct_mdio_write;
224 } else {
225 /* Supply dummy MDIO routines so the kernel won't crash
226 if the user tries to read them */
227 priv->mii_info.mdio_read = cvm_oct_mdio_dummy_read;
228 priv->mii_info.mdio_write = cvm_oct_mdio_dummy_write;
229 }
230 return 0;
231}
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
new file mode 100644
index 000000000000..6314141e5ef2
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -0,0 +1,46 @@
1/*********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26*********************************************************************/
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/netdevice.h>
30#include <linux/init.h>
31#include <linux/etherdevice.h>
32#include <linux/ip.h>
33#include <linux/string.h>
34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/seq_file.h>
37#include <linux/proc_fs.h>
38#include <net/dst.h>
39#ifdef CONFIG_XFRM
40#include <linux/xfrm.h>
41#include <net/xfrm.h>
42#endif /* CONFIG_XFRM */
43
/* ethtool ops table and MDIO helpers implemented in ethernet-mdio.c */
extern struct ethtool_ops cvm_oct_ethtool_ops;
int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int cvm_oct_mdio_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
new file mode 100644
index 000000000000..b595903e2af1
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -0,0 +1,198 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/kernel.h>
28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h>
31
32#include <asm/octeon/octeon.h>
33
34#include "ethernet-defines.h"
35
36#include "cvmx-fpa.h"
37
/**
 * Fill the supplied hardware pool with skbuffs
 *
 * @pool:     Pool to allocate an skbuff for
 * @size:     Size of the buffer needed for the pool
 * @elements: Number of buffers to allocate
 *
 * Returns the number of buffers actually placed in the pool (may be
 * fewer than @elements if an allocation fails).
 */
static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
{
	int freed = elements;
	while (freed) {
		/* 128 extra bytes leave room to align data to 128 bytes */
		struct sk_buff *skb = dev_alloc_skb(size + 128);
		if (unlikely(skb == NULL)) {
			pr_warning
			    ("Failed to allocate skb for hardware pool %d\n",
			     pool);
			break;
		}

		/* Align skb->data to a 128-byte boundary... */
		skb_reserve(skb, 128 - (((unsigned long)skb->data) & 0x7f));
		/* ...and stash the skb pointer just before the data area so
		   the free path can recover the skb from the raw buffer. */
		*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
		cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
		freed--;
	}
	return elements - freed;
}
65
/**
 * Free the supplied hardware pool of skbuffs
 *
 * @pool:     Pool to drain the skbuffs from
 * @size:     Size of each buffer in the pool (unused here)
 * @elements: Number of buffers that should be in the pool
 */
static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
{
	char *memory;

	/*
	 * Drain the pool; each buffer carries its skb pointer just before
	 * the data area (stashed by cvm_oct_fill_hw_skbuff).
	 */
	do {
		memory = cvmx_fpa_alloc(pool);
		if (memory) {
			struct sk_buff *skb =
			    *(struct sk_buff **)(memory - sizeof(void *));
			elements--;
			dev_kfree_skb(skb);
		}
	} while (memory);

	/* 'pool' is an int, so print it with %d (was %u: UB per C std) */
	if (elements < 0)
		pr_warning("Freeing of pool %d had too many skbuffs (%d)\n",
			   pool, elements);
	else if (elements > 0)
		pr_warning("Freeing of pool %d is missing %d skbuffs\n",
			   pool, elements);
}
94
/**
 * This function fills a hardware pool with memory. Depending
 * on the config defines, this memory might come from the
 * kernel or global 32bit memory allocated with
 * cvmx_bootmem_alloc.
 *
 * @pool:     Pool to populate
 * @size:     Size of each buffer in the pool
 * @elements: Number of buffers to allocate
 *
 * Returns the number of buffers actually placed in the pool.
 */
static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
{
	char *memory;
	int freed = elements;

	if (USE_32BIT_SHARED) {
		extern uint64_t octeon_reserve32_memory;

		/* Carve all buffers out of the reserved 32-bit region */
		memory =
		    cvmx_bootmem_alloc_range(elements * size, 128,
					     octeon_reserve32_memory,
					     octeon_reserve32_memory +
					     (CONFIG_CAVIUM_RESERVE32 << 20) -
					     1);
		if (memory == NULL)
			panic("Unable to allocate %u bytes for FPA pool %d\n",
			      elements * size, pool);

		pr_notice("Memory range %p - %p reserved for "
			  "hardware\n", memory,
			  memory + elements * size - 1);

		while (freed) {
			cvmx_fpa_free(memory, pool, 0);
			memory += size;
			freed--;
		}
	} else {
		while (freed) {
			/* We need to force alignment to 128 bytes here */
			memory = kmalloc(size + 127, GFP_ATOMIC);
			if (unlikely(memory == NULL)) {
				pr_warning("Unable to allocate %u bytes for "
					   "FPA pool %d\n",
					   elements * size, pool);
				break;
			}
			/*
			 * NOTE(review): rounding the pointer up means the
			 * address handed to the pool may differ from the
			 * address kmalloc returned, yet the free path
			 * (cvm_oct_free_hw_memory) kfree()s the pool address
			 * directly -- an invalid free whenever rounding moved
			 * the pointer.  The original allocation needs to be
			 * recorded for the free path; confirm and fix both
			 * sides together.
			 */
			memory = (char *)(((unsigned long)memory + 127) & -128);
			cvmx_fpa_free(memory, pool, 0);
			freed--;
		}
	}
	return elements - freed;
}
149
150/**
151 * Free memory previously allocated with cvm_oct_fill_hw_memory
152 *
153 * @pool: FPA pool to free
154 * @size: Size of each buffer in the pool
155 * @elements: Number of buffers that should be in the pool
156 */
157static void cvm_oct_free_hw_memory(int pool, int size, int elements)
158{
159 if (USE_32BIT_SHARED) {
160 pr_warning("Warning: 32 shared memory is not freeable\n");
161 } else {
162 char *memory;
163 do {
164 memory = cvmx_fpa_alloc(pool);
165 if (memory) {
166 elements--;
167 kfree(phys_to_virt(cvmx_ptr_to_phys(memory)));
168 }
169 } while (memory);
170
171 if (elements < 0)
172 pr_warning("Freeing of pool %u had too many "
173 "buffers (%d)\n",
174 pool, elements);
175 else if (elements > 0)
176 pr_warning("Warning: Freeing of pool %u is "
177 "missing %d buffers\n",
178 pool, elements);
179 }
180}
181
182int cvm_oct_mem_fill_fpa(int pool, int size, int elements)
183{
184 int freed;
185 if (USE_SKBUFFS_IN_HW)
186 freed = cvm_oct_fill_hw_skbuff(pool, size, elements);
187 else
188 freed = cvm_oct_fill_hw_memory(pool, size, elements);
189 return freed;
190}
191
192void cvm_oct_mem_empty_fpa(int pool, int size, int elements)
193{
194 if (USE_SKBUFFS_IN_HW)
195 cvm_oct_free_hw_skbuff(pool, size, elements);
196 else
197 cvm_oct_free_hw_memory(pool, size, elements);
198}
diff --git a/drivers/staging/octeon/ethernet-mem.h b/drivers/staging/octeon/ethernet-mem.h
new file mode 100644
index 000000000000..713f2edc8b4f
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-mem.h
@@ -0,0 +1,29 @@
1/*********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26********************************************************************/
27
/* Fill/drain an FPA hardware pool; implemented in ethernet-mem.c */
int cvm_oct_mem_fill_fpa(int pool, int size, int elements);
void cvm_oct_mem_empty_fpa(int pool, int size, int elements);
diff --git a/drivers/staging/octeon/ethernet-proc.c b/drivers/staging/octeon/ethernet-proc.c
new file mode 100644
index 000000000000..8fa88fc419b7
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-proc.c
@@ -0,0 +1,256 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/kernel.h>
28#include <linux/mii.h>
29#include <linux/seq_file.h>
30#include <linux/proc_fs.h>
31#include <net/dst.h>
32
33#include <asm/octeon/octeon.h>
34
35#include "octeon-ethernet.h"
36#include "ethernet-defines.h"
37
38#include "cvmx-helper.h"
39#include "cvmx-pip.h"
40
41static unsigned long long cvm_oct_stats_read_switch(struct net_device *dev,
42 int phy_id, int offset)
43{
44 struct octeon_ethernet *priv = netdev_priv(dev);
45
46 priv->mii_info.mdio_write(dev, phy_id, 0x1d, 0xcc00 | offset);
47 return ((uint64_t) priv->mii_info.
48 mdio_read(dev, phy_id,
49 0x1e) << 16) | (uint64_t) priv->mii_info.
50 mdio_read(dev, phy_id, 0x1f);
51}
52
/*
 * Dump per-port counters of an external switch reached through port 0's
 * MDIO bus at phy address 0x1b.  NOTE(review): the register protocol
 * (latch via 0xdc00|port to reg 0x1d, counters via
 * cvm_oct_stats_read_switch) and the fixed port list {0,1,2,3,9} look
 * device-specific -- confirm which switch chip this targets.
 */
static int cvm_oct_stats_switch_show(struct seq_file *m, void *v)
{
	/* -1 terminates the list of switch ports to dump */
	static const int ports[] = { 0, 1, 2, 3, 9, -1 };
	struct net_device *dev = cvm_oct_device[0];
	int index = 0;

	while (ports[index] != -1) {

		/* Latch port */
		struct octeon_ethernet *priv = netdev_priv(dev);

		priv->mii_info.mdio_write(dev, 0x1b, 0x1d,
					  0xdc00 | ports[index]);
		seq_printf(m, "\nSwitch Port %d\n", ports[index]);
		seq_printf(m, "InGoodOctets: %12llu\t"
			   "OutOctets: %12llu\t"
			   "64 Octets: %12llu\n",
			   cvm_oct_stats_read_switch(dev, 0x1b,
						     0x00) |
			   (cvm_oct_stats_read_switch(dev, 0x1b, 0x01) << 32),
			   cvm_oct_stats_read_switch(dev, 0x1b,
						     0x0E) |
			   (cvm_oct_stats_read_switch(dev, 0x1b, 0x0F) << 32),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x08));

		seq_printf(m, "InBadOctets: %12llu\t"
			   "OutUnicast: %12llu\t"
			   "65-127 Octets: %12llu\n",
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x02),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x10),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x09));

		seq_printf(m, "InUnicast: %12llu\t"
			   "OutBroadcasts: %12llu\t"
			   "128-255 Octets: %12llu\n",
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x04),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x13),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x0A));

		seq_printf(m, "InBroadcasts: %12llu\t"
			   "OutMulticasts: %12llu\t"
			   "256-511 Octets: %12llu\n",
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x06),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x12),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x0B));

		seq_printf(m, "InMulticasts: %12llu\t"
			   "OutPause: %12llu\t"
			   "512-1023 Octets:%12llu\n",
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x07),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x15),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x0C));

		seq_printf(m, "InPause: %12llu\t"
			   "Excessive: %12llu\t"
			   "1024-Max Octets:%12llu\n",
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x16),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x11),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x0D));

		seq_printf(m, "InUndersize: %12llu\t"
			   "Collisions: %12llu\n",
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x18),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x1E));

		seq_printf(m, "InFragments: %12llu\t"
			   "Deferred: %12llu\n",
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x19),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x05));

		seq_printf(m, "InOversize: %12llu\t"
			   "Single: %12llu\n",
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x1A),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x14));

		seq_printf(m, "InJabber: %12llu\t"
			   "Multiple: %12llu\n",
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x1B),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x17));

		seq_printf(m, "In RxErr: %12llu\t"
			   "OutFCSErr: %12llu\n",
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x1C),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x03));

		seq_printf(m, "InFCSErr: %12llu\t"
			   "Late: %12llu\n",
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x1D),
			   cvm_oct_stats_read_switch(dev, 0x1b, 0x1F));
		index++;
	}
	return 0;
}
146
/**
 * User is reading /proc/octeon_ethernet_stats
 *
 * @m: seq_file to print into
 * @v: unused iterator position
 * Returns 0; all ports are emitted in a single pass.
 */
static int cvm_oct_stats_show(struct seq_file *m, void *v)
{
	struct octeon_ethernet *priv;
	int port;

	/* Print the cached net_device stats for every registered port */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {

		if (cvm_oct_device[port]) {
			priv = netdev_priv(cvm_oct_device[port]);

			seq_printf(m, "\nOcteon Port %d (%s)\n", port,
				   cvm_oct_device[port]->name);
			seq_printf(m,
				   "rx_packets:             %12lu\t"
				   "tx_packets:             %12lu\n",
				   priv->stats.rx_packets,
				   priv->stats.tx_packets);
			seq_printf(m,
				   "rx_bytes:               %12lu\t"
				   "tx_bytes:               %12lu\n",
				   priv->stats.rx_bytes, priv->stats.tx_bytes);
			seq_printf(m,
				   "rx_errors:              %12lu\t"
				   "tx_errors:              %12lu\n",
				   priv->stats.rx_errors,
				   priv->stats.tx_errors);
			seq_printf(m,
				   "rx_dropped:             %12lu\t"
				   "tx_dropped:             %12lu\n",
				   priv->stats.rx_dropped,
				   priv->stats.tx_dropped);
			seq_printf(m,
				   "rx_length_errors:       %12lu\t"
				   "tx_aborted_errors:      %12lu\n",
				   priv->stats.rx_length_errors,
				   priv->stats.tx_aborted_errors);
			seq_printf(m,
				   "rx_over_errors:         %12lu\t"
				   "tx_carrier_errors:      %12lu\n",
				   priv->stats.rx_over_errors,
				   priv->stats.tx_carrier_errors);
			seq_printf(m,
				   "rx_crc_errors:          %12lu\t"
				   "tx_fifo_errors:         %12lu\n",
				   priv->stats.rx_crc_errors,
				   priv->stats.tx_fifo_errors);
			seq_printf(m,
				   "rx_frame_errors:        %12lu\t"
				   "tx_heartbeat_errors:    %12lu\n",
				   priv->stats.rx_frame_errors,
				   priv->stats.tx_heartbeat_errors);
			seq_printf(m,
				   "rx_fifo_errors:         %12lu\t"
				   "tx_window_errors:       %12lu\n",
				   priv->stats.rx_fifo_errors,
				   priv->stats.tx_window_errors);
			seq_printf(m,
				   "rx_missed_errors:       %12lu\t"
				   "multicast:              %12lu\n",
				   priv->stats.rx_missed_errors,
				   priv->stats.multicast);
		}
	}

	/* If port 0 is plain GMII, an external switch may be attached */
	if (cvm_oct_device[0]) {
		priv = netdev_priv(cvm_oct_device[0]);
		if (priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
			cvm_oct_stats_switch_show(m, v);
	}
	return 0;
}
225
/**
 * /proc/octeon_ethernet_stats was opened. Use the single_open iterator
 *
 * @inode: proc entry inode (unused)
 * @file:  file being opened
 * Returns result of single_open()
 */
static int cvm_oct_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, cvm_oct_stats_show, NULL);
}
237
/* seq_file plumbing for /proc/octeon_ethernet_stats */
static const struct file_operations cvm_oct_stats_operations = {
	.open = cvm_oct_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
244
245void cvm_oct_proc_initialize(void)
246{
247 struct proc_dir_entry *entry =
248 create_proc_entry("octeon_ethernet_stats", 0, NULL);
249 if (entry)
250 entry->proc_fops = &cvm_oct_stats_operations;
251}
252
/* Remove /proc/octeon_ethernet_stats on driver teardown. */
void cvm_oct_proc_shutdown(void)
{
	remove_proc_entry("octeon_ethernet_stats", NULL);
}
diff --git a/drivers/staging/octeon/ethernet-proc.h b/drivers/staging/octeon/ethernet-proc.h
new file mode 100644
index 000000000000..82c7d9f78bc4
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-proc.h
@@ -0,0 +1,29 @@
1/*********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26*********************************************************************/
27
28void cvm_oct_proc_initialize(void);
29void cvm_oct_proc_shutdown(void);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
new file mode 100644
index 000000000000..8579f1670d1e
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -0,0 +1,397 @@
1/*********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/kernel.h>
28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h>
31
32#include <asm/octeon/octeon.h>
33
34#include "ethernet-defines.h"
35#include "octeon-ethernet.h"
36#include "ethernet-common.h"
37#include "ethernet-util.h"
38
39#include "cvmx-helper.h"
40
41#include <asm/octeon/cvmx-ipd-defs.h>
42#include <asm/octeon/cvmx-npi-defs.h>
43#include "cvmx-gmxx-defs.h"
44
/* Protects GMX/IPD registers that are shared between ports. */
DEFINE_SPINLOCK(global_register_lock);

/* Count of RGMII ports; presumably updated by init/open paths not in
   this view -- confirm before relying on it. */
static int number_rgmii_ports;
48
/*
 * Re-check the inband link state of one RGMII port, manage the 10Mbps
 * bad-preamble workaround, and keep Linux's carrier state in sync.
 */
static void cvm_oct_rgmii_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	cvmx_helper_link_info_t link_info;

	/*
	 * Take the global register lock since we are going to touch
	 * registers that affect more than one port.
	 */
	spin_lock_irqsave(&global_register_lock, flags);

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info) {

		/*
		 * If the 10Mbps preamble workaround is supported and we're
		 * at 10Mbps we may need to do some special checking.
		 */
		if (USE_10MBPS_PREAMBLE_WORKAROUND && (link_info.s.speed == 10)) {

			/*
			 * Read the GMXX_RXX_INT_REG[PCTERR] bit and
			 * see if we are getting preamble errors.
			 */
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);
			union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
			gmxx_rxx_int_reg.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
					  (index, interface));
			if (gmxx_rxx_int_reg.s.pcterr) {

				/*
				 * We are getting preamble errors at
				 * 10Mbps. Most likely the PHY is
				 * giving us packets with mis aligned
				 * preambles. In order to get these
				 * packets we need to disable preamble
				 * checking and do it in software.
				 */
				union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
				union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;

				/* Disable preamble checking */
				gmxx_rxx_frm_ctl.u64 =
				    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
						  (index, interface));
				gmxx_rxx_frm_ctl.s.pre_chk = 0;
				cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL
					       (index, interface),
					       gmxx_rxx_frm_ctl.u64);

				/* Disable FCS stripping */
				ipd_sub_port_fcs.u64 =
				    cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
				ipd_sub_port_fcs.s.port_bit &=
				    0xffffffffull ^ (1ull << priv->port);
				cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS,
					       ipd_sub_port_fcs.u64);

				/* Clear any error bits */
				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
					       (index, interface),
					       gmxx_rxx_int_reg.u64);
				DEBUGPRINT("%s: Using 10Mbps with software "
					   "preamble removal\n",
					   dev->name);
			}
		}
		/* Link state unchanged: nothing more to do */
		spin_unlock_irqrestore(&global_register_lock, flags);
		return;
	}

	/* If the 10Mbps preamble workaround is allowed we need to on
	   preamble checking, FCS stripping, and clear error bits on
	   every speed change. If errors occur during 10Mbps operation
	   the above code will change this stuff */
	if (USE_10MBPS_PREAMBLE_WORKAROUND) {

		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
		union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
		union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		/* Enable preamble checking */
		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		gmxx_rxx_frm_ctl.s.pre_chk = 1;
		cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface),
			       gmxx_rxx_frm_ctl.u64);
		/* Enable FCS stripping */
		ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
		ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port;
		cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
		/* Clear any error bits */
		gmxx_rxx_int_reg.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
		cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
			       gmxx_rxx_int_reg.u64);
	}

	/* The link changed: renegotiate and cache the new state */
	link_info = cvmx_helper_link_autoconf(priv->port);
	priv->link_info = link_info.u64;
	spin_unlock_irqrestore(&global_register_lock, flags);

	/* Tell Linux */
	if (link_info.s.link_up) {

		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
		if (priv->queue != -1)
			DEBUGPRINT
			    ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
			     dev->name, link_info.s.speed,
			     (link_info.s.full_duplex) ? "Full" : "Half",
			     priv->port, priv->queue);
		else
			DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n",
				   dev->name, link_info.s.speed,
				   (link_info.s.full_duplex) ? "Full" : "Half",
				   priv->port);
	} else {

		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
		DEBUGPRINT("%s: Link down\n", dev->name);
	}
}
179
180static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
181{
182 union cvmx_npi_rsl_int_blocks rsl_int_blocks;
183 int index;
184 irqreturn_t return_status = IRQ_NONE;
185
186 rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
187
188 /* Check and see if this interrupt was caused by the GMX0 block */
189 if (rsl_int_blocks.s.gmx0) {
190
191 int interface = 0;
192 /* Loop through every port of this interface */
193 for (index = 0;
194 index < cvmx_helper_ports_on_interface(interface);
195 index++) {
196
197 /* Read the GMX interrupt status bits */
198 union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
199 gmx_rx_int_reg.u64 =
200 cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
201 (index, interface));
202 gmx_rx_int_reg.u64 &=
203 cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
204 (index, interface));
205 /* Poll the port if inband status changed */
206 if (gmx_rx_int_reg.s.phy_dupx
207 || gmx_rx_int_reg.s.phy_link
208 || gmx_rx_int_reg.s.phy_spd) {
209
210 struct net_device *dev =
211 cvm_oct_device[cvmx_helper_get_ipd_port
212 (interface, index)];
213 if (dev)
214 cvm_oct_rgmii_poll(dev);
215 gmx_rx_int_reg.u64 = 0;
216 gmx_rx_int_reg.s.phy_dupx = 1;
217 gmx_rx_int_reg.s.phy_link = 1;
218 gmx_rx_int_reg.s.phy_spd = 1;
219 cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
220 (index, interface),
221 gmx_rx_int_reg.u64);
222 return_status = IRQ_HANDLED;
223 }
224 }
225 }
226
227 /* Check and see if this interrupt was caused by the GMX1 block */
228 if (rsl_int_blocks.s.gmx1) {
229
230 int interface = 1;
231 /* Loop through every port of this interface */
232 for (index = 0;
233 index < cvmx_helper_ports_on_interface(interface);
234 index++) {
235
236 /* Read the GMX interrupt status bits */
237 union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
238 gmx_rx_int_reg.u64 =
239 cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
240 (index, interface));
241 gmx_rx_int_reg.u64 &=
242 cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
243 (index, interface));
244 /* Poll the port if inband status changed */
245 if (gmx_rx_int_reg.s.phy_dupx
246 || gmx_rx_int_reg.s.phy_link
247 || gmx_rx_int_reg.s.phy_spd) {
248
249 struct net_device *dev =
250 cvm_oct_device[cvmx_helper_get_ipd_port
251 (interface, index)];
252 if (dev)
253 cvm_oct_rgmii_poll(dev);
254 gmx_rx_int_reg.u64 = 0;
255 gmx_rx_int_reg.s.phy_dupx = 1;
256 gmx_rx_int_reg.s.phy_link = 1;
257 gmx_rx_int_reg.s.phy_spd = 1;
258 cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
259 (index, interface),
260 gmx_rx_int_reg.u64);
261 return_status = IRQ_HANDLED;
262 }
263 }
264 }
265 return return_status;
266}
267
268static int cvm_oct_rgmii_open(struct net_device *dev)
269{
270 union cvmx_gmxx_prtx_cfg gmx_cfg;
271 struct octeon_ethernet *priv = netdev_priv(dev);
272 int interface = INTERFACE(priv->port);
273 int index = INDEX(priv->port);
274 cvmx_helper_link_info_t link_info;
275
276 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
277 gmx_cfg.s.en = 1;
278 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
279
280 if (!octeon_is_simulation()) {
281 link_info = cvmx_helper_link_get(priv->port);
282 if (!link_info.s.link_up)
283 netif_carrier_off(dev);
284 }
285
286 return 0;
287}
288
289static int cvm_oct_rgmii_stop(struct net_device *dev)
290{
291 union cvmx_gmxx_prtx_cfg gmx_cfg;
292 struct octeon_ethernet *priv = netdev_priv(dev);
293 int interface = INTERFACE(priv->port);
294 int index = INDEX(priv->port);
295
296 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
297 gmx_cfg.s.en = 0;
298 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
299 return 0;
300}
301
302int cvm_oct_rgmii_init(struct net_device *dev)
303{
304 struct octeon_ethernet *priv = netdev_priv(dev);
305 int r;
306
307 cvm_oct_common_init(dev);
308 dev->open = cvm_oct_rgmii_open;
309 dev->stop = cvm_oct_rgmii_stop;
310 dev->stop(dev);
311
312 /*
313 * Due to GMX errata in CN3XXX series chips, it is necessary
314 * to take the link down immediately whne the PHY changes
315 * state. In order to do this we call the poll function every
316 * time the RGMII inband status changes. This may cause
317 * problems if the PHY doesn't implement inband status
318 * properly.
319 */
320 if (number_rgmii_ports == 0) {
321 r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt,
322 IRQF_SHARED, "RGMII", &number_rgmii_ports);
323 }
324 number_rgmii_ports++;
325
326 /*
327 * Only true RGMII ports need to be polled. In GMII mode, port
328 * 0 is really a RGMII port.
329 */
330 if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
331 && (priv->port == 0))
332 || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
333
334 if (!octeon_is_simulation()) {
335
336 union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
337 int interface = INTERFACE(priv->port);
338 int index = INDEX(priv->port);
339
340 /*
341 * Enable interrupts on inband status changes
342 * for this port.
343 */
344 gmx_rx_int_en.u64 =
345 cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
346 (index, interface));
347 gmx_rx_int_en.s.phy_dupx = 1;
348 gmx_rx_int_en.s.phy_link = 1;
349 gmx_rx_int_en.s.phy_spd = 1;
350 cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
351 gmx_rx_int_en.u64);
352 priv->poll = cvm_oct_rgmii_poll;
353 }
354 }
355
356 return 0;
357}
358
/*
 * Tear down an RGMII/GMII port: undo cvm_oct_rgmii_init(). Disables the
 * inband status-change interrupts for true RGMII ports and releases the
 * shared RML interrupt when the last RGMII port goes away.
 */
void cvm_oct_rgmii_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvm_oct_common_uninit(dev);

	/*
	 * Only true RGMII ports need to be polled. In GMII mode, port
	 * 0 is really a RGMII port.
	 */
	if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
	     && (priv->port == 0))
	    || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {

		if (!octeon_is_simulation()) {

			union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
			int interface = INTERFACE(priv->port);
			int index = INDEX(priv->port);

			/*
			 * Disable interrupts on inband status changes
			 * for this port.
			 */
			gmx_rx_int_en.u64 =
			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
					  (index, interface));
			gmx_rx_int_en.s.phy_dupx = 0;
			gmx_rx_int_en.s.phy_link = 0;
			gmx_rx_int_en.s.phy_spd = 0;
			cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
				       gmx_rx_int_en.u64);
		}
	}

	/* Remove the interrupt handler when the last port is removed. */
	number_rgmii_ports--;
	if (number_rgmii_ports == 0)
		free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);
}
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
new file mode 100644
index 000000000000..1b237b7e689d
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -0,0 +1,505 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/cache.h>
30#include <linux/netdevice.h>
31#include <linux/init.h>
32#include <linux/etherdevice.h>
33#include <linux/ip.h>
34#include <linux/string.h>
35#include <linux/prefetch.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#include <linux/seq_file.h>
39#include <linux/proc_fs.h>
40#include <net/dst.h>
41#ifdef CONFIG_XFRM
42#include <linux/xfrm.h>
43#include <net/xfrm.h>
44#endif /* CONFIG_XFRM */
45
46#include <asm/atomic.h>
47
48#include <asm/octeon/octeon.h>
49
50#include "ethernet-defines.h"
51#include "octeon-ethernet.h"
52#include "ethernet-mem.h"
53#include "ethernet-util.h"
54
55#include "cvmx-helper.h"
56#include "cvmx-wqe.h"
57#include "cvmx-fau.h"
58#include "cvmx-pow.h"
59#include "cvmx-pip.h"
60#include "cvmx-scratch.h"
61
62#include "cvmx-gmxx-defs.h"
63
/* Per-CPU wrapper so each CPU's receive tasklet is a distinct object. */
struct cvm_tasklet_wrapper {
	struct tasklet_struct t;
};

/*
 * Aligning the tasklet_struct on cacheline boundaries seems to decrease
 * throughput even though in theory it would reduce contention on the
 * cache lines containing the locks.
 */

/* One receive tasklet per possible CPU; indexed by smp_processor_id(). */
static struct cvm_tasklet_wrapper cvm_oct_tasklet[NR_CPUS];
75
/**
 * Interrupt handler. The interrupt occurs whenever the POW
 * transitions from 0->1 packets in our group.
 *
 * @cpl: interrupt number (unused)
 * @dev_id: cookie passed to request_irq() (unused)
 *
 * Acknowledges the POW interrupt and schedules this CPU's receive
 * tasklet, which does the actual packet processing.
 *
 * Returns IRQ_HANDLED.
 */
irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
	/* Acknowledge the interrupt */
	if (INTERRUPT_LIMIT)
		cvmx_write_csr(CVMX_POW_WQ_INT, 1 << pow_receive_group);
	else
		cvmx_write_csr(CVMX_POW_WQ_INT, 0x10001 << pow_receive_group);
	/* Keep the tasklet on the CPU that took the interrupt. */
	preempt_disable();
	tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t);
	preempt_enable();
	return IRQ_HANDLED;
}
97
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * This is called when the kernel needs to manually poll the
 * device. For Octeon, this is simply calling the interrupt
 * handler. We actually poll all the devices, not just the
 * one supplied.
 *
 * @dev: Device to poll. Unused - all ports share one receive tasklet,
 *       so scheduling it services every device.
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	preempt_disable();
	tasklet_schedule(&cvm_oct_tasklet[smp_processor_id()].t);
	preempt_enable();
}
#endif
114
/**
 * This is called on receive errors, and determines if the packet
 * can be dropped early-on in cvm_oct_tasklet_rx().
 *
 * May also repair the packet in place (10Mbps non-spec preamble
 * workaround), adjusting work->packet_ptr and work->len.
 *
 * @work: Work queue entry pointing to the packet.
 * Returns Non-zero if the packet can be dropped (the work entry has
 * been freed), zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS. Note these packets still get
		 * counted as frame errors.
		 */
	} else
	    if (USE_10MBPS_PREAMBLE_WORKAROUND
		&& ((work->word2.snoip.err_code == 5)
		    || (work->word2.snoip.err_code == 7))) {

		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(work->ipprt);
		int index = cvmx_helper_get_interface_index_num(work->ipprt);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {

			uint8_t *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			/* Skip the run of 0x55 preamble bytes. */
			while (i < work->len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* Byte-aligned SFD: skip preamble + SFD. */
				/*
				   DEBUGPRINT("Port %d received 0xd5 preamble\n", work->ipprt);
				 */
				work->packet_ptr.s.addr += i + 1;
				work->len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/*
				 * Nibble-aligned SFD: shift the whole
				 * packet left by 4 bits in place.
				 */
				/*
				   DEBUGPRINT("Port %d received 0x?d preamble\n", work->ipprt);
				 */
				work->packet_ptr.s.addr += i;
				work->len -= i + 4;
				for (i = 0; i < work->len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				DEBUGPRINT("Port %d unknown preamble, packet "
					   "dropped\n",
					   work->ipprt);
				/*
				   cvmx_helper_dump_packet(work);
				 */
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		DEBUGPRINT("Port %d receive error code %d, packet dropped\n",
			   work->ipprt, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}
200
/**
 * Tasklet function that is scheduled on a core when an interrupt occurs.
 *
 * @unused: required by the tasklet API; not used.
 *
 * Pulls work-queue entries from the POW for this core's receive group,
 * turns each into an sk_buff (zero-copy when the hardware already placed
 * the data in an FPA-backed skbuff), hands it to the network stack, and
 * finally refills the packet buffer pool.
 */
void cvm_oct_tasklet_rx(unsigned long unused)
{
	const int coreid = cvmx_get_core_num();
	uint64_t old_group_mask;
	uint64_t old_scratch;
	int rx_count = 0;
	int number_to_free;
	int num_freed;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
		       (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);

	if (USE_ASYNC_IOBDMA)
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);

	while (1) {
		struct sk_buff *skb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;

		if (USE_ASYNC_IOBDMA) {
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		} else {
			if ((INTERRUPT_LIMIT == 0)
			    || likely(rx_count < MAX_RX_PACKETS))
				work =
				    cvmx_pow_work_request_sync
				    (CVMX_POW_NO_WAIT);
			else
				work = NULL;
		}
		prefetch(work);
		if (work == NULL)
			break;

		/*
		 * Limit each core to processing MAX_RX_PACKETS
		 * packets without a break. This way the RX can't
		 * starve the TX task.
		 */
		if (USE_ASYNC_IOBDMA) {

			if ((INTERRUPT_LIMIT == 0)
			    || likely(rx_count < MAX_RX_PACKETS))
				cvmx_pow_work_request_async_nocheck
				    (CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
			else {
				/*
				 * Park the scratch response and drop the
				 * current POW tag; the loop exits on the
				 * next iteration.
				 */
				cvmx_scratch_write64(CVMX_SCR_SCRATCH,
						     0x8000000000000000ull);
				cvmx_pow_tag_sw_null_nocheck();
			}
		}

		/* bufs == 1 means the whole packet is in one FPA buffer. */
		skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			/*
			 * The sk_buff pointer is stashed just before the
			 * FPA buffer's data area.
			 */
			skb =
			    *(struct sk_buff
			      **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
				  sizeof(void *));
			prefetch(&skb->head);
			prefetch(&skb->len);
		}
		prefetch(cvm_oct_device[work->ipprt]);

		rx_count++;
		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			/*
			 * This calculation was changed in case the
			 * skb header is using a different address
			 * aliasing type than the buffer. It doesn't
			 * make any difference now, but the new one is
			 * more correct.
			 */
			skb->data =
			    skb->head + work->packet_ptr.s.addr -
			    cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {

			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->len);
			if (!skb) {
				DEBUGPRINT("Port %d failed to allocate "
					   "skbuff, packet dropped\n",
					   work->ipprt);
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry. This is
			 * untested.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				uint8_t *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				memcpy(skb_put(skb, work->len), ptr, work->len);
				/* No packet buffers to free */
			} else {
				/* Multi-buffer packet: copy each segment. */
				int segments = work->word2.s.bufs;
				union cvmx_buf_ptr segment_ptr =
				    work->packet_ptr;
				int len = work->len;

				while (segments--) {
					union cvmx_buf_ptr next_ptr =
					    *(union cvmx_buf_ptr *)
					    cvmx_phys_to_ptr(segment_ptr.s.
							     addr - 8);
					/*
					 * Octeon Errata PKI-100: The segment size is
					 * wrong. Until it is fixed, calculate the
					 * segment size based on the packet pool
					 * buffer size. When it is fixed, the
					 * following line should be replaced with this
					 * one: int segment_size =
					 * segment_ptr.s.size;
					 */
					int segment_size =
					    CVMX_FPA_PACKET_POOL_SIZE -
					    (segment_ptr.s.addr -
					     (((segment_ptr.s.addr >> 7) -
					       segment_ptr.s.back) << 7));
					/* Don't copy more than what is left
					   in the packet */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					memcpy(skb_put(skb, segment_size),
					       cvmx_phys_to_ptr(segment_ptr.s.
								addr),
					       segment_size);
					/* Reduce the amount of bytes left
					   to copy */
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}

		if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[work->ipprt])) {
			struct net_device *dev = cvm_oct_device[work->ipprt];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/* Only accept packets for devices
			   that are currently up */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely
				    (work->word2.s.not_IP
				     || work->word2.s.IP_exc
				     || work->word2.s.L4_error))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
					atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
					atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
#else
					atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
					atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
#endif
				}
				netif_receive_skb(skb);
			} else {
				/*
				 * Drop any packet received for a
				 * device that isn't up.
				 */
				/*
				   DEBUGPRINT("%s: Device not up, packet dropped\n",
				   dev->name);
				 */
#ifdef CONFIG_64BIT
				atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
#else
				atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
#endif
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			DEBUGPRINT("Port %d not controlled by Linux, packet "
				   "dropped\n",
				   work->ipprt);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
				      DONT_WRITEBACK(1));
		} else {
			cvm_oct_free_work(work);
		}
	}

	/* Restore the original POW group mask */
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}

	if (USE_SKBUFFS_IN_HW) {
		/* Refill the packet buffer pool */
		number_to_free =
		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

		if (number_to_free > 0) {
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      -number_to_free);
			num_freed =
			    cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL,
						 CVMX_FPA_PACKET_POOL_SIZE,
						 number_to_free);
			/* Put back any count we failed to refill. */
			if (num_freed != number_to_free) {
				cvmx_fau_atomic_add32
				    (FAU_NUM_PACKET_BUFFERS_TO_FREE,
				     number_to_free - num_freed);
			}
		}
	}
}
490
491void cvm_oct_rx_initialize(void)
492{
493 int i;
494 /* Initialize all of the tasklets */
495 for (i = 0; i < NR_CPUS; i++)
496 tasklet_init(&cvm_oct_tasklet[i].t, cvm_oct_tasklet_rx, 0);
497}
498
499void cvm_oct_rx_shutdown(void)
500{
501 int i;
502 /* Shutdown all of the tasklets */
503 for (i = 0; i < NR_CPUS; i++)
504 tasklet_kill(&cvm_oct_tasklet[i].t);
505}
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
new file mode 100644
index 000000000000..a9b72b87a7a6
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -0,0 +1,33 @@
1/*********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26*********************************************************************/
27
/* Top-half IRQ handler: acks the POW interrupt and kicks the RX tasklet. */
irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id);
/* netpoll hook; polls all devices, not just @dev. */
void cvm_oct_poll_controller(struct net_device *dev);
/* Tasklet body that performs the actual packet receive processing. */
void cvm_oct_tasklet_rx(unsigned long unused);

/* Set up / tear down the per-CPU receive tasklets. */
void cvm_oct_rx_initialize(void);
void cvm_oct_rx_shutdown(void);
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
new file mode 100644
index 000000000000..58fa39c1d675
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -0,0 +1,129 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/kernel.h>
28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h>
31
32#include <asm/octeon/octeon.h>
33
34#include "ethernet-defines.h"
35#include "octeon-ethernet.h"
36#include "ethernet-util.h"
37#include "ethernet-common.h"
38
39#include "cvmx-helper.h"
40
41#include "cvmx-gmxx-defs.h"
42
43static int cvm_oct_sgmii_open(struct net_device *dev)
44{
45 union cvmx_gmxx_prtx_cfg gmx_cfg;
46 struct octeon_ethernet *priv = netdev_priv(dev);
47 int interface = INTERFACE(priv->port);
48 int index = INDEX(priv->port);
49 cvmx_helper_link_info_t link_info;
50
51 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
52 gmx_cfg.s.en = 1;
53 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
54
55 if (!octeon_is_simulation()) {
56 link_info = cvmx_helper_link_get(priv->port);
57 if (!link_info.s.link_up)
58 netif_carrier_off(dev);
59 }
60
61 return 0;
62}
63
64static int cvm_oct_sgmii_stop(struct net_device *dev)
65{
66 union cvmx_gmxx_prtx_cfg gmx_cfg;
67 struct octeon_ethernet *priv = netdev_priv(dev);
68 int interface = INTERFACE(priv->port);
69 int index = INDEX(priv->port);
70
71 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
72 gmx_cfg.s.en = 0;
73 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
74 return 0;
75}
76
/*
 * Periodic link poll for an SGMII port. If the link state differs from
 * the cached value, renegotiates the link and updates the Linux carrier
 * state to match.
 */
static void cvm_oct_sgmii_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	/* Nothing to do if the link state hasn't changed. */
	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	link_info = cvmx_helper_link_autoconf(priv->port);
	priv->link_info = link_info.u64;

	/* Tell Linux */
	if (link_info.s.link_up) {

		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
		if (priv->queue != -1)
			DEBUGPRINT
			    ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
			     dev->name, link_info.s.speed,
			     (link_info.s.full_duplex) ? "Full" : "Half",
			     priv->port, priv->queue);
		else
			DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n",
				   dev->name, link_info.s.speed,
				   (link_info.s.full_duplex) ? "Full" : "Half",
				   priv->port);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
		DEBUGPRINT("%s: Link down\n", dev->name);
	}
}
111
/*
 * One-time setup for an SGMII port: common initialization, old-style
 * open/stop hooks, and (on real hardware) registration of the link
 * poll callback. Always returns 0.
 */
int cvm_oct_sgmii_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvm_oct_common_init(dev);
	dev->open = cvm_oct_sgmii_open;
	dev->stop = cvm_oct_sgmii_stop;
	/* Leave the port disabled until it is explicitly opened. */
	dev->stop(dev);
	if (!octeon_is_simulation())
		priv->poll = cvm_oct_sgmii_poll;

	/* FIXME: Need autoneg logic */
	return 0;
}
125
/*
 * Tear down an SGMII port. Only the common uninit is needed; SGMII has
 * no port-specific state beyond what cvm_oct_common_init() set up.
 */
void cvm_oct_sgmii_uninit(struct net_device *dev)
{
	cvm_oct_common_uninit(dev);
}
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
new file mode 100644
index 000000000000..e0971bbe4ddc
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -0,0 +1,323 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/kernel.h>
28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h>
31
32#include <asm/octeon/octeon.h>
33
34#include "ethernet-defines.h"
35#include "octeon-ethernet.h"
36#include "ethernet-common.h"
37#include "ethernet-util.h"
38
39#include "cvmx-spi.h"
40
41#include <asm/octeon/cvmx-npi-defs.h>
42#include "cvmx-spxx-defs.h"
43#include "cvmx-stxx-defs.h"
44
/* Count of SPI ports in use — presumably gates the shared RML IRQ
   setup/teardown like the RGMII counterpart; confirm against the
   init/uninit code outside this view. */
static int number_spi_ports;
/* Per-interface flag set by the error interrupt when a SPI interface
   must be retrained; error reporting stays masked while it is set. */
static int need_retrain[2] = { 0, 0 };
47
48static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id)
49{
50 irqreturn_t return_status = IRQ_NONE;
51 union cvmx_npi_rsl_int_blocks rsl_int_blocks;
52
53 /* Check and see if this interrupt was caused by the GMX block */
54 rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
55 if (rsl_int_blocks.s.spx1) { /* 19 - SPX1_INT_REG & STX1_INT_REG */
56
57 union cvmx_spxx_int_reg spx_int_reg;
58 union cvmx_stxx_int_reg stx_int_reg;
59
60 spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(1));
61 cvmx_write_csr(CVMX_SPXX_INT_REG(1), spx_int_reg.u64);
62 if (!need_retrain[1]) {
63
64 spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(1));
65 if (spx_int_reg.s.spf)
66 pr_err("SPI1: SRX Spi4 interface down\n");
67 if (spx_int_reg.s.calerr)
68 pr_err("SPI1: SRX Spi4 Calendar table "
69 "parity error\n");
70 if (spx_int_reg.s.syncerr)
71 pr_err("SPI1: SRX Consecutive Spi4 DIP4 "
72 "errors have exceeded "
73 "SPX_ERR_CTL[ERRCNT]\n");
74 if (spx_int_reg.s.diperr)
75 pr_err("SPI1: SRX Spi4 DIP4 error\n");
76 if (spx_int_reg.s.tpaovr)
77 pr_err("SPI1: SRX Selected port has hit "
78 "TPA overflow\n");
79 if (spx_int_reg.s.rsverr)
80 pr_err("SPI1: SRX Spi4 reserved control "
81 "word detected\n");
82 if (spx_int_reg.s.drwnng)
83 pr_err("SPI1: SRX Spi4 receive FIFO "
84 "drowning/overflow\n");
85 if (spx_int_reg.s.clserr)
86 pr_err("SPI1: SRX Spi4 packet closed on "
87 "non-16B alignment without EOP\n");
88 if (spx_int_reg.s.spiovr)
89 pr_err("SPI1: SRX Spi4 async FIFO overflow\n");
90 if (spx_int_reg.s.abnorm)
91 pr_err("SPI1: SRX Abnormal packet "
92 "termination (ERR bit)\n");
93 if (spx_int_reg.s.prtnxa)
94 pr_err("SPI1: SRX Port out of range\n");
95 }
96
97 stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(1));
98 cvmx_write_csr(CVMX_STXX_INT_REG(1), stx_int_reg.u64);
99 if (!need_retrain[1]) {
100
101 stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(1));
102 if (stx_int_reg.s.syncerr)
103 pr_err("SPI1: STX Interface encountered a "
104 "fatal error\n");
105 if (stx_int_reg.s.frmerr)
106 pr_err("SPI1: STX FRMCNT has exceeded "
107 "STX_DIP_CNT[MAXFRM]\n");
108 if (stx_int_reg.s.unxfrm)
109 pr_err("SPI1: STX Unexpected framing "
110 "sequence\n");
111 if (stx_int_reg.s.nosync)
112 pr_err("SPI1: STX ERRCNT has exceeded "
113 "STX_DIP_CNT[MAXDIP]\n");
114 if (stx_int_reg.s.diperr)
115 pr_err("SPI1: STX DIP2 error on the Spi4 "
116 "Status channel\n");
117 if (stx_int_reg.s.datovr)
118 pr_err("SPI1: STX Spi4 FIFO overflow error\n");
119 if (stx_int_reg.s.ovrbst)
120 pr_err("SPI1: STX Transmit packet burst "
121 "too big\n");
122 if (stx_int_reg.s.calpar1)
123 pr_err("SPI1: STX Calendar Table Parity "
124 "Error Bank1\n");
125 if (stx_int_reg.s.calpar0)
126 pr_err("SPI1: STX Calendar Table Parity "
127 "Error Bank0\n");
128 }
129
130 cvmx_write_csr(CVMX_SPXX_INT_MSK(1), 0);
131 cvmx_write_csr(CVMX_STXX_INT_MSK(1), 0);
132 need_retrain[1] = 1;
133 return_status = IRQ_HANDLED;
134 }
135
136 if (rsl_int_blocks.s.spx0) { /* 18 - SPX0_INT_REG & STX0_INT_REG */
137 union cvmx_spxx_int_reg spx_int_reg;
138 union cvmx_stxx_int_reg stx_int_reg;
139
140 spx_int_reg.u64 = cvmx_read_csr(CVMX_SPXX_INT_REG(0));
141 cvmx_write_csr(CVMX_SPXX_INT_REG(0), spx_int_reg.u64);
142 if (!need_retrain[0]) {
143
144 spx_int_reg.u64 &= cvmx_read_csr(CVMX_SPXX_INT_MSK(0));
145 if (spx_int_reg.s.spf)
146 pr_err("SPI0: SRX Spi4 interface down\n");
147 if (spx_int_reg.s.calerr)
148 pr_err("SPI0: SRX Spi4 Calendar table "
149 "parity error\n");
150 if (spx_int_reg.s.syncerr)
151 pr_err("SPI0: SRX Consecutive Spi4 DIP4 "
152 "errors have exceeded "
153 "SPX_ERR_CTL[ERRCNT]\n");
154 if (spx_int_reg.s.diperr)
155 pr_err("SPI0: SRX Spi4 DIP4 error\n");
156 if (spx_int_reg.s.tpaovr)
157 pr_err("SPI0: SRX Selected port has hit "
158 "TPA overflow\n");
159 if (spx_int_reg.s.rsverr)
160 pr_err("SPI0: SRX Spi4 reserved control "
161 "word detected\n");
162 if (spx_int_reg.s.drwnng)
163 pr_err("SPI0: SRX Spi4 receive FIFO "
164 "drowning/overflow\n");
165 if (spx_int_reg.s.clserr)
166 pr_err("SPI0: SRX Spi4 packet closed on "
167 "non-16B alignment without EOP\n");
168 if (spx_int_reg.s.spiovr)
169 pr_err("SPI0: SRX Spi4 async FIFO overflow\n");
170 if (spx_int_reg.s.abnorm)
171 pr_err("SPI0: SRX Abnormal packet "
172 "termination (ERR bit)\n");
173 if (spx_int_reg.s.prtnxa)
174 pr_err("SPI0: SRX Port out of range\n");
175 }
176
177 stx_int_reg.u64 = cvmx_read_csr(CVMX_STXX_INT_REG(0));
178 cvmx_write_csr(CVMX_STXX_INT_REG(0), stx_int_reg.u64);
179 if (!need_retrain[0]) {
180
181 stx_int_reg.u64 &= cvmx_read_csr(CVMX_STXX_INT_MSK(0));
182 if (stx_int_reg.s.syncerr)
183 pr_err("SPI0: STX Interface encountered a "
184 "fatal error\n");
185 if (stx_int_reg.s.frmerr)
186 pr_err("SPI0: STX FRMCNT has exceeded "
187 "STX_DIP_CNT[MAXFRM]\n");
188 if (stx_int_reg.s.unxfrm)
189 pr_err("SPI0: STX Unexpected framing "
190 "sequence\n");
191 if (stx_int_reg.s.nosync)
192 pr_err("SPI0: STX ERRCNT has exceeded "
193 "STX_DIP_CNT[MAXDIP]\n");
194 if (stx_int_reg.s.diperr)
195 pr_err("SPI0: STX DIP2 error on the Spi4 "
196 "Status channel\n");
197 if (stx_int_reg.s.datovr)
198 pr_err("SPI0: STX Spi4 FIFO overflow error\n");
199 if (stx_int_reg.s.ovrbst)
200 pr_err("SPI0: STX Transmit packet burst "
201 "too big\n");
202 if (stx_int_reg.s.calpar1)
203 pr_err("SPI0: STX Calendar Table Parity "
204 "Error Bank1\n");
205 if (stx_int_reg.s.calpar0)
206 pr_err("SPI0: STX Calendar Table Parity "
207 "Error Bank0\n");
208 }
209
210 cvmx_write_csr(CVMX_SPXX_INT_MSK(0), 0);
211 cvmx_write_csr(CVMX_STXX_INT_MSK(0), 0);
212 need_retrain[0] = 1;
213 return_status = IRQ_HANDLED;
214 }
215
216 return return_status;
217}
218
219static void cvm_oct_spi_enable_error_reporting(int interface)
220{
221 union cvmx_spxx_int_msk spxx_int_msk;
222 union cvmx_stxx_int_msk stxx_int_msk;
223
224 spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
225 spxx_int_msk.s.calerr = 1;
226 spxx_int_msk.s.syncerr = 1;
227 spxx_int_msk.s.diperr = 1;
228 spxx_int_msk.s.tpaovr = 1;
229 spxx_int_msk.s.rsverr = 1;
230 spxx_int_msk.s.drwnng = 1;
231 spxx_int_msk.s.clserr = 1;
232 spxx_int_msk.s.spiovr = 1;
233 spxx_int_msk.s.abnorm = 1;
234 spxx_int_msk.s.prtnxa = 1;
235 cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
236
237 stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
238 stxx_int_msk.s.frmerr = 1;
239 stxx_int_msk.s.unxfrm = 1;
240 stxx_int_msk.s.nosync = 1;
241 stxx_int_msk.s.diperr = 1;
242 stxx_int_msk.s.datovr = 1;
243 stxx_int_msk.s.ovrbst = 1;
244 stxx_int_msk.s.calpar1 = 1;
245 stxx_int_msk.s.calpar0 = 1;
246 cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
247}
248
249static void cvm_oct_spi_poll(struct net_device *dev)
250{
251 static int spi4000_port;
252 struct octeon_ethernet *priv = netdev_priv(dev);
253 int interface;
254
255 for (interface = 0; interface < 2; interface++) {
256
257 if ((priv->port == interface * 16) && need_retrain[interface]) {
258
259 if (cvmx_spi_restart_interface
260 (interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) {
261 need_retrain[interface] = 0;
262 cvm_oct_spi_enable_error_reporting(interface);
263 }
264 }
265
266 /*
267 * The SPI4000 TWSI interface is very slow. In order
268 * not to bring the system to a crawl, we only poll a
269 * single port every second. This means negotiation
270 * speed changes take up to 10 seconds, but at least
271 * we don't waste absurd amounts of time waiting for
272 * TWSI.
273 */
274 if (priv->port == spi4000_port) {
275 /*
276 * This function does nothing if it is called on an
277 * interface without a SPI4000.
278 */
279 cvmx_spi4000_check_speed(interface, priv->port);
280 /*
281 * Normal ordering increments. By decrementing
282 * we only match once per iteration.
283 */
284 spi4000_port--;
285 if (spi4000_port < 0)
286 spi4000_port = 10;
287 }
288 }
289}
290
291int cvm_oct_spi_init(struct net_device *dev)
292{
293 int r;
294 struct octeon_ethernet *priv = netdev_priv(dev);
295
296 if (number_spi_ports == 0) {
297 r = request_irq(OCTEON_IRQ_RML, cvm_oct_spi_rml_interrupt,
298 IRQF_SHARED, "SPI", &number_spi_ports);
299 }
300 number_spi_ports++;
301
302 if ((priv->port == 0) || (priv->port == 16)) {
303 cvm_oct_spi_enable_error_reporting(INTERFACE(priv->port));
304 priv->poll = cvm_oct_spi_poll;
305 }
306 cvm_oct_common_init(dev);
307 return 0;
308}
309
310void cvm_oct_spi_uninit(struct net_device *dev)
311{
312 int interface;
313
314 cvm_oct_common_uninit(dev);
315 number_spi_ports--;
316 if (number_spi_ports == 0) {
317 for (interface = 0; interface < 2; interface++) {
318 cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
319 cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
320 }
321 free_irq(8 + 46, &number_spi_ports);
322 }
323}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
new file mode 100644
index 000000000000..77b7122c8fdb
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -0,0 +1,634 @@
1/*********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26*********************************************************************/
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/netdevice.h>
30#include <linux/init.h>
31#include <linux/etherdevice.h>
32#include <linux/ip.h>
33#include <linux/string.h>
34#include <linux/ethtool.h>
35#include <linux/mii.h>
36#include <linux/seq_file.h>
37#include <linux/proc_fs.h>
38#include <net/dst.h>
39#ifdef CONFIG_XFRM
40#include <linux/xfrm.h>
41#include <net/xfrm.h>
42#endif /* CONFIG_XFRM */
43
44#include <asm/atomic.h>
45
46#include <asm/octeon/octeon.h>
47
48#include "ethernet-defines.h"
49#include "octeon-ethernet.h"
50#include "ethernet-util.h"
51
52#include "cvmx-wqe.h"
53#include "cvmx-fau.h"
54#include "cvmx-pko.h"
55#include "cvmx-helper.h"
56
57#include "cvmx-gmxx-defs.h"
58
59/*
60 * You can define GET_SKBUFF_QOS() to override how the skbuff output
61 * function determines which output queue is used. The default
62 * implementation always uses the base queue for the port. If, for
 * example, you wanted to use the skb->priority field, define
64 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
65 */
66#ifndef GET_SKBUFF_QOS
67#define GET_SKBUFF_QOS(skb) 0
68#endif
69
/**
 * Packet transmit
 *
 * Hard-start-xmit handler for ports with dedicated PKO queues. The skb
 * may be handed to the hardware FPA pool for reuse without a free (see
 * REUSE_SKBUFFS_WITHOUT_FREE below); otherwise it is queued on
 * priv->tx_free_list until the hardware has finished with it.
 *
 * @skb: Packet to send
 * @dev: Device info structure
 * Returns Always returns zero
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr hw_buffer;
	uint64_t old_scratch;
	uint64_t old_scratch2;
	int dropped;
	int qos;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int32_t in_use;		/* packets outstanding for this queue */
	int32_t buffers_to_free;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif

	/*
	 * Prefetch the private data structure. It is larger than one
	 * cache line.
	 */
	prefetch(priv);

	/* Start off assuming no drop */
	dropped = 0;

	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		/* Clamp out-of-range QoS values to queue 0. */
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

		/*
		 * Assume we're going to be able to send this
		 * packet. Fetch and increment the number of pending
		 * packets for output. The results land in the scratch
		 * area and are collected after the synchronous
		 * CVMX_SYNCIOBDMA further down.
		 */
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4, 1);
	}

	/*
	 * The CN3XXX series of parts has an errata (GMX-401) which
	 * causes the GMX block to hang if a collision occurs towards
	 * the end of a <68 byte packet. As a workaround for this, we
	 * pad packets to be 68 bytes whenever we are in half duplex
	 * mode. We don't handle the case of having a small packet but
	 * no room to add the padding. The kernel should always give
	 * us at least a cache line.
	 */
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;
				/* Pad with zeros only if the tailroom allows */
				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					memset(__skb_put(skb, add_bytes), 0,
					       add_bytes);
			}
		}
	}

	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data);
	hw_buffer.s.pool = 0;
	hw_buffer.s.size =
	    (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head;

	/* Build the PKO command */
	pko_command.u64 = 0;
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	/* Default: keep the skb ourselves and count it against the FAU. */
	pko_command.s.dontfree = 1;
	pko_command.s.reg0 = priv->fau + qos * 4;
	/*
	 * See if we can put this skb in the FPA pool. Any strange
	 * behavior from the Linux networking stack will most likely
	 * be caused by a bug in the following code. If some field is
	 * in use by the network stack and gets carried over when a
	 * buffer is reused, bad things may happen. If in doubt and
	 * you don't need the absolute best performance, disable the
	 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
	 * shown a 25% increase in performance under some loads.
	 */
#if REUSE_SKBUFFS_WITHOUT_FREE
	/* FPA buffers must start on a 128-byte boundary. */
	fpa_head = skb->head + 128 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/*
		 * printk("TX buffer beginning can't meet FPA
		 * alignment constraints\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
		/*
		   printk("TX buffer isn't large enough for the FPA\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/*
		   printk("TX buffer sharing data with someone else\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/*
		   printk("TX buffer has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/*
		   printk("TX buffer header has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/*
		   printk("TX buffer has a destructor\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/*
		   printk("TX buffer has fragments\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    (skb->truesize !=
	     sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
		/*
		   printk("TX buffer truesize has been changed\n");
		 */
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * We can use this buffer in the FPA. We don't need the FAU
	 * update anymore.
	 */
	pko_command.s.reg0 = 0;
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = (skb->data - fpa_head) >> 7;
	/* Stash the skb pointer just before the FPA buffer for recovery. */
	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;

	/*
	 * The skbuff will be reused without ever being freed. We must
	 * clean up a bunch of Linux stuff.
	 */
	dst_release(skb->dst);
	skb->dst = NULL;
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
	skb->sp = NULL;
#endif
	nf_reset(skb);

#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif /* CONFIG_NET_CLS_ACT */
#endif /* CONFIG_NET_SCHED */

dont_put_skbuff_in_hw:
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */

	/* Check if we can use the hardware checksumming */
	if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
	    && ((ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
		|| (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP))) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbuffs in use by the hardware */
		CVMX_SYNCIOBDMA;
		in_use = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbuffs in use by the hardware */
		in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1);
		buffers_to_free =
		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	}

	/*
	 * If we're sending faster than the receive can free them then
	 * don't do the HW free.
	 */
	if ((buffers_to_free < -100) && !pko_command.s.dontfree) {
		pko_command.s.dontfree = 1;
		pko_command.s.reg0 = priv->fau + qos * 4;
	}

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_CMD_QUEUE);

	/* Drop this packet if we have too many already queued to the HW */
	if (unlikely
	    (skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
		/*
		   DEBUGPRINT("%s: Tx dropped. Too many queued\n", dev->name);
		 */
		dropped = 1;
	}
	/* Send the packet to the output queue */
	else if (unlikely
		 (cvmx_pko_send_packet_finish
		  (priv->port, priv->queue + qos, pko_command, hw_buffer,
		   CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
		dropped = 1;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	}

	if (unlikely(dropped)) {
		/* Undo the FAU increment made above for this packet. */
		dev_kfree_skb_any(skb);
		cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
		priv->stats.tx_dropped++;
	} else {
		if (USE_SKBUFFS_IN_HW) {
			/* Put this packet on the queue to be freed later */
			if (pko_command.s.dontfree)
				skb_queue_tail(&priv->tx_free_list[qos], skb);
			else {
				/* HW owns the buffer now; drop our counts. */
				cvmx_fau_atomic_add32
				    (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
				cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
			}
		} else {
			/* Put this packet on the queue to be freed later */
			skb_queue_tail(&priv->tx_free_list[qos], skb);
		}
	}

	/* Free skbuffs not in use by the hardware, possibly two at a time */
	if (skb_queue_len(&priv->tx_free_list[qos]) > in_use) {
		spin_lock(&priv->tx_free_list[qos].lock);
		/*
		 * Check again now that we have the lock. It might
		 * have changed.
		 */
		if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
			dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
		if (skb_queue_len(&priv->tx_free_list[qos]) > in_use)
			dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
		spin_unlock(&priv->tx_free_list[qos].lock);
	}

	return 0;
}
369
/**
 * Packet transmit to the POW
 *
 * Instead of handing the packet to PKO, copy it into a fresh FPA
 * buffer, build a work queue entry describing it, and submit it to the
 * POW so software (e.g. another core) can process it. The original skb
 * is always consumed.
 *
 * @skb: Packet to send
 * @dev: Device info structure
 * Returns Always returns zero
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;

	/* Get a work queue entry */
	cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
	if (unlikely(work == NULL)) {
		DEBUGPRINT("%s: Failed to allocate a work queue entry\n",
			   dev->name);
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(packet_buffer == NULL)) {
		DEBUGPRINT("%s: Failed to allocate a packet buffer\n",
			   dev->name);
		/* Return the WQE we already took before bailing out. */
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/*
	 * Calculate where we need to copy the data to. We need to
	 * leave 8 bytes for a next pointer (unused). We also need to
	 * include any configured skip. Then we need to align the IP
	 * packet src and dest into the same 64bit word. The below
	 * calculation may add a little extra, but that doesn't
	 * hurt.
	 */
	copy_location = packet_buffer + sizeof(uint64_t);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;

	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool. We can't use the
	 * trick of counting outstanding packets like in
	 * cvm_oct_xmit.
	 */
	memcpy(copy_location, skb->data, skb->len);

	/*
	 * Fill in some of the work queue fields. We may need to add
	 * more if the software at the other end needs them.
	 */
	work->hw_chksum = skb->csum;
	work->len = skb->len;
	work->ipprt = priv->port;
	work->qos = priv->port & 0x7;
	work->grp = pow_send_group;
	work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->tag = pow_send_group;	/* FIXME */
	/* Default to zero. Sets of zero later are commented out */
	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	/* "back" is in units of 128-byte cache lines. */
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;

	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
#if 0
		work->word2.s.vlan_valid = 0;	/* FIXME */
		work->word2.s.vlan_cfi = 0;	/* FIXME */
		work->word2.s.vlan_id = 0;	/* FIXME */
		work->word2.s.dec_ipcomp = 0;	/* FIXME */
#endif
		work->word2.s.tcp_or_udp =
		    (ip_hdr(skb)->protocol == IP_PROTOCOL_TCP)
		    || (ip_hdr(skb)->protocol == IP_PROTOCOL_UDP);
#if 0
		/* FIXME */
		work->word2.s.dec_ipsec = 0;
		/* We only support IPv4 right now */
		work->word2.s.is_v6 = 0;
		/* Hardware would set to zero */
		work->word2.s.software = 0;
		/* No error, packet is internal */
		work->word2.s.L4_error = 0;
#endif
		/* Fragmented if frag_off is anything but 0 or DF (1<<14). */
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
					  || (ip_hdr(skb)->frag_off ==
					      1 << 14));
#if 0
		/* Assume Linux is sending a good packet */
		work->word2.s.IP_exc = 0;
#endif
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
		/* This is an IP packet */
		work->word2.s.not_IP = 0;
		/* No error, packet is internal */
		work->word2.s.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.s.err_code = 0;
#endif

		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way hardware
		 * does.
		 */
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
	} else {
#if 0
		work->word2.snoip.vlan_valid = 0;	/* FIXME */
		work->word2.snoip.vlan_cfi = 0;	/* FIXME */
		work->word2.snoip.vlan_id = 0;	/* FIXME */
		work->word2.snoip.software = 0;	/* Hardware would set to zero */
#endif
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
		    (skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
		    (skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
#if 0
		/* No error, packet is internal */
		work->word2.snoip.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.snoip.err_code = 0;
#endif
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
	}

	/* Submit the packet to the POW */
	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
			     work->grp);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return 0;
}
520
/**
 * Transmit a work queue entry out of the ethernet port. Both
 * the work queue entry and the packet data can optionally be
 * freed. The work will be freed on error as well.
 *
 * @dev: Device to transmit out.
 * @work_queue_entry:
 *		Work queue entry to send
 * @do_free: True if the work queue entry and packet data should be
 *		freed. If false, neither will be freed.
 * @qos:	Index into the queues for this port to transmit on. This
 *		is used to implement QoS if there are multiple queues per
 *		port. This parameter must be between 0 and the number of
 *		queues per port minus 1. Values outside of this range will
 *		be changed to zero.
 *
 * Returns Zero on success, negative on failure.
 */
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
			 int do_free, int qos)
{
	unsigned long flags;
	union cvmx_buf_ptr hw_buffer;
	cvmx_pko_command_word0_t pko_command;
	int dropped;
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_wqe_t *work = work_queue_entry;

	if (!(dev->flags & IFF_UP)) {
		DEBUGPRINT("%s: Device not up\n", dev->name);
		if (do_free)
			cvm_oct_free_work(work);
		return -1;
	}

	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
	   remove "qos" in the event neither interface supports
	   multiple queues per port */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/* Start off assuming no drop */
	dropped = 0;

	/* IRQs off: PKO prepare/finish must not be interleaved. */
	local_irq_save(flags);
	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_CMD_QUEUE);

	/* Build the PKO buffer pointer from the work entry's packet_ptr */
	hw_buffer.u64 = 0;
	hw_buffer.s.addr = work->packet_ptr.s.addr;
	hw_buffer.s.pool = CVMX_FPA_PACKET_POOL;
	hw_buffer.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	hw_buffer.s.back = work->packet_ptr.s.back;

	/* Build the PKO command */
	pko_command.u64 = 0;
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.dontfree = !do_free;
	pko_command.s.segs = work->word2.s.bufs;
	pko_command.s.total_bytes = work->len;

	/* Check if we can use the hardware checksumming */
	if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc))
		pko_command.s.ipoffp1 = 0;
	else
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;

	/* Send the packet to the output queue */
	if (unlikely
	    (cvmx_pko_send_packet_finish
	     (priv->port, priv->queue + qos, pko_command, hw_buffer,
	      CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
		dropped = -1;
	}
	local_irq_restore(flags);

	if (unlikely(dropped)) {
		if (do_free)
			cvm_oct_free_work(work);
		priv->stats.tx_dropped++;
	} else if (do_free)
		/* PKO frees the packet data; we free only the WQE here. */
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

	return dropped;
}
EXPORT_SYMBOL(cvm_oct_transmit_qos);
615
616/**
617 * This function frees all skb that are currenty queued for TX.
618 *
619 * @dev: Device being shutdown
620 */
621void cvm_oct_tx_shutdown(struct net_device *dev)
622{
623 struct octeon_ethernet *priv = netdev_priv(dev);
624 unsigned long flags;
625 int qos;
626
627 for (qos = 0; qos < 16; qos++) {
628 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
629 while (skb_queue_len(&priv->tx_free_list[qos]))
630 dev_kfree_skb_any(__skb_dequeue
631 (&priv->tx_free_list[qos]));
632 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
633 }
634}
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
new file mode 100644
index 000000000000..5106236fe981
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -0,0 +1,32 @@
1/*********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26*********************************************************************/
27
28int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
29int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
30int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
31 int do_free, int qos);
32void cvm_oct_tx_shutdown(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
new file mode 100644
index 000000000000..37b665918000
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -0,0 +1,81 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26*********************************************************************/
27
/*
 * Rate-limited printk wrapper: the message is dropped whenever
 * printk_ratelimit() indicates the kernel log is being flooded.
 */
#define DEBUGPRINT(format, ...) do { if (printk_ratelimit()) 		\
					printk(format, ##__VA_ARGS__);	\
				} while (0)
31
32/**
33 * Given a packet data address, return a pointer to the
34 * beginning of the packet buffer.
35 *
36 * @packet_ptr: Packet data hardware address
37 * Returns Packet buffer pointer
38 */
39static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
40{
41 return cvmx_phys_to_ptr(((packet_ptr.s.addr >> 7) - packet_ptr.s.back)
42 << 7);
43}
44
/**
 * Given an IPD/PKO port number, return the logical interface it is
 * on.
 *
 * @ipd_port: Port to check
 *
 * Returns Logical interface
 */
static inline int INTERFACE(int ipd_port)
{
	/* Interface 0 or 1 for RGMII, GMII, SPI, etc. (16 ports each) */
	if (ipd_port < 32)
		return ipd_port >> 4;
	/* Interface 2 for NPI */
	if (ipd_port < 36)
		return 2;
	/* Interface 3 for loopback */
	if (ipd_port < 40)
		return 3;
	/* Non-existent interface for POW0 */
	if (ipd_port == 40)
		return 4;
	panic("Illegal ipd_port %d passed to INTERFACE\n", ipd_port);
}
66
/**
 * Given an IPD/PKO port number, return the port's index on a
 * logical interface.
 *
 * @ipd_port: Port to check
 *
 * Returns Index into interface port list
 */
static inline int INDEX(int ipd_port)
{
	/* RGMII/GMII/SPI interfaces hold 16 ports; the rest hold 4. */
	return (ipd_port < 32) ? (ipd_port & 15) : (ipd_port & 3);
}
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
new file mode 100644
index 000000000000..f08eb32e04fc
--- /dev/null
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -0,0 +1,127 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/kernel.h>
28#include <linux/netdevice.h>
29#include <linux/mii.h>
30#include <net/dst.h>
31
32#include <asm/octeon/octeon.h>
33
34#include "ethernet-defines.h"
35#include "octeon-ethernet.h"
36#include "ethernet-common.h"
37#include "ethernet-util.h"
38
39#include "cvmx-helper.h"
40
41#include "cvmx-gmxx-defs.h"
42
43static int cvm_oct_xaui_open(struct net_device *dev)
44{
45 union cvmx_gmxx_prtx_cfg gmx_cfg;
46 struct octeon_ethernet *priv = netdev_priv(dev);
47 int interface = INTERFACE(priv->port);
48 int index = INDEX(priv->port);
49 cvmx_helper_link_info_t link_info;
50
51 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
52 gmx_cfg.s.en = 1;
53 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
54
55 if (!octeon_is_simulation()) {
56 link_info = cvmx_helper_link_get(priv->port);
57 if (!link_info.s.link_up)
58 netif_carrier_off(dev);
59 }
60 return 0;
61}
62
63static int cvm_oct_xaui_stop(struct net_device *dev)
64{
65 union cvmx_gmxx_prtx_cfg gmx_cfg;
66 struct octeon_ethernet *priv = netdev_priv(dev);
67 int interface = INTERFACE(priv->port);
68 int index = INDEX(priv->port);
69
70 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
71 gmx_cfg.s.en = 0;
72 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
73 return 0;
74}
75
76static void cvm_oct_xaui_poll(struct net_device *dev)
77{
78 struct octeon_ethernet *priv = netdev_priv(dev);
79 cvmx_helper_link_info_t link_info;
80
81 link_info = cvmx_helper_link_get(priv->port);
82 if (link_info.u64 == priv->link_info)
83 return;
84
85 link_info = cvmx_helper_link_autoconf(priv->port);
86 priv->link_info = link_info.u64;
87
88 /* Tell Linux */
89 if (link_info.s.link_up) {
90
91 if (!netif_carrier_ok(dev))
92 netif_carrier_on(dev);
93 if (priv->queue != -1)
94 DEBUGPRINT
95 ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
96 dev->name, link_info.s.speed,
97 (link_info.s.full_duplex) ? "Full" : "Half",
98 priv->port, priv->queue);
99 else
100 DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n",
101 dev->name, link_info.s.speed,
102 (link_info.s.full_duplex) ? "Full" : "Half",
103 priv->port);
104 } else {
105 if (netif_carrier_ok(dev))
106 netif_carrier_off(dev);
107 DEBUGPRINT("%s: Link down\n", dev->name);
108 }
109}
110
/*
 * Per-device init for XAUI ports: run the common setup, install the
 * open/stop callbacks, and (on real hardware) the link poll callback.
 * Returns 0.
 */
int cvm_oct_xaui_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvm_oct_common_init(dev);
	dev->open = cvm_oct_xaui_open;
	dev->stop = cvm_oct_xaui_stop;
	/* Leave the port disabled until it is explicitly opened. */
	dev->stop(dev);
	if (!octeon_is_simulation())
		priv->poll = cvm_oct_xaui_poll;

	return 0;
}
123
/* Per-device teardown for XAUI ports: undo cvm_oct_common_init(). */
void cvm_oct_xaui_uninit(struct net_device *dev)
{
	cvm_oct_common_uninit(dev);
}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
new file mode 100644
index 000000000000..e8ef9e0b791f
--- /dev/null
+++ b/drivers/staging/octeon/ethernet.c
@@ -0,0 +1,507 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/module.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/delay.h>
33#include <linux/mii.h>
34
35#include <net/dst.h>
36
37#include <asm/octeon/octeon.h>
38
39#include "ethernet-defines.h"
40#include "ethernet-mem.h"
41#include "ethernet-rx.h"
42#include "ethernet-tx.h"
43#include "ethernet-util.h"
44#include "ethernet-proc.h"
45#include "ethernet-common.h"
46#include "octeon-ethernet.h"
47
48#include "cvmx-pip.h"
49#include "cvmx-pko.h"
50#include "cvmx-fau.h"
51#include "cvmx-ipd.h"
52#include "cvmx-helper.h"
53
54#include "cvmx-smix-defs.h"
55
56#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
57 && CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
58int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
59#else
60int num_packet_buffers = 1024;
61#endif
62module_param(num_packet_buffers, int, 0444);
63MODULE_PARM_DESC(num_packet_buffers, "\n"
64 "\tNumber of packet buffers to allocate and store in the\n"
65 "\tFPA. By default, 1024 packet buffers are used unless\n"
66 "\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined.");
67
68int pow_receive_group = 15;
69module_param(pow_receive_group, int, 0444);
70MODULE_PARM_DESC(pow_receive_group, "\n"
71 "\tPOW group to receive packets from. All ethernet hardware\n"
72 "\twill be configured to send incomming packets to this POW\n"
73 "\tgroup. Also any other software can submit packets to this\n"
74 "\tgroup for the kernel to process.");
75
76int pow_send_group = -1;
77module_param(pow_send_group, int, 0644);
78MODULE_PARM_DESC(pow_send_group, "\n"
79 "\tPOW group to send packets to other software on. This\n"
80 "\tcontrols the creation of the virtual device pow0.\n"
81 "\talways_use_pow also depends on this value.");
82
83int always_use_pow;
84module_param(always_use_pow, int, 0444);
85MODULE_PARM_DESC(always_use_pow, "\n"
86 "\tWhen set, always send to the pow group. This will cause\n"
87 "\tpackets sent to real ethernet devices to be sent to the\n"
88 "\tPOW group instead of the hardware. Unless some other\n"
89 "\tapplication changes the config, packets will still be\n"
90 "\treceived from the low level hardware. Use this option\n"
91 "\tto allow a CVMX app to intercept all packets from the\n"
92 "\tlinux kernel. You must specify pow_send_group along with\n"
93 "\tthis option.");
94
95char pow_send_list[128] = "";
96module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
97MODULE_PARM_DESC(pow_send_list, "\n"
98 "\tComma separated list of ethernet devices that should use the\n"
99 "\tPOW for transmit instead of the actual ethernet hardware. This\n"
100 "\tis a per port version of always_use_pow. always_use_pow takes\n"
101 "\tprecedence over this list. For example, setting this to\n"
102 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
103 "\tusing the pow_send_group.");
104
105static int disable_core_queueing = 1;
106module_param(disable_core_queueing, int, 0444);
107MODULE_PARM_DESC(disable_core_queueing, "\n"
108 "\tWhen set the networking core's tx_queue_len is set to zero. This\n"
109 "\tallows packets to be sent without lock contention in the packet\n"
110 "\tscheduler resulting in some cases in improved throughput.\n");
111
/**
 * Periodic timer to check auto negotiation
 * (fires cvm_do_timer; armed in cvm_oct_init_module)
 */
static struct timer_list cvm_oct_poll_timer;

/**
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

/* Serializes MDIO access between the poll timer and other users
 * (defined elsewhere in the driver — presumably the MDIO code;
 * TODO confirm). */
extern struct semaphore mdio_sem;
124
/**
 * Periodic timer tick for slow management operations
 *
 * Services one port per tick: runs the port's link-poll callback
 * (only if the MDIO semaphore can be taken without blocking),
 * releases any transmitted skbs whose hardware completion count
 * has advanced, and refreshes the device statistics.  Reschedules
 * itself so all ports are covered roughly once per second.
 *
 * @arg: Device to check
 */
static void cvm_do_timer(unsigned long arg)
{
	/* Index of the next port to service; persists across ticks. */
	static int port;
	if (port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (cvm_oct_device[port]) {
			int queues_per_port;
			int qos;
			struct octeon_ethernet *priv =
				netdev_priv(cvm_oct_device[port]);
			if (priv->poll) {
				/* skip polling if we don't get the lock */
				if (!down_trylock(&mdio_sem)) {
					priv->poll(cvm_oct_device[port]);
					up(&mdio_sem);
				}
			}

			queues_per_port = cvmx_pko_get_num_queues(port);
			/* Drain any pending packets in the free list */
			for (qos = 0; qos < queues_per_port; qos++) {
				if (skb_queue_len(&priv->tx_free_list[qos])) {
					spin_lock(&priv->tx_free_list[qos].
						  lock);
					/*
					 * The FAU counter is the number of
					 * buffers the hardware still owns;
					 * list entries beyond that count have
					 * completed and can be freed.
					 */
					while (skb_queue_len
					       (&priv->tx_free_list[qos]) >
					       cvmx_fau_fetch_and_add32(priv->
									fau +
									qos * 4,
									0))
						dev_kfree_skb(__skb_dequeue
							      (&priv->
							       tx_free_list
							       [qos]));
					spin_unlock(&priv->tx_free_list[qos].
						    lock);
				}
			}
			cvm_oct_device[port]->get_stats(cvm_oct_device[port]);
		}
		port++;
		/* Poll the next port in a 50th of a second.
		   This spreads the polling of ports out a little bit */
		mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
	} else {
		port = 0;
		/* All ports have been polled. Start the next iteration through
		   the ports in one second */
		mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
	}
}
180
181/**
182 * Configure common hardware for all interfaces
183 */
184static __init void cvm_oct_configure_common_hw(void)
185{
186 int r;
187 /* Setup the FPA */
188 cvmx_fpa_enable();
189 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
190 num_packet_buffers);
191 cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
192 num_packet_buffers);
193 if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
194 cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
195 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
196
197 if (USE_RED)
198 cvmx_helper_setup_red(num_packet_buffers / 4,
199 num_packet_buffers / 8);
200
201 /* Enable the MII interface */
202 if (!octeon_is_simulation())
203 cvmx_write_csr(CVMX_SMIX_EN(0), 1);
204
205 /* Register an IRQ hander for to receive POW interrupts */
206 r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
207 cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet",
208 cvm_oct_device);
209
210#if defined(CONFIG_SMP) && 0
211 if (USE_MULTICORE_RECEIVE) {
212 irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group,
213 cpu_online_mask);
214 }
215#endif
216}
217
/**
 * Free a work queue entry received in an intercept callback.
 *
 * Walks the packet's buffer chain, returning each hardware-owned
 * segment to the FPA packet pool, then frees the work queue entry
 * itself back to the WQE pool.
 *
 * @work_queue_entry:
 *               Work queue entry to free
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		/* The pointer to the next buffer is stored in the 8 bytes
		   immediately preceding the current buffer's data. */
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		/* Buffers with the "i" (don't-free) bit set are not ours
		   to return to the pool. */
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
						     128));
		segment_ptr = next_ptr;
	}
	/* Finally return the work queue entry itself. */
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);
247
248/**
249 * Module/ driver initialization. Creates the linux network
250 * devices.
251 *
252 * Returns Zero on success
253 */
254static int __init cvm_oct_init_module(void)
255{
256 int num_interfaces;
257 int interface;
258 int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
259 int qos;
260
261 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
262
263 cvm_oct_proc_initialize();
264 cvm_oct_rx_initialize();
265 cvm_oct_configure_common_hw();
266
267 cvmx_helper_initialize_packet_io_global();
268
269 /* Change the input group for all ports before input is enabled */
270 num_interfaces = cvmx_helper_get_number_of_interfaces();
271 for (interface = 0; interface < num_interfaces; interface++) {
272 int num_ports = cvmx_helper_ports_on_interface(interface);
273 int port;
274
275 for (port = cvmx_helper_get_ipd_port(interface, 0);
276 port < cvmx_helper_get_ipd_port(interface, num_ports);
277 port++) {
278 union cvmx_pip_prt_tagx pip_prt_tagx;
279 pip_prt_tagx.u64 =
280 cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
281 pip_prt_tagx.s.grp = pow_receive_group;
282 cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
283 pip_prt_tagx.u64);
284 }
285 }
286
287 cvmx_helper_ipd_and_packet_input_enable();
288
289 memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
290
291 /*
292 * Initialize the FAU used for counting packet buffers that
293 * need to be freed.
294 */
295 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
296
297 if ((pow_send_group != -1)) {
298 struct net_device *dev;
299 pr_info("\tConfiguring device for POW only access\n");
300 dev = alloc_etherdev(sizeof(struct octeon_ethernet));
301 if (dev) {
302 /* Initialize the device private structure. */
303 struct octeon_ethernet *priv = netdev_priv(dev);
304 memset(priv, 0, sizeof(struct octeon_ethernet));
305
306 dev->init = cvm_oct_common_init;
307 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
308 priv->port = CVMX_PIP_NUM_INPUT_PORTS;
309 priv->queue = -1;
310 strcpy(dev->name, "pow%d");
311 for (qos = 0; qos < 16; qos++)
312 skb_queue_head_init(&priv->tx_free_list[qos]);
313
314 if (register_netdev(dev) < 0) {
315 pr_err("Failed to register ethernet "
316 "device for POW\n");
317 kfree(dev);
318 } else {
319 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
320 pr_info("%s: POW send group %d, receive "
321 "group %d\n",
322 dev->name, pow_send_group,
323 pow_receive_group);
324 }
325 } else {
326 pr_err("Failed to allocate ethernet device "
327 "for POW\n");
328 }
329 }
330
331 num_interfaces = cvmx_helper_get_number_of_interfaces();
332 for (interface = 0; interface < num_interfaces; interface++) {
333 cvmx_helper_interface_mode_t imode =
334 cvmx_helper_interface_get_mode(interface);
335 int num_ports = cvmx_helper_ports_on_interface(interface);
336 int port;
337
338 for (port = cvmx_helper_get_ipd_port(interface, 0);
339 port < cvmx_helper_get_ipd_port(interface, num_ports);
340 port++) {
341 struct octeon_ethernet *priv;
342 struct net_device *dev =
343 alloc_etherdev(sizeof(struct octeon_ethernet));
344 if (!dev) {
345 pr_err("Failed to allocate ethernet device "
346 "for port %d\n", port);
347 continue;
348 }
349 if (disable_core_queueing)
350 dev->tx_queue_len = 0;
351
352 /* Initialize the device private structure. */
353 priv = netdev_priv(dev);
354 memset(priv, 0, sizeof(struct octeon_ethernet));
355
356 priv->imode = imode;
357 priv->port = port;
358 priv->queue = cvmx_pko_get_base_queue(priv->port);
359 priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
360 for (qos = 0; qos < 16; qos++)
361 skb_queue_head_init(&priv->tx_free_list[qos]);
362 for (qos = 0; qos < cvmx_pko_get_num_queues(port);
363 qos++)
364 cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
365
366 switch (priv->imode) {
367
368 /* These types don't support ports to IPD/PKO */
369 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
370 case CVMX_HELPER_INTERFACE_MODE_PCIE:
371 case CVMX_HELPER_INTERFACE_MODE_PICMG:
372 break;
373
374 case CVMX_HELPER_INTERFACE_MODE_NPI:
375 dev->init = cvm_oct_common_init;
376 dev->uninit = cvm_oct_common_uninit;
377 strcpy(dev->name, "npi%d");
378 break;
379
380 case CVMX_HELPER_INTERFACE_MODE_XAUI:
381 dev->init = cvm_oct_xaui_init;
382 dev->uninit = cvm_oct_xaui_uninit;
383 strcpy(dev->name, "xaui%d");
384 break;
385
386 case CVMX_HELPER_INTERFACE_MODE_LOOP:
387 dev->init = cvm_oct_common_init;
388 dev->uninit = cvm_oct_common_uninit;
389 strcpy(dev->name, "loop%d");
390 break;
391
392 case CVMX_HELPER_INTERFACE_MODE_SGMII:
393 dev->init = cvm_oct_sgmii_init;
394 dev->uninit = cvm_oct_sgmii_uninit;
395 strcpy(dev->name, "eth%d");
396 break;
397
398 case CVMX_HELPER_INTERFACE_MODE_SPI:
399 dev->init = cvm_oct_spi_init;
400 dev->uninit = cvm_oct_spi_uninit;
401 strcpy(dev->name, "spi%d");
402 break;
403
404 case CVMX_HELPER_INTERFACE_MODE_RGMII:
405 case CVMX_HELPER_INTERFACE_MODE_GMII:
406 dev->init = cvm_oct_rgmii_init;
407 dev->uninit = cvm_oct_rgmii_uninit;
408 strcpy(dev->name, "eth%d");
409 break;
410 }
411
412 if (!dev->init) {
413 kfree(dev);
414 } else if (register_netdev(dev) < 0) {
415 pr_err("Failed to register ethernet device "
416 "for interface %d, port %d\n",
417 interface, priv->port);
418 kfree(dev);
419 } else {
420 cvm_oct_device[priv->port] = dev;
421 fau -=
422 cvmx_pko_get_num_queues(priv->port) *
423 sizeof(uint32_t);
424 }
425 }
426 }
427
428 if (INTERRUPT_LIMIT) {
429 /*
430 * Set the POW timer rate to give an interrupt at most
431 * INTERRUPT_LIMIT times per second.
432 */
433 cvmx_write_csr(CVMX_POW_WQ_INT_PC,
434 octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT *
435 16 * 256) << 8);
436
437 /*
438 * Enable POW timer interrupt. It will count when
439 * there are packets available.
440 */
441 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
442 0x1ful << 24);
443 } else {
444 /* Enable POW interrupt when our port has at least one packet */
445 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
446 }
447
448 /* Enable the poll timer for checking RGMII status */
449 init_timer(&cvm_oct_poll_timer);
450 cvm_oct_poll_timer.data = 0;
451 cvm_oct_poll_timer.function = cvm_do_timer;
452 mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
453
454 return 0;
455}
456
457/**
458 * Module / driver shutdown
459 *
460 * Returns Zero on success
461 */
462static void __exit cvm_oct_cleanup_module(void)
463{
464 int port;
465
466 /* Disable POW interrupt */
467 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
468
469 cvmx_ipd_disable();
470
471 /* Free the interrupt handler */
472 free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
473
474 del_timer(&cvm_oct_poll_timer);
475 cvm_oct_rx_shutdown();
476 cvmx_pko_disable();
477
478 /* Free the ethernet devices */
479 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
480 if (cvm_oct_device[port]) {
481 cvm_oct_tx_shutdown(cvm_oct_device[port]);
482 unregister_netdev(cvm_oct_device[port]);
483 kfree(cvm_oct_device[port]);
484 cvm_oct_device[port] = NULL;
485 }
486 }
487
488 cvmx_pko_shutdown();
489 cvm_oct_proc_shutdown();
490
491 cvmx_ipd_free_ptr();
492
493 /* Free the HW pools */
494 cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
495 num_packet_buffers);
496 cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
497 num_packet_buffers);
498 if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
499 cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
500 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
501}
502
503MODULE_LICENSE("GPL");
504MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
505MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
506module_init(cvm_oct_init_module);
507module_exit(cvm_oct_cleanup_module);
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
new file mode 100644
index 000000000000..b3199076ef5e
--- /dev/null
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -0,0 +1,127 @@
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
27
28/*
29 * External interface for the Cavium Octeon ethernet driver.
30 */
31#ifndef OCTEON_ETHERNET_H
32#define OCTEON_ETHERNET_H
33
34/**
35 * This is the definition of the Ethernet driver's private
36 * driver state stored in netdev_priv(dev).
37 */
38struct octeon_ethernet {
39 /* PKO hardware output port */
40 int port;
41 /* PKO hardware queue for the port */
42 int queue;
43 /* Hardware fetch and add to count outstanding tx buffers */
44 int fau;
45 /*
46 * Type of port. This is one of the enums in
47 * cvmx_helper_interface_mode_t
48 */
49 int imode;
50 /* List of outstanding tx buffers per queue */
51 struct sk_buff_head tx_free_list[16];
52 /* Device statistics */
53 struct net_device_stats stats
54; /* Generic MII info structure */
55 struct mii_if_info mii_info;
56 /* Last negotiated link state */
57 uint64_t link_info;
58 /* Called periodically to check link status */
59 void (*poll) (struct net_device *dev);
60};
61
/**
 * Free a work queue entry received in an intercept callback.
 *
 * @work_queue_entry:
 *               Work queue entry to free
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry);
70
/**
 * Transmit a work queue entry out of the ethernet port. Both
 * the work queue entry and the packet data can optionally be
 * freed. The work will be freed on error as well.
 *
 * @dev:     Device to transmit out.
 * @work_queue_entry:
 *               Work queue entry to send
 * @do_free: True if the work queue entry and packet data should be
 *               freed. If false, neither will be freed.
 * @qos:     Index into the queues for this port to transmit on. This
 *               is used to implement QoS if there are multiple queues per
 *               port. This parameter must be between 0 and the number of
 *               queues per port minus 1. Values outside of this range will
 *               be changed to zero.
 *
 * Returns Zero on success, negative on failure.
 */
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
			 int do_free, int qos);
91
/**
 * Transmit a work queue entry out of the ethernet port. Both
 * the work queue entry and the packet data can optionally be
 * freed. The work will be freed on error as well. This simply
 * wraps cvm_oct_transmit_qos() (with qos 0) for backwards
 * compatibility.
 *
 * @dev:     Device to transmit out.
 * @work_queue_entry:
 *               Work queue entry to send
 * @do_free: True if the work queue entry and packet data should be
 *               freed. If false, neither will be freed.
 *
 * Returns Zero on success, negative on failure.
 */
static inline int cvm_oct_transmit(struct net_device *dev,
				   void *work_queue_entry, int do_free)
{
	return cvm_oct_transmit_qos(dev, work_queue_entry, do_free, 0);
}
111
112extern int cvm_oct_rgmii_init(struct net_device *dev);
113extern void cvm_oct_rgmii_uninit(struct net_device *dev);
114extern int cvm_oct_sgmii_init(struct net_device *dev);
115extern void cvm_oct_sgmii_uninit(struct net_device *dev);
116extern int cvm_oct_spi_init(struct net_device *dev);
117extern void cvm_oct_spi_uninit(struct net_device *dev);
118extern int cvm_oct_xaui_init(struct net_device *dev);
119extern void cvm_oct_xaui_uninit(struct net_device *dev);
120
121extern int always_use_pow;
122extern int pow_send_group;
123extern int pow_receive_group;
124extern char pow_send_list[];
125extern struct net_device *cvm_oct_device[];
126
127#endif
diff --git a/drivers/staging/uc2322/aten2011.c b/drivers/staging/uc2322/aten2011.c
index 9c62f787cc9c..39d0926d1a90 100644
--- a/drivers/staging/uc2322/aten2011.c
+++ b/drivers/staging/uc2322/aten2011.c
@@ -2336,7 +2336,7 @@ static int ATEN2011_startup(struct usb_serial *serial)
2336 return 0; 2336 return 0;
2337} 2337}
2338 2338
2339static void ATEN2011_shutdown(struct usb_serial *serial) 2339static void ATEN2011_release(struct usb_serial *serial)
2340{ 2340{
2341 int i; 2341 int i;
2342 struct ATENINTL_port *ATEN2011_port; 2342 struct ATENINTL_port *ATEN2011_port;
@@ -2382,7 +2382,7 @@ static struct usb_serial_driver aten_serial_driver = {
2382 .tiocmget = ATEN2011_tiocmget, 2382 .tiocmget = ATEN2011_tiocmget,
2383 .tiocmset = ATEN2011_tiocmset, 2383 .tiocmset = ATEN2011_tiocmset,
2384 .attach = ATEN2011_startup, 2384 .attach = ATEN2011_startup,
2385 .shutdown = ATEN2011_shutdown, 2385 .release = ATEN2011_release,
2386 .read_bulk_callback = ATEN2011_bulk_in_callback, 2386 .read_bulk_callback = ATEN2011_bulk_in_callback,
2387 .read_int_callback = ATEN2011_interrupt_callback, 2387 .read_int_callback = ATEN2011_interrupt_callback,
2388}; 2388};
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 5e38ba10a3a9..0a69672097a8 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -417,7 +417,7 @@ static LIST_HEAD(thermal_hwmon_list);
417static ssize_t 417static ssize_t
418name_show(struct device *dev, struct device_attribute *attr, char *buf) 418name_show(struct device *dev, struct device_attribute *attr, char *buf)
419{ 419{
420 struct thermal_hwmon_device *hwmon = dev->driver_data; 420 struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);
421 return sprintf(buf, "%s\n", hwmon->type); 421 return sprintf(buf, "%s\n", hwmon->type);
422} 422}
423static DEVICE_ATTR(name, 0444, name_show, NULL); 423static DEVICE_ATTR(name, 0444, name_show, NULL);
@@ -488,7 +488,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
488 result = PTR_ERR(hwmon->device); 488 result = PTR_ERR(hwmon->device);
489 goto free_mem; 489 goto free_mem;
490 } 490 }
491 hwmon->device->driver_data = hwmon; 491 dev_set_drvdata(hwmon->device, hwmon);
492 result = device_create_file(hwmon->device, &dev_attr_name); 492 result = device_create_file(hwmon->device, &dev_attr_name);
493 if (result) 493 if (result)
494 goto unregister_hwmon_device; 494 goto unregister_hwmon_device;
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 5eee3f82be5d..dcd49f1e96d0 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -64,6 +64,7 @@ config USB_ARCH_HAS_EHCI
64config USB 64config USB
65 tristate "Support for Host-side USB" 65 tristate "Support for Host-side USB"
66 depends on USB_ARCH_HAS_HCD 66 depends on USB_ARCH_HAS_HCD
67 select NLS # for UTF-8 strings
67 ---help--- 68 ---help---
68 Universal Serial Bus (USB) is a specification for a serial bus 69 Universal Serial Bus (USB) is a specification for a serial bus
69 subsystem which offers higher speeds and more features than the 70 subsystem which offers higher speeds and more features than the
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 0a3dc5ece634..19cb7d5480d7 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_USB_ISP116X_HCD) += host/
14obj-$(CONFIG_USB_OHCI_HCD) += host/ 14obj-$(CONFIG_USB_OHCI_HCD) += host/
15obj-$(CONFIG_USB_UHCI_HCD) += host/ 15obj-$(CONFIG_USB_UHCI_HCD) += host/
16obj-$(CONFIG_USB_FHCI_HCD) += host/ 16obj-$(CONFIG_USB_FHCI_HCD) += host/
17obj-$(CONFIG_USB_XHCI_HCD) += host/
17obj-$(CONFIG_USB_SL811_HCD) += host/ 18obj-$(CONFIG_USB_SL811_HCD) += host/
18obj-$(CONFIG_USB_U132_HCD) += host/ 19obj-$(CONFIG_USB_U132_HCD) += host/
19obj-$(CONFIG_USB_R8A66597_HCD) += host/ 20obj-$(CONFIG_USB_R8A66597_HCD) += host/
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 9cf9ff69e3e3..d171b563e94c 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -306,6 +306,7 @@ enum {
306#define FW_GET_BYTE(p) *((__u8 *) (p)) 306#define FW_GET_BYTE(p) *((__u8 *) (p))
307 307
308#define FW_DIR "ueagle-atm/" 308#define FW_DIR "ueagle-atm/"
309#define UEA_FW_NAME_MAX 30
309#define NB_MODEM 4 310#define NB_MODEM 4
310 311
311#define BULK_TIMEOUT 300 312#define BULK_TIMEOUT 300
@@ -1564,9 +1565,9 @@ static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
1564 file = cmv_file[sc->modem_index]; 1565 file = cmv_file[sc->modem_index];
1565 1566
1566 strcpy(cmv_name, FW_DIR); 1567 strcpy(cmv_name, FW_DIR);
1567 strlcat(cmv_name, file, FIRMWARE_NAME_MAX); 1568 strlcat(cmv_name, file, UEA_FW_NAME_MAX);
1568 if (ver == 2) 1569 if (ver == 2)
1569 strlcat(cmv_name, ".v2", FIRMWARE_NAME_MAX); 1570 strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX);
1570} 1571}
1571 1572
1572static int request_cmvs_old(struct uea_softc *sc, 1573static int request_cmvs_old(struct uea_softc *sc,
@@ -1574,7 +1575,7 @@ static int request_cmvs_old(struct uea_softc *sc,
1574{ 1575{
1575 int ret, size; 1576 int ret, size;
1576 u8 *data; 1577 u8 *data;
1577 char cmv_name[FIRMWARE_NAME_MAX]; /* 30 bytes stack variable */ 1578 char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */
1578 1579
1579 cmvs_file_name(sc, cmv_name, 1); 1580 cmvs_file_name(sc, cmv_name, 1);
1580 ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev); 1581 ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
@@ -1608,7 +1609,7 @@ static int request_cmvs(struct uea_softc *sc,
1608 int ret, size; 1609 int ret, size;
1609 u32 crc; 1610 u32 crc;
1610 u8 *data; 1611 u8 *data;
1611 char cmv_name[FIRMWARE_NAME_MAX]; /* 30 bytes stack variable */ 1612 char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */
1612 1613
1613 cmvs_file_name(sc, cmv_name, 2); 1614 cmvs_file_name(sc, cmv_name, 2);
1614 ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev); 1615 ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ddeb69192537..38bfdb0f6660 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -937,9 +937,9 @@ static int acm_probe(struct usb_interface *intf,
937 int buflen = intf->altsetting->extralen; 937 int buflen = intf->altsetting->extralen;
938 struct usb_interface *control_interface; 938 struct usb_interface *control_interface;
939 struct usb_interface *data_interface; 939 struct usb_interface *data_interface;
940 struct usb_endpoint_descriptor *epctrl; 940 struct usb_endpoint_descriptor *epctrl = NULL;
941 struct usb_endpoint_descriptor *epread; 941 struct usb_endpoint_descriptor *epread = NULL;
942 struct usb_endpoint_descriptor *epwrite; 942 struct usb_endpoint_descriptor *epwrite = NULL;
943 struct usb_device *usb_dev = interface_to_usbdev(intf); 943 struct usb_device *usb_dev = interface_to_usbdev(intf);
944 struct acm *acm; 944 struct acm *acm;
945 int minor; 945 int minor;
@@ -952,6 +952,7 @@ static int acm_probe(struct usb_interface *intf,
952 unsigned long quirks; 952 unsigned long quirks;
953 int num_rx_buf; 953 int num_rx_buf;
954 int i; 954 int i;
955 int combined_interfaces = 0;
955 956
956 /* normal quirks */ 957 /* normal quirks */
957 quirks = (unsigned long)id->driver_info; 958 quirks = (unsigned long)id->driver_info;
@@ -1033,9 +1034,15 @@ next_desc:
1033 data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num)); 1034 data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
1034 control_interface = intf; 1035 control_interface = intf;
1035 } else { 1036 } else {
1036 dev_dbg(&intf->dev, 1037 if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
1037 "No union descriptor, giving up\n"); 1038 dev_dbg(&intf->dev,"No union descriptor, giving up\n");
1038 return -ENODEV; 1039 return -ENODEV;
1040 } else {
1041 dev_warn(&intf->dev,"No union descriptor, testing for castrated device\n");
1042 combined_interfaces = 1;
1043 control_interface = data_interface = intf;
1044 goto look_for_collapsed_interface;
1045 }
1039 } 1046 }
1040 } else { 1047 } else {
1041 control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0); 1048 control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
@@ -1049,6 +1056,36 @@ next_desc:
1049 if (data_interface_num != call_interface_num) 1056 if (data_interface_num != call_interface_num)
1050 dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n"); 1057 dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");
1051 1058
1059 if (control_interface == data_interface) {
1060 /* some broken devices designed for windows work this way */
1061 dev_warn(&intf->dev,"Control and data interfaces are not separated!\n");
1062 combined_interfaces = 1;
1063 /* a popular other OS doesn't use it */
1064 quirks |= NO_CAP_LINE;
1065 if (data_interface->cur_altsetting->desc.bNumEndpoints != 3) {
1066 dev_err(&intf->dev, "This needs exactly 3 endpoints\n");
1067 return -EINVAL;
1068 }
1069look_for_collapsed_interface:
1070 for (i = 0; i < 3; i++) {
1071 struct usb_endpoint_descriptor *ep;
1072 ep = &data_interface->cur_altsetting->endpoint[i].desc;
1073
1074 if (usb_endpoint_is_int_in(ep))
1075 epctrl = ep;
1076 else if (usb_endpoint_is_bulk_out(ep))
1077 epwrite = ep;
1078 else if (usb_endpoint_is_bulk_in(ep))
1079 epread = ep;
1080 else
1081 return -EINVAL;
1082 }
1083 if (!epctrl || !epread || !epwrite)
1084 return -ENODEV;
1085 else
1086 goto made_compressed_probe;
1087 }
1088
1052skip_normal_probe: 1089skip_normal_probe:
1053 1090
1054 /*workaround for switched interfaces */ 1091 /*workaround for switched interfaces */
@@ -1068,10 +1105,11 @@ skip_normal_probe:
1068 } 1105 }
1069 1106
1070 /* Accept probe requests only for the control interface */ 1107 /* Accept probe requests only for the control interface */
1071 if (intf != control_interface) 1108 if (!combined_interfaces && intf != control_interface)
1072 return -ENODEV; 1109 return -ENODEV;
1073 1110
1074 if (usb_interface_claimed(data_interface)) { /* valid in this context */ 1111 if (!combined_interfaces && usb_interface_claimed(data_interface)) {
1112 /* valid in this context */
1075 dev_dbg(&intf->dev, "The data interface isn't available\n"); 1113 dev_dbg(&intf->dev, "The data interface isn't available\n");
1076 return -EBUSY; 1114 return -EBUSY;
1077 } 1115 }
@@ -1095,6 +1133,7 @@ skip_normal_probe:
1095 epread = epwrite; 1133 epread = epwrite;
1096 epwrite = t; 1134 epwrite = t;
1097 } 1135 }
1136made_compressed_probe:
1098 dbg("interfaces are valid"); 1137 dbg("interfaces are valid");
1099 for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++); 1138 for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);
1100 1139
@@ -1112,12 +1151,15 @@ skip_normal_probe:
1112 ctrlsize = le16_to_cpu(epctrl->wMaxPacketSize); 1151 ctrlsize = le16_to_cpu(epctrl->wMaxPacketSize);
1113 readsize = le16_to_cpu(epread->wMaxPacketSize) * 1152 readsize = le16_to_cpu(epread->wMaxPacketSize) *
1114 (quirks == SINGLE_RX_URB ? 1 : 2); 1153 (quirks == SINGLE_RX_URB ? 1 : 2);
1154 acm->combined_interfaces = combined_interfaces;
1115 acm->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20; 1155 acm->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20;
1116 acm->control = control_interface; 1156 acm->control = control_interface;
1117 acm->data = data_interface; 1157 acm->data = data_interface;
1118 acm->minor = minor; 1158 acm->minor = minor;
1119 acm->dev = usb_dev; 1159 acm->dev = usb_dev;
1120 acm->ctrl_caps = ac_management_function; 1160 acm->ctrl_caps = ac_management_function;
1161 if (quirks & NO_CAP_LINE)
1162 acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
1121 acm->ctrlsize = ctrlsize; 1163 acm->ctrlsize = ctrlsize;
1122 acm->readsize = readsize; 1164 acm->readsize = readsize;
1123 acm->rx_buflimit = num_rx_buf; 1165 acm->rx_buflimit = num_rx_buf;
@@ -1223,9 +1265,10 @@ skip_normal_probe:
1223 1265
1224skip_countries: 1266skip_countries:
1225 usb_fill_int_urb(acm->ctrlurb, usb_dev, 1267 usb_fill_int_urb(acm->ctrlurb, usb_dev,
1226 usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress), 1268 usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
1227 acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm, 1269 acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
1228 epctrl->bInterval); 1270 /* works around buggy devices */
1271 epctrl->bInterval ? epctrl->bInterval : 0xff);
1229 acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 1272 acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1230 acm->ctrlurb->transfer_dma = acm->ctrl_dma; 1273 acm->ctrlurb->transfer_dma = acm->ctrl_dma;
1231 1274
@@ -1312,7 +1355,8 @@ static void acm_disconnect(struct usb_interface *intf)
1312 acm->ctrl_dma); 1355 acm->ctrl_dma);
1313 acm_read_buffers_free(acm); 1356 acm_read_buffers_free(acm);
1314 1357
1315 usb_driver_release_interface(&acm_driver, intf == acm->control ? 1358 if (!acm->combined_interfaces)
1359 usb_driver_release_interface(&acm_driver, intf == acm->control ?
1316 acm->data : acm->control); 1360 acm->data : acm->control);
1317 1361
1318 if (acm->port.count == 0) { 1362 if (acm->port.count == 0) {
@@ -1451,6 +1495,9 @@ static struct usb_device_id acm_ids[] = {
1451 Maybe we should define a new 1495 Maybe we should define a new
1452 quirk for this. */ 1496 quirk for this. */
1453 }, 1497 },
1498 { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
1499 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1500 },
1454 1501
1455 /* control interfaces with various AT-command sets */ 1502 /* control interfaces with various AT-command sets */
1456 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1503 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 4c3856420add..1602324808ba 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -125,6 +125,7 @@ struct acm {
125 unsigned char clocal; /* termios CLOCAL */ 125 unsigned char clocal; /* termios CLOCAL */
126 unsigned int ctrl_caps; /* control capabilities from the class specific header */ 126 unsigned int ctrl_caps; /* control capabilities from the class specific header */
127 unsigned int susp_count; /* number of suspended interfaces */ 127 unsigned int susp_count; /* number of suspended interfaces */
128 int combined_interfaces:1; /* control and data collapsed */
128 struct acm_wb *delayed_wb; /* write queued for a device about to be woken */ 129 struct acm_wb *delayed_wb; /* write queued for a device about to be woken */
129}; 130};
130 131
@@ -133,3 +134,4 @@ struct acm {
133/* constants describing various quirks and errors */ 134/* constants describing various quirks and errors */
134#define NO_UNION_NORMAL 1 135#define NO_UNION_NORMAL 1
135#define SINGLE_RX_URB 2 136#define SINGLE_RX_URB 2
137#define NO_CAP_LINE 4
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index d2747a49b974..26c09f0257db 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1057,8 +1057,14 @@ static const struct file_operations usblp_fops = {
1057 .release = usblp_release, 1057 .release = usblp_release,
1058}; 1058};
1059 1059
1060static char *usblp_nodename(struct device *dev)
1061{
1062 return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
1063}
1064
1060static struct usb_class_driver usblp_class = { 1065static struct usb_class_driver usblp_class = {
1061 .name = "lp%d", 1066 .name = "lp%d",
1067 .nodename = usblp_nodename,
1062 .fops = &usblp_fops, 1068 .fops = &usblp_fops,
1063 .minor_base = USBLP_MINOR_BASE, 1069 .minor_base = USBLP_MINOR_BASE,
1064}; 1070};
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index c40a9b284cc9..3703789d0d2a 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -927,21 +927,27 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
927 switch (cmd) { 927 switch (cmd) {
928 case USBTMC_IOCTL_CLEAR_OUT_HALT: 928 case USBTMC_IOCTL_CLEAR_OUT_HALT:
929 retval = usbtmc_ioctl_clear_out_halt(data); 929 retval = usbtmc_ioctl_clear_out_halt(data);
930 break;
930 931
931 case USBTMC_IOCTL_CLEAR_IN_HALT: 932 case USBTMC_IOCTL_CLEAR_IN_HALT:
932 retval = usbtmc_ioctl_clear_in_halt(data); 933 retval = usbtmc_ioctl_clear_in_halt(data);
934 break;
933 935
934 case USBTMC_IOCTL_INDICATOR_PULSE: 936 case USBTMC_IOCTL_INDICATOR_PULSE:
935 retval = usbtmc_ioctl_indicator_pulse(data); 937 retval = usbtmc_ioctl_indicator_pulse(data);
938 break;
936 939
937 case USBTMC_IOCTL_CLEAR: 940 case USBTMC_IOCTL_CLEAR:
938 retval = usbtmc_ioctl_clear(data); 941 retval = usbtmc_ioctl_clear(data);
942 break;
939 943
940 case USBTMC_IOCTL_ABORT_BULK_OUT: 944 case USBTMC_IOCTL_ABORT_BULK_OUT:
941 retval = usbtmc_ioctl_abort_bulk_out(data); 945 retval = usbtmc_ioctl_abort_bulk_out(data);
946 break;
942 947
943 case USBTMC_IOCTL_ABORT_BULK_IN: 948 case USBTMC_IOCTL_ABORT_BULK_IN:
944 retval = usbtmc_ioctl_abort_bulk_in(data); 949 retval = usbtmc_ioctl_abort_bulk_in(data);
950 break;
945 } 951 }
946 952
947 mutex_unlock(&data->io_mutex); 953 mutex_unlock(&data->io_mutex);
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index e1759d17ac5d..69280c35b5cb 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -28,7 +28,7 @@ comment "Miscellaneous USB options"
28 depends on USB 28 depends on USB
29 29
30config USB_DEVICEFS 30config USB_DEVICEFS
31 bool "USB device filesystem" 31 bool "USB device filesystem (DEPRECATED)" if EMBEDDED
32 depends on USB 32 depends on USB
33 ---help--- 33 ---help---
34 If you say Y here (and to "/proc file system support" in the "File 34 If you say Y here (and to "/proc file system support" in the "File
@@ -46,11 +46,15 @@ config USB_DEVICEFS
46 For the format of the various /proc/bus/usb/ files, please read 46 For the format of the various /proc/bus/usb/ files, please read
47 <file:Documentation/usb/proc_usb_info.txt>. 47 <file:Documentation/usb/proc_usb_info.txt>.
48 48
49 Usbfs files can't handle Access Control Lists (ACL), which are the 49 Modern Linux systems do not use this.
50 default way to grant access to USB devices for untrusted users of a 50
51 desktop system. The usbfs functionality is replaced by real 51 Usbfs entries are files and not character devices; usbfs can't
52 device-nodes managed by udev. These nodes live in /dev/bus/usb and 52 handle Access Control Lists (ACL) which are the default way to
53 are used by libusb. 53 grant access to USB devices for untrusted users of a desktop
54 system.
55
56 The usbfs functionality is replaced by real device-nodes managed by
57 udev. These nodes lived in /dev/bus/usb and are used by libusb.
54 58
55config USB_DEVICE_CLASS 59config USB_DEVICE_CLASS
56 bool "USB device class-devices (DEPRECATED)" 60 bool "USB device class-devices (DEPRECATED)"
diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile
index b6078706fb93..ec16e6029905 100644
--- a/drivers/usb/core/Makefile
+++ b/drivers/usb/core/Makefile
@@ -4,14 +4,14 @@
4 4
5usbcore-objs := usb.o hub.o hcd.o urb.o message.o driver.o \ 5usbcore-objs := usb.o hub.o hcd.o urb.o message.o driver.o \
6 config.o file.o buffer.o sysfs.o endpoint.o \ 6 config.o file.o buffer.o sysfs.o endpoint.o \
7 devio.o notify.o generic.o quirks.o 7 devio.o notify.o generic.o quirks.o devices.o
8 8
9ifeq ($(CONFIG_PCI),y) 9ifeq ($(CONFIG_PCI),y)
10 usbcore-objs += hcd-pci.o 10 usbcore-objs += hcd-pci.o
11endif 11endif
12 12
13ifeq ($(CONFIG_USB_DEVICEFS),y) 13ifeq ($(CONFIG_USB_DEVICEFS),y)
14 usbcore-objs += inode.o devices.o 14 usbcore-objs += inode.o
15endif 15endif
16 16
17obj-$(CONFIG_USB) += usbcore.o 17obj-$(CONFIG_USB) += usbcore.o
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 568244c99bdc..24dfb33f90cb 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -19,6 +19,32 @@ static inline const char *plural(int n)
19 return (n == 1 ? "" : "s"); 19 return (n == 1 ? "" : "s");
20} 20}
21 21
22/* FIXME: this is a kludge */
23static int find_next_descriptor_more(unsigned char *buffer, int size,
24 int dt1, int dt2, int dt3, int *num_skipped)
25{
26 struct usb_descriptor_header *h;
27 int n = 0;
28 unsigned char *buffer0 = buffer;
29
30 /* Find the next descriptor of type dt1 or dt2 or dt3 */
31 while (size > 0) {
32 h = (struct usb_descriptor_header *) buffer;
33 if (h->bDescriptorType == dt1 || h->bDescriptorType == dt2 ||
34 h->bDescriptorType == dt3)
35 break;
36 buffer += h->bLength;
37 size -= h->bLength;
38 ++n;
39 }
40
41 /* Store the number of descriptors skipped and return the
42 * number of bytes skipped */
43 if (num_skipped)
44 *num_skipped = n;
45 return buffer - buffer0;
46}
47
22static int find_next_descriptor(unsigned char *buffer, int size, 48static int find_next_descriptor(unsigned char *buffer, int size,
23 int dt1, int dt2, int *num_skipped) 49 int dt1, int dt2, int *num_skipped)
24{ 50{
@@ -43,6 +69,129 @@ static int find_next_descriptor(unsigned char *buffer, int size,
43 return buffer - buffer0; 69 return buffer - buffer0;
44} 70}
45 71
72static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
73 int inum, int asnum, struct usb_host_endpoint *ep,
74 int num_ep, unsigned char *buffer, int size)
75{
76 unsigned char *buffer_start = buffer;
77 struct usb_ss_ep_comp_descriptor *desc;
78 int retval;
79 int num_skipped;
80 int max_tx;
81 int i;
82
83 /* Allocate space for the SS endpoint companion descriptor */
84 ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
85 GFP_KERNEL);
86 if (!ep->ss_ep_comp)
87 return -ENOMEM;
88 desc = (struct usb_ss_ep_comp_descriptor *) buffer;
89 if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
90 dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
91 " interface %d altsetting %d ep %d: "
92 "using minimum values\n",
93 cfgno, inum, asnum, ep->desc.bEndpointAddress);
94 ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
95 ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
96 ep->ss_ep_comp->desc.bMaxBurst = 0;
97 /*
98 * Leave bmAttributes as zero, which will mean no streams for
99 * bulk, and isoc won't support multiple bursts of packets.
100 * With bursts of only one packet, and a Mult of 1, the max
101 * amount of data moved per endpoint service interval is one
102 * packet.
103 */
104 if (usb_endpoint_xfer_isoc(&ep->desc) ||
105 usb_endpoint_xfer_int(&ep->desc))
106 ep->ss_ep_comp->desc.wBytesPerInterval =
107 ep->desc.wMaxPacketSize;
108 /*
109 * The next descriptor is for an Endpoint or Interface,
110 * no extra descriptors to copy into the companion structure,
111 * and we didn't eat up any of the buffer.
112 */
113 retval = 0;
114 goto valid;
115 }
116 memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
117 desc = &ep->ss_ep_comp->desc;
118 buffer += desc->bLength;
119 size -= desc->bLength;
120
121 /* Eat up the other descriptors we don't care about */
122 ep->ss_ep_comp->extra = buffer;
123 i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
124 USB_DT_INTERFACE, &num_skipped);
125 ep->ss_ep_comp->extralen = i;
126 buffer += i;
127 size -= i;
128 retval = buffer - buffer_start + i;
129 if (num_skipped > 0)
130 dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
131 num_skipped, plural(num_skipped),
132 "SuperSpeed endpoint companion");
133
134 /* Check the various values */
135 if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) {
136 dev_warn(ddev, "Control endpoint with bMaxBurst = %d in "
137 "config %d interface %d altsetting %d ep %d: "
138 "setting to zero\n", desc->bMaxBurst,
139 cfgno, inum, asnum, ep->desc.bEndpointAddress);
140 desc->bMaxBurst = 0;
141 }
142 if (desc->bMaxBurst > 15) {
143 dev_warn(ddev, "Endpoint with bMaxBurst = %d in "
144 "config %d interface %d altsetting %d ep %d: "
145 "setting to 15\n", desc->bMaxBurst,
146 cfgno, inum, asnum, ep->desc.bEndpointAddress);
147 desc->bMaxBurst = 15;
148 }
149 if ((usb_endpoint_xfer_control(&ep->desc) || usb_endpoint_xfer_int(&ep->desc))
150 && desc->bmAttributes != 0) {
151 dev_warn(ddev, "%s endpoint with bmAttributes = %d in "
152 "config %d interface %d altsetting %d ep %d: "
153 "setting to zero\n",
154 usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk",
155 desc->bmAttributes,
156 cfgno, inum, asnum, ep->desc.bEndpointAddress);
157 desc->bmAttributes = 0;
158 }
159 if (usb_endpoint_xfer_bulk(&ep->desc) && desc->bmAttributes > 16) {
160 dev_warn(ddev, "Bulk endpoint with more than 65536 streams in "
161 "config %d interface %d altsetting %d ep %d: "
162 "setting to max\n",
163 cfgno, inum, asnum, ep->desc.bEndpointAddress);
164 desc->bmAttributes = 16;
165 }
166 if (usb_endpoint_xfer_isoc(&ep->desc) && desc->bmAttributes > 2) {
167 dev_warn(ddev, "Isoc endpoint has Mult of %d in "
168 "config %d interface %d altsetting %d ep %d: "
169 "setting to 3\n", desc->bmAttributes + 1,
170 cfgno, inum, asnum, ep->desc.bEndpointAddress);
171 desc->bmAttributes = 2;
172 }
173 if (usb_endpoint_xfer_isoc(&ep->desc)) {
174 max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1) *
175 (desc->bmAttributes + 1);
176 } else if (usb_endpoint_xfer_int(&ep->desc)) {
177 max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1);
178 } else {
179 goto valid;
180 }
181 if (desc->wBytesPerInterval > max_tx) {
182 dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in "
183 "config %d interface %d altsetting %d ep %d: "
184 "setting to %d\n",
185 usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
186 desc->wBytesPerInterval,
187 cfgno, inum, asnum, ep->desc.bEndpointAddress,
188 max_tx);
189 desc->wBytesPerInterval = max_tx;
190 }
191valid:
192 return retval;
193}
194
46static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, 195static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
47 int asnum, struct usb_host_interface *ifp, int num_ep, 196 int asnum, struct usb_host_interface *ifp, int num_ep,
48 unsigned char *buffer, int size) 197 unsigned char *buffer, int size)
@@ -50,7 +199,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
50 unsigned char *buffer0 = buffer; 199 unsigned char *buffer0 = buffer;
51 struct usb_endpoint_descriptor *d; 200 struct usb_endpoint_descriptor *d;
52 struct usb_host_endpoint *endpoint; 201 struct usb_host_endpoint *endpoint;
53 int n, i, j; 202 int n, i, j, retval;
54 203
55 d = (struct usb_endpoint_descriptor *) buffer; 204 d = (struct usb_endpoint_descriptor *) buffer;
56 buffer += d->bLength; 205 buffer += d->bLength;
@@ -92,6 +241,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
92 if (usb_endpoint_xfer_int(d)) { 241 if (usb_endpoint_xfer_int(d)) {
93 i = 1; 242 i = 1;
94 switch (to_usb_device(ddev)->speed) { 243 switch (to_usb_device(ddev)->speed) {
244 case USB_SPEED_SUPER:
95 case USB_SPEED_HIGH: 245 case USB_SPEED_HIGH:
96 /* Many device manufacturers are using full-speed 246 /* Many device manufacturers are using full-speed
97 * bInterval values in high-speed interrupt endpoint 247 * bInterval values in high-speed interrupt endpoint
@@ -161,17 +311,39 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
161 cfgno, inum, asnum, d->bEndpointAddress, 311 cfgno, inum, asnum, d->bEndpointAddress,
162 maxp); 312 maxp);
163 } 313 }
164 314 /* Allocate room for and parse any SS endpoint companion descriptors */
165 /* Skip over any Class Specific or Vendor Specific descriptors; 315 if (to_usb_device(ddev)->speed == USB_SPEED_SUPER) {
166 * find the next endpoint or interface descriptor */ 316 endpoint->extra = buffer;
167 endpoint->extra = buffer; 317 i = find_next_descriptor_more(buffer, size, USB_DT_SS_ENDPOINT_COMP,
168 i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, 318 USB_DT_ENDPOINT, USB_DT_INTERFACE, &n);
169 USB_DT_INTERFACE, &n); 319 endpoint->extralen = i;
170 endpoint->extralen = i; 320 buffer += i;
321 size -= i;
322
323 if (size > 0) {
324 retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
325 inum, asnum, endpoint, num_ep, buffer,
326 size);
327 if (retval >= 0) {
328 buffer += retval;
329 retval = buffer - buffer0;
330 }
331 } else {
332 retval = buffer - buffer0;
333 }
334 } else {
335 /* Skip over any Class Specific or Vendor Specific descriptors;
336 * find the next endpoint or interface descriptor */
337 endpoint->extra = buffer;
338 i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
339 USB_DT_INTERFACE, &n);
340 endpoint->extralen = i;
341 retval = buffer - buffer0 + i;
342 }
171 if (n > 0) 343 if (n > 0)
172 dev_dbg(ddev, "skipped %d descriptor%s after %s\n", 344 dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
173 n, plural(n), "endpoint"); 345 n, plural(n), "endpoint");
174 return buffer - buffer0 + i; 346 return retval;
175 347
176skip_to_next_endpoint_or_interface_descriptor: 348skip_to_next_endpoint_or_interface_descriptor:
177 i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT, 349 i = find_next_descriptor(buffer, size, USB_DT_ENDPOINT,
@@ -452,6 +624,8 @@ static int usb_parse_configuration(struct device *ddev, int cfgidx,
452 kref_init(&intfc->ref); 624 kref_init(&intfc->ref);
453 } 625 }
454 626
627 /* FIXME: parse the BOS descriptor */
628
455 /* Skip over any Class Specific or Vendor Specific descriptors; 629 /* Skip over any Class Specific or Vendor Specific descriptors;
456 * find the first interface descriptor */ 630 * find the first interface descriptor */
457 config->extra = buffer; 631 config->extra = buffer;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index d0a21a5f8201..69e5773abfce 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -154,16 +154,11 @@ static const struct usb_device_id *usb_match_dynamic_id(struct usb_interface *in
154static int usb_probe_device(struct device *dev) 154static int usb_probe_device(struct device *dev)
155{ 155{
156 struct usb_device_driver *udriver = to_usb_device_driver(dev->driver); 156 struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
157 struct usb_device *udev; 157 struct usb_device *udev = to_usb_device(dev);
158 int error = -ENODEV; 158 int error = -ENODEV;
159 159
160 dev_dbg(dev, "%s\n", __func__); 160 dev_dbg(dev, "%s\n", __func__);
161 161
162 if (!is_usb_device(dev)) /* Sanity check */
163 return error;
164
165 udev = to_usb_device(dev);
166
167 /* TODO: Add real matching code */ 162 /* TODO: Add real matching code */
168 163
169 /* The device should always appear to be in use 164 /* The device should always appear to be in use
@@ -203,18 +198,13 @@ static void usb_cancel_queued_reset(struct usb_interface *iface)
203static int usb_probe_interface(struct device *dev) 198static int usb_probe_interface(struct device *dev)
204{ 199{
205 struct usb_driver *driver = to_usb_driver(dev->driver); 200 struct usb_driver *driver = to_usb_driver(dev->driver);
206 struct usb_interface *intf; 201 struct usb_interface *intf = to_usb_interface(dev);
207 struct usb_device *udev; 202 struct usb_device *udev = interface_to_usbdev(intf);
208 const struct usb_device_id *id; 203 const struct usb_device_id *id;
209 int error = -ENODEV; 204 int error = -ENODEV;
210 205
211 dev_dbg(dev, "%s\n", __func__); 206 dev_dbg(dev, "%s\n", __func__);
212 207
213 if (is_usb_device(dev)) /* Sanity check */
214 return error;
215
216 intf = to_usb_interface(dev);
217 udev = interface_to_usbdev(intf);
218 intf->needs_binding = 0; 208 intf->needs_binding = 0;
219 209
220 if (udev->authorized == 0) { 210 if (udev->authorized == 0) {
@@ -385,7 +375,6 @@ void usb_driver_release_interface(struct usb_driver *driver,
385 struct usb_interface *iface) 375 struct usb_interface *iface)
386{ 376{
387 struct device *dev = &iface->dev; 377 struct device *dev = &iface->dev;
388 struct usb_device *udev = interface_to_usbdev(iface);
389 378
390 /* this should never happen, don't release something that's not ours */ 379 /* this should never happen, don't release something that's not ours */
391 if (!dev->driver || dev->driver != &driver->drvwrap.driver) 380 if (!dev->driver || dev->driver != &driver->drvwrap.driver)
@@ -394,23 +383,19 @@ void usb_driver_release_interface(struct usb_driver *driver,
394 /* don't release from within disconnect() */ 383 /* don't release from within disconnect() */
395 if (iface->condition != USB_INTERFACE_BOUND) 384 if (iface->condition != USB_INTERFACE_BOUND)
396 return; 385 return;
386 iface->condition = USB_INTERFACE_UNBINDING;
397 387
398 /* don't release if the interface hasn't been added yet */ 388 /* Release via the driver core only if the interface
389 * has already been registered
390 */
399 if (device_is_registered(dev)) { 391 if (device_is_registered(dev)) {
400 iface->condition = USB_INTERFACE_UNBINDING;
401 device_release_driver(dev); 392 device_release_driver(dev);
402 } else { 393 } else {
403 iface->condition = USB_INTERFACE_UNBOUND; 394 down(&dev->sem);
404 usb_cancel_queued_reset(iface); 395 usb_unbind_interface(dev);
396 dev->driver = NULL;
397 up(&dev->sem);
405 } 398 }
406 dev->driver = NULL;
407 usb_set_intfdata(iface, NULL);
408
409 usb_pm_lock(udev);
410 iface->condition = USB_INTERFACE_UNBOUND;
411 mark_quiesced(iface);
412 iface->needs_remote_wakeup = 0;
413 usb_pm_unlock(udev);
414} 399}
415EXPORT_SYMBOL_GPL(usb_driver_release_interface); 400EXPORT_SYMBOL_GPL(usb_driver_release_interface);
416 401
@@ -598,7 +583,7 @@ static int usb_device_match(struct device *dev, struct device_driver *drv)
598 /* TODO: Add real matching code */ 583 /* TODO: Add real matching code */
599 return 1; 584 return 1;
600 585
601 } else { 586 } else if (is_usb_interface(dev)) {
602 struct usb_interface *intf; 587 struct usb_interface *intf;
603 struct usb_driver *usb_drv; 588 struct usb_driver *usb_drv;
604 const struct usb_device_id *id; 589 const struct usb_device_id *id;
@@ -630,11 +615,14 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
630 /* driver is often null here; dev_dbg() would oops */ 615 /* driver is often null here; dev_dbg() would oops */
631 pr_debug("usb %s: uevent\n", dev_name(dev)); 616 pr_debug("usb %s: uevent\n", dev_name(dev));
632 617
633 if (is_usb_device(dev)) 618 if (is_usb_device(dev)) {
634 usb_dev = to_usb_device(dev); 619 usb_dev = to_usb_device(dev);
635 else { 620 } else if (is_usb_interface(dev)) {
636 struct usb_interface *intf = to_usb_interface(dev); 621 struct usb_interface *intf = to_usb_interface(dev);
622
637 usb_dev = interface_to_usbdev(intf); 623 usb_dev = interface_to_usbdev(intf);
624 } else {
625 return 0;
638 } 626 }
639 627
640 if (usb_dev->devnum < 0) { 628 if (usb_dev->devnum < 0) {
@@ -1762,6 +1750,7 @@ int usb_suspend(struct device *dev, pm_message_t msg)
1762int usb_resume(struct device *dev, pm_message_t msg) 1750int usb_resume(struct device *dev, pm_message_t msg)
1763{ 1751{
1764 struct usb_device *udev; 1752 struct usb_device *udev;
1753 int status;
1765 1754
1766 udev = to_usb_device(dev); 1755 udev = to_usb_device(dev);
1767 1756
@@ -1771,7 +1760,14 @@ int usb_resume(struct device *dev, pm_message_t msg)
1771 */ 1760 */
1772 if (udev->skip_sys_resume) 1761 if (udev->skip_sys_resume)
1773 return 0; 1762 return 0;
1774 return usb_external_resume_device(udev, msg); 1763 status = usb_external_resume_device(udev, msg);
1764
1765 /* Avoid PM error messages for devices disconnected while suspended
1766 * as we'll display regular disconnect messages just a bit later.
1767 */
1768 if (status == -ENODEV)
1769 return 0;
1770 return status;
1775} 1771}
1776 1772
1777#endif /* CONFIG_PM */ 1773#endif /* CONFIG_PM */
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 40dee2ac0133..bc39fc40bbde 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -15,19 +15,18 @@
15#include <linux/usb.h> 15#include <linux/usb.h>
16#include "usb.h" 16#include "usb.h"
17 17
18#define MAX_ENDPOINT_MINORS (64*128*32)
19static int usb_endpoint_major;
20static DEFINE_IDR(endpoint_idr);
21
22struct ep_device { 18struct ep_device {
23 struct usb_endpoint_descriptor *desc; 19 struct usb_endpoint_descriptor *desc;
24 struct usb_device *udev; 20 struct usb_device *udev;
25 struct device dev; 21 struct device dev;
26 int minor;
27}; 22};
28#define to_ep_device(_dev) \ 23#define to_ep_device(_dev) \
29 container_of(_dev, struct ep_device, dev) 24 container_of(_dev, struct ep_device, dev)
30 25
26struct device_type usb_ep_device_type = {
27 .name = "usb_endpoint",
28};
29
31struct ep_attribute { 30struct ep_attribute {
32 struct attribute attr; 31 struct attribute attr;
33 ssize_t (*show)(struct usb_device *, 32 ssize_t (*show)(struct usb_device *,
@@ -160,118 +159,10 @@ static struct attribute_group *ep_dev_groups[] = {
160 NULL 159 NULL
161}; 160};
162 161
163static int usb_endpoint_major_init(void)
164{
165 dev_t dev;
166 int error;
167
168 error = alloc_chrdev_region(&dev, 0, MAX_ENDPOINT_MINORS,
169 "usb_endpoint");
170 if (error) {
171 printk(KERN_ERR "Unable to get a dynamic major for "
172 "usb endpoints.\n");
173 return error;
174 }
175 usb_endpoint_major = MAJOR(dev);
176
177 return error;
178}
179
180static void usb_endpoint_major_cleanup(void)
181{
182 unregister_chrdev_region(MKDEV(usb_endpoint_major, 0),
183 MAX_ENDPOINT_MINORS);
184}
185
186static int endpoint_get_minor(struct ep_device *ep_dev)
187{
188 static DEFINE_MUTEX(minor_lock);
189 int retval = -ENOMEM;
190 int id;
191
192 mutex_lock(&minor_lock);
193 if (idr_pre_get(&endpoint_idr, GFP_KERNEL) == 0)
194 goto exit;
195
196 retval = idr_get_new(&endpoint_idr, ep_dev, &id);
197 if (retval < 0) {
198 if (retval == -EAGAIN)
199 retval = -ENOMEM;
200 goto exit;
201 }
202 ep_dev->minor = id & MAX_ID_MASK;
203exit:
204 mutex_unlock(&minor_lock);
205 return retval;
206}
207
208static void endpoint_free_minor(struct ep_device *ep_dev)
209{
210 idr_remove(&endpoint_idr, ep_dev->minor);
211}
212
213static struct endpoint_class {
214 struct kref kref;
215 struct class *class;
216} *ep_class;
217
218static int init_endpoint_class(void)
219{
220 int result = 0;
221
222 if (ep_class != NULL) {
223 kref_get(&ep_class->kref);
224 goto exit;
225 }
226
227 ep_class = kmalloc(sizeof(*ep_class), GFP_KERNEL);
228 if (!ep_class) {
229 result = -ENOMEM;
230 goto exit;
231 }
232
233 kref_init(&ep_class->kref);
234 ep_class->class = class_create(THIS_MODULE, "usb_endpoint");
235 if (IS_ERR(ep_class->class)) {
236 result = PTR_ERR(ep_class->class);
237 goto class_create_error;
238 }
239
240 result = usb_endpoint_major_init();
241 if (result)
242 goto endpoint_major_error;
243
244 goto exit;
245
246endpoint_major_error:
247 class_destroy(ep_class->class);
248class_create_error:
249 kfree(ep_class);
250 ep_class = NULL;
251exit:
252 return result;
253}
254
255static void release_endpoint_class(struct kref *kref)
256{
257 /* Ok, we cheat as we know we only have one ep_class */
258 class_destroy(ep_class->class);
259 kfree(ep_class);
260 ep_class = NULL;
261 usb_endpoint_major_cleanup();
262}
263
264static void destroy_endpoint_class(void)
265{
266 if (ep_class)
267 kref_put(&ep_class->kref, release_endpoint_class);
268}
269
270static void ep_device_release(struct device *dev) 162static void ep_device_release(struct device *dev)
271{ 163{
272 struct ep_device *ep_dev = to_ep_device(dev); 164 struct ep_device *ep_dev = to_ep_device(dev);
273 165
274 endpoint_free_minor(ep_dev);
275 kfree(ep_dev); 166 kfree(ep_dev);
276} 167}
277 168
@@ -279,62 +170,32 @@ int usb_create_ep_devs(struct device *parent,
279 struct usb_host_endpoint *endpoint, 170 struct usb_host_endpoint *endpoint,
280 struct usb_device *udev) 171 struct usb_device *udev)
281{ 172{
282 char name[8];
283 struct ep_device *ep_dev; 173 struct ep_device *ep_dev;
284 int retval; 174 int retval;
285 175
286 retval = init_endpoint_class();
287 if (retval)
288 goto exit;
289
290 ep_dev = kzalloc(sizeof(*ep_dev), GFP_KERNEL); 176 ep_dev = kzalloc(sizeof(*ep_dev), GFP_KERNEL);
291 if (!ep_dev) { 177 if (!ep_dev) {
292 retval = -ENOMEM; 178 retval = -ENOMEM;
293 goto error_alloc; 179 goto exit;
294 }
295
296 retval = endpoint_get_minor(ep_dev);
297 if (retval) {
298 dev_err(parent, "can not allocate minor number for %s\n",
299 dev_name(&ep_dev->dev));
300 goto error_register;
301 } 180 }
302 181
303 ep_dev->desc = &endpoint->desc; 182 ep_dev->desc = &endpoint->desc;
304 ep_dev->udev = udev; 183 ep_dev->udev = udev;
305 ep_dev->dev.groups = ep_dev_groups; 184 ep_dev->dev.groups = ep_dev_groups;
306 ep_dev->dev.devt = MKDEV(usb_endpoint_major, ep_dev->minor); 185 ep_dev->dev.type = &usb_ep_device_type;
307 ep_dev->dev.class = ep_class->class;
308 ep_dev->dev.parent = parent; 186 ep_dev->dev.parent = parent;
309 ep_dev->dev.release = ep_device_release; 187 ep_dev->dev.release = ep_device_release;
310 dev_set_name(&ep_dev->dev, "usbdev%d.%d_ep%02x", 188 dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);
311 udev->bus->busnum, udev->devnum,
312 endpoint->desc.bEndpointAddress);
313 189
314 retval = device_register(&ep_dev->dev); 190 retval = device_register(&ep_dev->dev);
315 if (retval) 191 if (retval)
316 goto error_chrdev; 192 goto error_register;
317 193
318 /* create the symlink to the old-style "ep_XX" directory */
319 sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
320 retval = sysfs_create_link(&parent->kobj, &ep_dev->dev.kobj, name);
321 if (retval)
322 goto error_link;
323 endpoint->ep_dev = ep_dev; 194 endpoint->ep_dev = ep_dev;
324 return retval; 195 return retval;
325 196
326error_link:
327 device_unregister(&ep_dev->dev);
328 destroy_endpoint_class();
329 return retval;
330
331error_chrdev:
332 endpoint_free_minor(ep_dev);
333
334error_register: 197error_register:
335 kfree(ep_dev); 198 kfree(ep_dev);
336error_alloc:
337 destroy_endpoint_class();
338exit: 199exit:
339 return retval; 200 return retval;
340} 201}
@@ -344,12 +205,7 @@ void usb_remove_ep_devs(struct usb_host_endpoint *endpoint)
344 struct ep_device *ep_dev = endpoint->ep_dev; 205 struct ep_device *ep_dev = endpoint->ep_dev;
345 206
346 if (ep_dev) { 207 if (ep_dev) {
347 char name[8];
348
349 sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
350 sysfs_remove_link(&ep_dev->dev.parent->kobj, name);
351 device_unregister(&ep_dev->dev); 208 device_unregister(&ep_dev->dev);
352 endpoint->ep_dev = NULL; 209 endpoint->ep_dev = NULL;
353 destroy_endpoint_class();
354 } 210 }
355} 211}
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 997e659ff693..5cef88929b3e 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -67,6 +67,16 @@ static struct usb_class {
67 struct class *class; 67 struct class *class;
68} *usb_class; 68} *usb_class;
69 69
70static char *usb_nodename(struct device *dev)
71{
72 struct usb_class_driver *drv;
73
74 drv = dev_get_drvdata(dev);
75 if (!drv || !drv->nodename)
76 return NULL;
77 return drv->nodename(dev);
78}
79
70static int init_usb_class(void) 80static int init_usb_class(void)
71{ 81{
72 int result = 0; 82 int result = 0;
@@ -90,6 +100,7 @@ static int init_usb_class(void)
90 kfree(usb_class); 100 kfree(usb_class);
91 usb_class = NULL; 101 usb_class = NULL;
92 } 102 }
103 usb_class->class->nodename = usb_nodename;
93 104
94exit: 105exit:
95 return result; 106 return result;
@@ -198,7 +209,7 @@ int usb_register_dev(struct usb_interface *intf,
198 else 209 else
199 temp = name; 210 temp = name;
200 intf->usb_dev = device_create(usb_class->class, &intf->dev, 211 intf->usb_dev = device_create(usb_class->class, &intf->dev,
201 MKDEV(USB_MAJOR, minor), NULL, 212 MKDEV(USB_MAJOR, minor), class_driver,
202 "%s", temp); 213 "%s", temp);
203 if (IS_ERR(intf->usb_dev)) { 214 if (IS_ERR(intf->usb_dev)) {
204 down_write(&minor_rwsem); 215 down_write(&minor_rwsem);
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index a4301dc02d27..91f2885b6ee1 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -185,194 +185,198 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
185} 185}
186EXPORT_SYMBOL_GPL(usb_hcd_pci_remove); 186EXPORT_SYMBOL_GPL(usb_hcd_pci_remove);
187 187
188
189#ifdef CONFIG_PM
190
191/** 188/**
192 * usb_hcd_pci_suspend - power management suspend of a PCI-based HCD 189 * usb_hcd_pci_shutdown - shutdown host controller
193 * @dev: USB Host Controller being suspended 190 * @dev: USB Host Controller being shutdown
194 * @message: Power Management message describing this state transition
195 *
196 * Store this function in the HCD's struct pci_driver as .suspend.
197 */ 191 */
198int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message) 192void usb_hcd_pci_shutdown(struct pci_dev *dev)
193{
194 struct usb_hcd *hcd;
195
196 hcd = pci_get_drvdata(dev);
197 if (!hcd)
198 return;
199
200 if (hcd->driver->shutdown)
201 hcd->driver->shutdown(hcd);
202}
203EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
204
205#ifdef CONFIG_PM_SLEEP
206
207static int check_root_hub_suspended(struct device *dev)
208{
209 struct pci_dev *pci_dev = to_pci_dev(dev);
210 struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
211
212 if (!(hcd->state == HC_STATE_SUSPENDED ||
213 hcd->state == HC_STATE_HALT)) {
214 dev_warn(dev, "Root hub is not suspended\n");
215 return -EBUSY;
216 }
217 return 0;
218}
219
220static int hcd_pci_suspend(struct device *dev)
199{ 221{
200 struct usb_hcd *hcd = pci_get_drvdata(dev); 222 struct pci_dev *pci_dev = to_pci_dev(dev);
201 int retval = 0; 223 struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
202 int wake, w; 224 int retval;
203 int has_pci_pm;
204 225
205 /* Root hub suspend should have stopped all downstream traffic, 226 /* Root hub suspend should have stopped all downstream traffic,
206 * and all bus master traffic. And done so for both the interface 227 * and all bus master traffic. And done so for both the interface
207 * and the stub usb_device (which we check here). But maybe it 228 * and the stub usb_device (which we check here). But maybe it
208 * didn't; writing sysfs power/state files ignores such rules... 229 * didn't; writing sysfs power/state files ignores such rules...
209 *
210 * We must ignore the FREEZE vs SUSPEND distinction here, because
211 * otherwise the swsusp will save (and restore) garbage state.
212 */ 230 */
213 if (!(hcd->state == HC_STATE_SUSPENDED || 231 retval = check_root_hub_suspended(dev);
214 hcd->state == HC_STATE_HALT)) { 232 if (retval)
215 dev_warn(&dev->dev, "Root hub is not suspended\n"); 233 return retval;
216 retval = -EBUSY;
217 goto done;
218 }
219 234
220 /* We might already be suspended (runtime PM -- not yet written) */ 235 /* We might already be suspended (runtime PM -- not yet written) */
221 if (dev->current_state != PCI_D0) 236 if (pci_dev->current_state != PCI_D0)
222 goto done; 237 return retval;
223 238
224 if (hcd->driver->pci_suspend) { 239 if (hcd->driver->pci_suspend) {
225 retval = hcd->driver->pci_suspend(hcd, message); 240 retval = hcd->driver->pci_suspend(hcd);
226 suspend_report_result(hcd->driver->pci_suspend, retval); 241 suspend_report_result(hcd->driver->pci_suspend, retval);
227 if (retval) 242 if (retval)
228 goto done; 243 return retval;
229 } 244 }
230 245
231 synchronize_irq(dev->irq); 246 synchronize_irq(pci_dev->irq);
232 247
233 /* Downstream ports from this root hub should already be quiesced, so 248 /* Downstream ports from this root hub should already be quiesced, so
234 * there will be no DMA activity. Now we can shut down the upstream 249 * there will be no DMA activity. Now we can shut down the upstream
235 * link (except maybe for PME# resume signaling) and enter some PCI 250 * link (except maybe for PME# resume signaling). We'll enter a
236 * low power state, if the hardware allows. 251 * low power state during suspend_noirq, if the hardware allows.
237 */ 252 */
238 pci_disable_device(dev); 253 pci_disable_device(pci_dev);
254 return retval;
255}
256
257static int hcd_pci_suspend_noirq(struct device *dev)
258{
259 struct pci_dev *pci_dev = to_pci_dev(dev);
260 struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
261 int retval;
262
263 retval = check_root_hub_suspended(dev);
264 if (retval)
265 return retval;
239 266
240 pci_save_state(dev); 267 pci_save_state(pci_dev);
241 268
242 /* Don't fail on error to enable wakeup. We rely on pci code 269 /* If the root hub is HALTed rather than SUSPENDed,
243 * to reject requests the hardware can't implement, rather 270 * disallow remote wakeup.
244 * than coding the same thing.
245 */ 271 */
246 wake = (hcd->state == HC_STATE_SUSPENDED && 272 if (hcd->state == HC_STATE_HALT)
247 device_may_wakeup(&dev->dev)); 273 device_set_wakeup_enable(dev, 0);
248 w = pci_wake_from_d3(dev, wake); 274 dev_dbg(dev, "wakeup: %d\n", device_may_wakeup(dev));
249 if (w < 0)
250 wake = w;
251 dev_dbg(&dev->dev, "wakeup: %d\n", wake);
252
253 /* Don't change state if we don't need to */
254 if (message.event == PM_EVENT_FREEZE ||
255 message.event == PM_EVENT_PRETHAW) {
256 dev_dbg(&dev->dev, "--> no state change\n");
257 goto done;
258 }
259 275
260 has_pci_pm = pci_find_capability(dev, PCI_CAP_ID_PM); 276 /* Possibly enable remote wakeup,
261 if (!has_pci_pm) { 277 * choose the appropriate low-power state, and go to that state.
262 dev_dbg(&dev->dev, "--> PCI D0 legacy\n"); 278 */
279 retval = pci_prepare_to_sleep(pci_dev);
280 if (retval == -EIO) { /* Low-power not supported */
281 dev_dbg(dev, "--> PCI D0 legacy\n");
282 retval = 0;
283 } else if (retval == 0) {
284 dev_dbg(dev, "--> PCI %s\n",
285 pci_power_name(pci_dev->current_state));
263 } else { 286 } else {
264 287 suspend_report_result(pci_prepare_to_sleep, retval);
265 /* NOTE: dev->current_state becomes nonzero only here, and 288 return retval;
266 * only for devices that support PCI PM. Also, exiting
267 * PCI_D3 (but not PCI_D1 or PCI_D2) is allowed to reset
268 * some device state (e.g. as part of clock reinit).
269 */
270 retval = pci_set_power_state(dev, PCI_D3hot);
271 suspend_report_result(pci_set_power_state, retval);
272 if (retval == 0) {
273 dev_dbg(&dev->dev, "--> PCI D3\n");
274 } else {
275 dev_dbg(&dev->dev, "PCI D3 suspend fail, %d\n",
276 retval);
277 pci_restore_state(dev);
278 }
279 } 289 }
280 290
281#ifdef CONFIG_PPC_PMAC 291#ifdef CONFIG_PPC_PMAC
282 if (retval == 0) { 292 /* Disable ASIC clocks for USB */
283 /* Disable ASIC clocks for USB */ 293 if (machine_is(powermac)) {
284 if (machine_is(powermac)) { 294 struct device_node *of_node;
285 struct device_node *of_node; 295
286 296 of_node = pci_device_to_OF_node(pci_dev);
287 of_node = pci_device_to_OF_node(dev); 297 if (of_node)
288 if (of_node) 298 pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, 0);
289 pmac_call_feature(PMAC_FTR_USB_ENABLE,
290 of_node, 0, 0);
291 }
292 } 299 }
293#endif 300#endif
294
295 done:
296 return retval; 301 return retval;
297} 302}
298EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend);
299 303
300/** 304static int hcd_pci_resume_noirq(struct device *dev)
301 * usb_hcd_pci_resume - power management resume of a PCI-based HCD
302 * @dev: USB Host Controller being resumed
303 *
304 * Store this function in the HCD's struct pci_driver as .resume.
305 */
306int usb_hcd_pci_resume(struct pci_dev *dev)
307{ 305{
308 struct usb_hcd *hcd; 306 struct pci_dev *pci_dev = to_pci_dev(dev);
309 int retval;
310 307
311#ifdef CONFIG_PPC_PMAC 308#ifdef CONFIG_PPC_PMAC
312 /* Reenable ASIC clocks for USB */ 309 /* Reenable ASIC clocks for USB */
313 if (machine_is(powermac)) { 310 if (machine_is(powermac)) {
314 struct device_node *of_node; 311 struct device_node *of_node;
315 312
316 of_node = pci_device_to_OF_node(dev); 313 of_node = pci_device_to_OF_node(pci_dev);
317 if (of_node) 314 if (of_node)
318 pmac_call_feature(PMAC_FTR_USB_ENABLE, 315 pmac_call_feature(PMAC_FTR_USB_ENABLE,
319 of_node, 0, 1); 316 of_node, 0, 1);
320 } 317 }
321#endif 318#endif
322 319
323 pci_restore_state(dev); 320 /* Go back to D0 and disable remote wakeup */
321 pci_back_from_sleep(pci_dev);
322 return 0;
323}
324
325static int resume_common(struct device *dev, bool hibernated)
326{
327 struct pci_dev *pci_dev = to_pci_dev(dev);
328 struct usb_hcd *hcd = pci_get_drvdata(pci_dev);
329 int retval;
324 330
325 hcd = pci_get_drvdata(dev);
326 if (hcd->state != HC_STATE_SUSPENDED) { 331 if (hcd->state != HC_STATE_SUSPENDED) {
327 dev_dbg(hcd->self.controller, 332 dev_dbg(dev, "can't resume, not suspended!\n");
328 "can't resume, not suspended!\n");
329 return 0; 333 return 0;
330 } 334 }
331 335
332 pci_enable_wake(dev, PCI_D0, false); 336 retval = pci_enable_device(pci_dev);
333
334 retval = pci_enable_device(dev);
335 if (retval < 0) { 337 if (retval < 0) {
336 dev_err(&dev->dev, "can't re-enable after resume, %d!\n", 338 dev_err(dev, "can't re-enable after resume, %d!\n", retval);
337 retval);
338 return retval; 339 return retval;
339 } 340 }
340 341
341 pci_set_master(dev); 342 pci_set_master(pci_dev);
342
343 /* yes, ignore this result too... */
344 (void) pci_wake_from_d3(dev, 0);
345 343
346 clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags); 344 clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
347 345
348 if (hcd->driver->pci_resume) { 346 if (hcd->driver->pci_resume) {
349 retval = hcd->driver->pci_resume(hcd); 347 retval = hcd->driver->pci_resume(hcd, hibernated);
350 if (retval) { 348 if (retval) {
351 dev_err(hcd->self.controller, 349 dev_err(dev, "PCI post-resume error %d!\n", retval);
352 "PCI post-resume error %d!\n", retval);
353 usb_hc_died(hcd); 350 usb_hc_died(hcd);
354 } 351 }
355 } 352 }
356 return retval; 353 return retval;
357} 354}
358EXPORT_SYMBOL_GPL(usb_hcd_pci_resume);
359 355
360#endif /* CONFIG_PM */ 356static int hcd_pci_resume(struct device *dev)
361
362/**
363 * usb_hcd_pci_shutdown - shutdown host controller
364 * @dev: USB Host Controller being shutdown
365 */
366void usb_hcd_pci_shutdown(struct pci_dev *dev)
367{ 357{
368 struct usb_hcd *hcd; 358 return resume_common(dev, false);
369 359}
370 hcd = pci_get_drvdata(dev);
371 if (!hcd)
372 return;
373 360
374 if (hcd->driver->shutdown) 361static int hcd_pci_restore(struct device *dev)
375 hcd->driver->shutdown(hcd); 362{
363 return resume_common(dev, true);
376} 364}
377EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
378 365
366struct dev_pm_ops usb_hcd_pci_pm_ops = {
367 .suspend = hcd_pci_suspend,
368 .suspend_noirq = hcd_pci_suspend_noirq,
369 .resume_noirq = hcd_pci_resume_noirq,
370 .resume = hcd_pci_resume,
371 .freeze = check_root_hub_suspended,
372 .freeze_noirq = check_root_hub_suspended,
373 .thaw_noirq = NULL,
374 .thaw = NULL,
375 .poweroff = hcd_pci_suspend,
376 .poweroff_noirq = hcd_pci_suspend_noirq,
377 .restore_noirq = hcd_pci_resume_noirq,
378 .restore = hcd_pci_restore,
379};
380EXPORT_SYMBOL_GPL(usb_hcd_pci_pm_ops);
381
382#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 42b93da1085d..ce3f453f02ef 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -128,6 +128,27 @@ static inline int is_root_hub(struct usb_device *udev)
128#define KERNEL_REL ((LINUX_VERSION_CODE >> 16) & 0x0ff) 128#define KERNEL_REL ((LINUX_VERSION_CODE >> 16) & 0x0ff)
129#define KERNEL_VER ((LINUX_VERSION_CODE >> 8) & 0x0ff) 129#define KERNEL_VER ((LINUX_VERSION_CODE >> 8) & 0x0ff)
130 130
131/* usb 3.0 root hub device descriptor */
132static const u8 usb3_rh_dev_descriptor[18] = {
133 0x12, /* __u8 bLength; */
134 0x01, /* __u8 bDescriptorType; Device */
135 0x00, 0x03, /* __le16 bcdUSB; v3.0 */
136
137 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
138 0x00, /* __u8 bDeviceSubClass; */
139 0x03, /* __u8 bDeviceProtocol; USB 3.0 hub */
140 0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */
141
142 0x6b, 0x1d, /* __le16 idVendor; Linux Foundation */
143 0x02, 0x00, /* __le16 idProduct; device 0x0002 */
144 KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
145
146 0x03, /* __u8 iManufacturer; */
147 0x02, /* __u8 iProduct; */
148 0x01, /* __u8 iSerialNumber; */
149 0x01 /* __u8 bNumConfigurations; */
150};
151
131/* usb 2.0 root hub device descriptor */ 152/* usb 2.0 root hub device descriptor */
132static const u8 usb2_rh_dev_descriptor [18] = { 153static const u8 usb2_rh_dev_descriptor [18] = {
133 0x12, /* __u8 bLength; */ 154 0x12, /* __u8 bLength; */
@@ -273,6 +294,47 @@ static const u8 hs_rh_config_descriptor [] = {
273 0x0c /* __u8 ep_bInterval; (256ms -- usb 2.0 spec) */ 294 0x0c /* __u8 ep_bInterval; (256ms -- usb 2.0 spec) */
274}; 295};
275 296
297static const u8 ss_rh_config_descriptor[] = {
298 /* one configuration */
299 0x09, /* __u8 bLength; */
300 0x02, /* __u8 bDescriptorType; Configuration */
301 0x19, 0x00, /* __le16 wTotalLength; FIXME */
302 0x01, /* __u8 bNumInterfaces; (1) */
303 0x01, /* __u8 bConfigurationValue; */
304 0x00, /* __u8 iConfiguration; */
305 0xc0, /* __u8 bmAttributes;
306 Bit 7: must be set,
307 6: Self-powered,
308 5: Remote wakeup,
309 4..0: resvd */
310 0x00, /* __u8 MaxPower; */
311
312 /* one interface */
313 0x09, /* __u8 if_bLength; */
314 0x04, /* __u8 if_bDescriptorType; Interface */
315 0x00, /* __u8 if_bInterfaceNumber; */
316 0x00, /* __u8 if_bAlternateSetting; */
317 0x01, /* __u8 if_bNumEndpoints; */
318 0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */
319 0x00, /* __u8 if_bInterfaceSubClass; */
320 0x00, /* __u8 if_bInterfaceProtocol; */
321 0x00, /* __u8 if_iInterface; */
322
323 /* one endpoint (status change endpoint) */
324 0x07, /* __u8 ep_bLength; */
325 0x05, /* __u8 ep_bDescriptorType; Endpoint */
326 0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
327 0x03, /* __u8 ep_bmAttributes; Interrupt */
328 /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
329 * see hub.c:hub_configure() for details. */
330 (USB_MAXCHILDREN + 1 + 7) / 8, 0x00,
331 0x0c /* __u8 ep_bInterval; (256ms -- usb 2.0 spec) */
332 /*
333 * All 3.0 hubs should have an endpoint companion descriptor,
334 * but we're ignoring that for now. FIXME?
335 */
336};
337
276/*-------------------------------------------------------------------------*/ 338/*-------------------------------------------------------------------------*/
277 339
278/* 340/*
@@ -426,23 +488,39 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
426 case DeviceRequest | USB_REQ_GET_DESCRIPTOR: 488 case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
427 switch (wValue & 0xff00) { 489 switch (wValue & 0xff00) {
428 case USB_DT_DEVICE << 8: 490 case USB_DT_DEVICE << 8:
429 if (hcd->driver->flags & HCD_USB2) 491 switch (hcd->driver->flags & HCD_MASK) {
492 case HCD_USB3:
493 bufp = usb3_rh_dev_descriptor;
494 break;
495 case HCD_USB2:
430 bufp = usb2_rh_dev_descriptor; 496 bufp = usb2_rh_dev_descriptor;
431 else if (hcd->driver->flags & HCD_USB11) 497 break;
498 case HCD_USB11:
432 bufp = usb11_rh_dev_descriptor; 499 bufp = usb11_rh_dev_descriptor;
433 else 500 break;
501 default:
434 goto error; 502 goto error;
503 }
435 len = 18; 504 len = 18;
436 if (hcd->has_tt) 505 if (hcd->has_tt)
437 patch_protocol = 1; 506 patch_protocol = 1;
438 break; 507 break;
439 case USB_DT_CONFIG << 8: 508 case USB_DT_CONFIG << 8:
440 if (hcd->driver->flags & HCD_USB2) { 509 switch (hcd->driver->flags & HCD_MASK) {
510 case HCD_USB3:
511 bufp = ss_rh_config_descriptor;
512 len = sizeof ss_rh_config_descriptor;
513 break;
514 case HCD_USB2:
441 bufp = hs_rh_config_descriptor; 515 bufp = hs_rh_config_descriptor;
442 len = sizeof hs_rh_config_descriptor; 516 len = sizeof hs_rh_config_descriptor;
443 } else { 517 break;
518 case HCD_USB11:
444 bufp = fs_rh_config_descriptor; 519 bufp = fs_rh_config_descriptor;
445 len = sizeof fs_rh_config_descriptor; 520 len = sizeof fs_rh_config_descriptor;
521 break;
522 default:
523 goto error;
446 } 524 }
447 if (device_can_wakeup(&hcd->self.root_hub->dev)) 525 if (device_can_wakeup(&hcd->self.root_hub->dev))
448 patch_wakeup = 1; 526 patch_wakeup = 1;
@@ -755,23 +833,6 @@ static struct attribute_group usb_bus_attr_group = {
755 833
756/*-------------------------------------------------------------------------*/ 834/*-------------------------------------------------------------------------*/
757 835
758static struct class *usb_host_class;
759
760int usb_host_init(void)
761{
762 int retval = 0;
763
764 usb_host_class = class_create(THIS_MODULE, "usb_host");
765 if (IS_ERR(usb_host_class))
766 retval = PTR_ERR(usb_host_class);
767 return retval;
768}
769
770void usb_host_cleanup(void)
771{
772 class_destroy(usb_host_class);
773}
774
775/** 836/**
776 * usb_bus_init - shared initialization code 837 * usb_bus_init - shared initialization code
777 * @bus: the bus structure being initialized 838 * @bus: the bus structure being initialized
@@ -818,12 +879,6 @@ static int usb_register_bus(struct usb_bus *bus)
818 set_bit (busnum, busmap.busmap); 879 set_bit (busnum, busmap.busmap);
819 bus->busnum = busnum; 880 bus->busnum = busnum;
820 881
821 bus->dev = device_create(usb_host_class, bus->controller, MKDEV(0, 0),
822 bus, "usb_host%d", busnum);
823 result = PTR_ERR(bus->dev);
824 if (IS_ERR(bus->dev))
825 goto error_create_class_dev;
826
827 /* Add it to the local list of buses */ 882 /* Add it to the local list of buses */
828 list_add (&bus->bus_list, &usb_bus_list); 883 list_add (&bus->bus_list, &usb_bus_list);
829 mutex_unlock(&usb_bus_list_lock); 884 mutex_unlock(&usb_bus_list_lock);
@@ -834,8 +889,6 @@ static int usb_register_bus(struct usb_bus *bus)
834 "number %d\n", bus->busnum); 889 "number %d\n", bus->busnum);
835 return 0; 890 return 0;
836 891
837error_create_class_dev:
838 clear_bit(busnum, busmap.busmap);
839error_find_busnum: 892error_find_busnum:
840 mutex_unlock(&usb_bus_list_lock); 893 mutex_unlock(&usb_bus_list_lock);
841 return result; 894 return result;
@@ -865,8 +918,6 @@ static void usb_deregister_bus (struct usb_bus *bus)
865 usb_notify_remove_bus(bus); 918 usb_notify_remove_bus(bus);
866 919
867 clear_bit (bus->busnum, busmap.busmap); 920 clear_bit (bus->busnum, busmap.busmap);
868
869 device_unregister(bus->dev);
870} 921}
871 922
872/** 923/**
@@ -1199,7 +1250,8 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1199 1250
1200 /* Map the URB's buffers for DMA access. 1251 /* Map the URB's buffers for DMA access.
1201 * Lower level HCD code should use *_dma exclusively, 1252 * Lower level HCD code should use *_dma exclusively,
1202 * unless it uses pio or talks to another transport. 1253 * unless it uses pio or talks to another transport,
1254 * or uses the provided scatter gather list for bulk.
1203 */ 1255 */
1204 if (is_root_hub(urb->dev)) 1256 if (is_root_hub(urb->dev))
1205 return 0; 1257 return 0;
@@ -1520,6 +1572,92 @@ rescan:
1520 } 1572 }
1521} 1573}
1522 1574
1575/* Check whether a new configuration or alt setting for an interface
1576 * will exceed the bandwidth for the bus (or the host controller resources).
1577 * Only pass in a non-NULL config or interface, not both!
1578 * Passing NULL for both new_config and new_intf means the device will be
1579 * de-configured by issuing a set configuration 0 command.
1580 */
1581int usb_hcd_check_bandwidth(struct usb_device *udev,
1582 struct usb_host_config *new_config,
1583 struct usb_interface *new_intf)
1584{
1585 int num_intfs, i, j;
1586 struct usb_interface_cache *intf_cache;
1587 struct usb_host_interface *alt = 0;
1588 int ret = 0;
1589 struct usb_hcd *hcd;
1590 struct usb_host_endpoint *ep;
1591
1592 hcd = bus_to_hcd(udev->bus);
1593 if (!hcd->driver->check_bandwidth)
1594 return 0;
1595
1596 /* Configuration is being removed - set configuration 0 */
1597 if (!new_config && !new_intf) {
1598 for (i = 1; i < 16; ++i) {
1599 ep = udev->ep_out[i];
1600 if (ep)
1601 hcd->driver->drop_endpoint(hcd, udev, ep);
1602 ep = udev->ep_in[i];
1603 if (ep)
1604 hcd->driver->drop_endpoint(hcd, udev, ep);
1605 }
1606 hcd->driver->check_bandwidth(hcd, udev);
1607 return 0;
1608 }
1609 /* Check if the HCD says there's enough bandwidth. Enable all endpoints
1610 * each interface's alt setting 0 and ask the HCD to check the bandwidth
1611 * of the bus. There will always be bandwidth for endpoint 0, so it's
1612 * ok to exclude it.
1613 */
1614 if (new_config) {
1615 num_intfs = new_config->desc.bNumInterfaces;
1616 /* Remove endpoints (except endpoint 0, which is always on the
1617 * schedule) from the old config from the schedule
1618 */
1619 for (i = 1; i < 16; ++i) {
1620 ep = udev->ep_out[i];
1621 if (ep) {
1622 ret = hcd->driver->drop_endpoint(hcd, udev, ep);
1623 if (ret < 0)
1624 goto reset;
1625 }
1626 ep = udev->ep_in[i];
1627 if (ep) {
1628 ret = hcd->driver->drop_endpoint(hcd, udev, ep);
1629 if (ret < 0)
1630 goto reset;
1631 }
1632 }
1633 for (i = 0; i < num_intfs; ++i) {
1634
1635 /* Dig the endpoints for alt setting 0 out of the
1636 * interface cache for this interface
1637 */
1638 intf_cache = new_config->intf_cache[i];
1639 for (j = 0; j < intf_cache->num_altsetting; j++) {
1640 if (intf_cache->altsetting[j].desc.bAlternateSetting == 0)
1641 alt = &intf_cache->altsetting[j];
1642 }
1643 if (!alt) {
1644 printk(KERN_DEBUG "Did not find alt setting 0 for intf %d\n", i);
1645 continue;
1646 }
1647 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
1648 ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
1649 if (ret < 0)
1650 goto reset;
1651 }
1652 }
1653 }
1654 ret = hcd->driver->check_bandwidth(hcd, udev);
1655reset:
1656 if (ret < 0)
1657 hcd->driver->reset_bandwidth(hcd, udev);
1658 return ret;
1659}
1660
1523/* Disables the endpoint: synchronizes with the hcd to make sure all 1661/* Disables the endpoint: synchronizes with the hcd to make sure all
1524 * endpoint state is gone from hardware. usb_hcd_flush_endpoint() must 1662 * endpoint state is gone from hardware. usb_hcd_flush_endpoint() must
1525 * have been called previously. Use for set_configuration, set_interface, 1663 * have been called previously. Use for set_configuration, set_interface,
@@ -1897,8 +2035,20 @@ int usb_add_hcd(struct usb_hcd *hcd,
1897 retval = -ENOMEM; 2035 retval = -ENOMEM;
1898 goto err_allocate_root_hub; 2036 goto err_allocate_root_hub;
1899 } 2037 }
1900 rhdev->speed = (hcd->driver->flags & HCD_USB2) ? USB_SPEED_HIGH : 2038
1901 USB_SPEED_FULL; 2039 switch (hcd->driver->flags & HCD_MASK) {
2040 case HCD_USB11:
2041 rhdev->speed = USB_SPEED_FULL;
2042 break;
2043 case HCD_USB2:
2044 rhdev->speed = USB_SPEED_HIGH;
2045 break;
2046 case HCD_USB3:
2047 rhdev->speed = USB_SPEED_SUPER;
2048 break;
2049 default:
2050 goto err_allocate_root_hub;
2051 }
1902 hcd->self.root_hub = rhdev; 2052 hcd->self.root_hub = rhdev;
1903 2053
1904 /* wakeup flag init defaults to "everything works" for root hubs, 2054 /* wakeup flag init defaults to "everything works" for root hubs,
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index e7d4479de41c..d397ecfd5b17 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -173,6 +173,8 @@ struct hc_driver {
173#define HCD_LOCAL_MEM 0x0002 /* HC needs local memory */ 173#define HCD_LOCAL_MEM 0x0002 /* HC needs local memory */
174#define HCD_USB11 0x0010 /* USB 1.1 */ 174#define HCD_USB11 0x0010 /* USB 1.1 */
175#define HCD_USB2 0x0020 /* USB 2.0 */ 175#define HCD_USB2 0x0020 /* USB 2.0 */
176#define HCD_USB3 0x0040 /* USB 3.0 */
177#define HCD_MASK 0x0070
176 178
177 /* called to init HCD and root hub */ 179 /* called to init HCD and root hub */
178 int (*reset) (struct usb_hcd *hcd); 180 int (*reset) (struct usb_hcd *hcd);
@@ -182,10 +184,10 @@ struct hc_driver {
182 * a whole, not just the root hub; they're for PCI bus glue. 184 * a whole, not just the root hub; they're for PCI bus glue.
183 */ 185 */
184 /* called after suspending the hub, before entering D3 etc */ 186 /* called after suspending the hub, before entering D3 etc */
185 int (*pci_suspend) (struct usb_hcd *hcd, pm_message_t message); 187 int (*pci_suspend)(struct usb_hcd *hcd);
186 188
187 /* called after entering D0 (etc), before resuming the hub */ 189 /* called after entering D0 (etc), before resuming the hub */
188 int (*pci_resume) (struct usb_hcd *hcd); 190 int (*pci_resume)(struct usb_hcd *hcd, bool hibernated);
189 191
190 /* cleanly make HCD stop writing memory and doing I/O */ 192 /* cleanly make HCD stop writing memory and doing I/O */
191 void (*stop) (struct usb_hcd *hcd); 193 void (*stop) (struct usb_hcd *hcd);
@@ -224,6 +226,43 @@ struct hc_driver {
224 void (*relinquish_port)(struct usb_hcd *, int); 226 void (*relinquish_port)(struct usb_hcd *, int);
225 /* has a port been handed over to a companion? */ 227 /* has a port been handed over to a companion? */
226 int (*port_handed_over)(struct usb_hcd *, int); 228 int (*port_handed_over)(struct usb_hcd *, int);
229
230 /* xHCI specific functions */
231 /* Called by usb_alloc_dev to alloc HC device structures */
232 int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
233 /* Called by usb_release_dev to free HC device structures */
234 void (*free_dev)(struct usb_hcd *, struct usb_device *);
235
236 /* Bandwidth computation functions */
237 /* Note that add_endpoint() can only be called once per endpoint before
238 * check_bandwidth() or reset_bandwidth() must be called.
239 * drop_endpoint() can only be called once per endpoint also.
240 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
241 * add the endpoint to the schedule with possibly new parameters denoted by a
242 * different endpoint descriptor in usb_host_endpoint.
243 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
244 * not allowed.
245 */
246 /* Allocate endpoint resources and add them to a new schedule */
247 int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
248 /* Drop an endpoint from a new schedule */
249 int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
250 /* Check that a new hardware configuration, set using
251 * endpoint_enable and endpoint_disable, does not exceed bus
252 * bandwidth. This must be called before any set configuration
253 * or set interface requests are sent to the device.
254 */
255 int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
256 /* Reset the device schedule to the last known good schedule,
257 * which was set from a previous successful call to
258 * check_bandwidth(). This reverts any add_endpoint() and
259 * drop_endpoint() calls since that last successful call.
260 * Used for when a check_bandwidth() call fails due to resource
261 * or bandwidth constraints.
262 */
263 void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
264 /* Returns the hardware-chosen device address */
265 int (*address_device)(struct usb_hcd *, struct usb_device *udev);
227}; 266};
228 267
229extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); 268extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -242,6 +281,9 @@ extern void usb_hcd_disable_endpoint(struct usb_device *udev,
242extern void usb_hcd_reset_endpoint(struct usb_device *udev, 281extern void usb_hcd_reset_endpoint(struct usb_device *udev,
243 struct usb_host_endpoint *ep); 282 struct usb_host_endpoint *ep);
244extern void usb_hcd_synchronize_unlinks(struct usb_device *udev); 283extern void usb_hcd_synchronize_unlinks(struct usb_device *udev);
284extern int usb_hcd_check_bandwidth(struct usb_device *udev,
285 struct usb_host_config *new_config,
286 struct usb_interface *new_intf);
245extern int usb_hcd_get_frame_number(struct usb_device *udev); 287extern int usb_hcd_get_frame_number(struct usb_device *udev);
246 288
247extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver, 289extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
@@ -261,14 +303,11 @@ struct pci_device_id;
261extern int usb_hcd_pci_probe(struct pci_dev *dev, 303extern int usb_hcd_pci_probe(struct pci_dev *dev,
262 const struct pci_device_id *id); 304 const struct pci_device_id *id);
263extern void usb_hcd_pci_remove(struct pci_dev *dev); 305extern void usb_hcd_pci_remove(struct pci_dev *dev);
264
265#ifdef CONFIG_PM
266extern int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t msg);
267extern int usb_hcd_pci_resume(struct pci_dev *dev);
268#endif /* CONFIG_PM */
269
270extern void usb_hcd_pci_shutdown(struct pci_dev *dev); 306extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
271 307
308#ifdef CONFIG_PM_SLEEP
309extern struct dev_pm_ops usb_hcd_pci_pm_ops;
310#endif
272#endif /* CONFIG_PCI */ 311#endif /* CONFIG_PCI */
273 312
274/* pci-ish (pdev null is ok) buffer alloc/mapping support */ 313/* pci-ish (pdev null is ok) buffer alloc/mapping support */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index be86ae3f4088..2af3b4f06054 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -155,6 +155,8 @@ static inline char *portspeed(int portstatus)
155 return "480 Mb/s"; 155 return "480 Mb/s";
156 else if (portstatus & (1 << USB_PORT_FEAT_LOWSPEED)) 156 else if (portstatus & (1 << USB_PORT_FEAT_LOWSPEED))
157 return "1.5 Mb/s"; 157 return "1.5 Mb/s";
158 else if (portstatus & (1 << USB_PORT_FEAT_SUPERSPEED))
159 return "5.0 Gb/s";
158 else 160 else
159 return "12 Mb/s"; 161 return "12 Mb/s";
160} 162}
@@ -457,13 +459,13 @@ static void hub_tt_kevent (struct work_struct *work)
457 459
458 spin_lock_irqsave (&hub->tt.lock, flags); 460 spin_lock_irqsave (&hub->tt.lock, flags);
459 while (--limit && !list_empty (&hub->tt.clear_list)) { 461 while (--limit && !list_empty (&hub->tt.clear_list)) {
460 struct list_head *temp; 462 struct list_head *next;
461 struct usb_tt_clear *clear; 463 struct usb_tt_clear *clear;
462 struct usb_device *hdev = hub->hdev; 464 struct usb_device *hdev = hub->hdev;
463 int status; 465 int status;
464 466
465 temp = hub->tt.clear_list.next; 467 next = hub->tt.clear_list.next;
466 clear = list_entry (temp, struct usb_tt_clear, clear_list); 468 clear = list_entry (next, struct usb_tt_clear, clear_list);
467 list_del (&clear->clear_list); 469 list_del (&clear->clear_list);
468 470
469 /* drop lock so HCD can concurrently report other TT errors */ 471 /* drop lock so HCD can concurrently report other TT errors */
@@ -951,6 +953,9 @@ static int hub_configure(struct usb_hub *hub,
951 ret); 953 ret);
952 hub->tt.hub = hdev; 954 hub->tt.hub = hdev;
953 break; 955 break;
956 case 3:
957 /* USB 3.0 hubs don't have a TT */
958 break;
954 default: 959 default:
955 dev_dbg(hub_dev, "Unrecognized hub protocol %d\n", 960 dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
956 hdev->descriptor.bDeviceProtocol); 961 hdev->descriptor.bDeviceProtocol);
@@ -1323,6 +1328,11 @@ EXPORT_SYMBOL_GPL(usb_set_device_state);
1323 * 0 is reserved by USB for default address; (b) Linux's USB stack 1328 * 0 is reserved by USB for default address; (b) Linux's USB stack
1324 * uses always #1 for the root hub of the controller. So USB stack's 1329 * uses always #1 for the root hub of the controller. So USB stack's
1325 * port #1, which is wusb virtual-port #0 has address #2. 1330 * port #1, which is wusb virtual-port #0 has address #2.
1331 *
1332 * Devices connected under xHCI are not as simple. The host controller
1333 * supports virtualization, so the hardware assigns device addresses and
1334 * the HCD must setup data structures before issuing a set address
1335 * command to the hardware.
1326 */ 1336 */
1327static void choose_address(struct usb_device *udev) 1337static void choose_address(struct usb_device *udev)
1328{ 1338{
@@ -1642,6 +1652,9 @@ int usb_new_device(struct usb_device *udev)
1642 err = usb_configure_device(udev); /* detect & probe dev/intfs */ 1652 err = usb_configure_device(udev); /* detect & probe dev/intfs */
1643 if (err < 0) 1653 if (err < 0)
1644 goto fail; 1654 goto fail;
1655 dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n",
1656 udev->devnum, udev->bus->busnum,
1657 (((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
1645 /* export the usbdev device-node for libusb */ 1658 /* export the usbdev device-node for libusb */
1646 udev->dev.devt = MKDEV(USB_DEVICE_MAJOR, 1659 udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
1647 (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); 1660 (((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
@@ -2395,19 +2408,29 @@ EXPORT_SYMBOL_GPL(usb_ep0_reinit);
2395static int hub_set_address(struct usb_device *udev, int devnum) 2408static int hub_set_address(struct usb_device *udev, int devnum)
2396{ 2409{
2397 int retval; 2410 int retval;
2411 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
2398 2412
2399 if (devnum <= 1) 2413 /*
2414 * The host controller will choose the device address,
2415 * instead of the core having chosen it earlier
2416 */
2417 if (!hcd->driver->address_device && devnum <= 1)
2400 return -EINVAL; 2418 return -EINVAL;
2401 if (udev->state == USB_STATE_ADDRESS) 2419 if (udev->state == USB_STATE_ADDRESS)
2402 return 0; 2420 return 0;
2403 if (udev->state != USB_STATE_DEFAULT) 2421 if (udev->state != USB_STATE_DEFAULT)
2404 return -EINVAL; 2422 return -EINVAL;
2405 retval = usb_control_msg(udev, usb_sndaddr0pipe(), 2423 if (hcd->driver->address_device) {
2406 USB_REQ_SET_ADDRESS, 0, devnum, 0, 2424 retval = hcd->driver->address_device(hcd, udev);
2407 NULL, 0, USB_CTRL_SET_TIMEOUT); 2425 } else {
2426 retval = usb_control_msg(udev, usb_sndaddr0pipe(),
2427 USB_REQ_SET_ADDRESS, 0, devnum, 0,
2428 NULL, 0, USB_CTRL_SET_TIMEOUT);
2429 if (retval == 0)
2430 update_address(udev, devnum);
2431 }
2408 if (retval == 0) { 2432 if (retval == 0) {
2409 /* Device now using proper address. */ 2433 /* Device now using proper address. */
2410 update_address(udev, devnum);
2411 usb_set_device_state(udev, USB_STATE_ADDRESS); 2434 usb_set_device_state(udev, USB_STATE_ADDRESS);
2412 usb_ep0_reinit(udev); 2435 usb_ep0_reinit(udev);
2413 } 2436 }
@@ -2430,6 +2453,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2430 static DEFINE_MUTEX(usb_address0_mutex); 2453 static DEFINE_MUTEX(usb_address0_mutex);
2431 2454
2432 struct usb_device *hdev = hub->hdev; 2455 struct usb_device *hdev = hub->hdev;
2456 struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
2433 int i, j, retval; 2457 int i, j, retval;
2434 unsigned delay = HUB_SHORT_RESET_TIME; 2458 unsigned delay = HUB_SHORT_RESET_TIME;
2435 enum usb_device_speed oldspeed = udev->speed; 2459 enum usb_device_speed oldspeed = udev->speed;
@@ -2452,11 +2476,24 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2452 2476
2453 mutex_lock(&usb_address0_mutex); 2477 mutex_lock(&usb_address0_mutex);
2454 2478
2455 /* Reset the device; full speed may morph to high speed */ 2479 if ((hcd->driver->flags & HCD_USB3) && udev->config) {
2456 retval = hub_port_reset(hub, port1, udev, delay); 2480 /* FIXME this will need special handling by the xHCI driver. */
2457 if (retval < 0) /* error or disconnect */ 2481 dev_dbg(&udev->dev,
2482 "xHCI reset of configured device "
2483 "not supported yet.\n");
2484 retval = -EINVAL;
2458 goto fail; 2485 goto fail;
2459 /* success, speed is known */ 2486 } else if (!udev->config && oldspeed == USB_SPEED_SUPER) {
2487 /* Don't reset USB 3.0 devices during an initial setup */
2488 usb_set_device_state(udev, USB_STATE_DEFAULT);
2489 } else {
2490 /* Reset the device; full speed may morph to high speed */
2491 /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
2492 retval = hub_port_reset(hub, port1, udev, delay);
2493 if (retval < 0) /* error or disconnect */
2494 goto fail;
2495 /* success, speed is known */
2496 }
2460 retval = -ENODEV; 2497 retval = -ENODEV;
2461 2498
2462 if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { 2499 if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
@@ -2471,6 +2508,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2471 * reported as 0xff in the device descriptor). WUSB1.0[4.8.1]. 2508 * reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
2472 */ 2509 */
2473 switch (udev->speed) { 2510 switch (udev->speed) {
2511 case USB_SPEED_SUPER:
2474 case USB_SPEED_VARIABLE: /* fixed at 512 */ 2512 case USB_SPEED_VARIABLE: /* fixed at 512 */
2475 udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512); 2513 udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
2476 break; 2514 break;
@@ -2496,16 +2534,20 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2496 case USB_SPEED_LOW: speed = "low"; break; 2534 case USB_SPEED_LOW: speed = "low"; break;
2497 case USB_SPEED_FULL: speed = "full"; break; 2535 case USB_SPEED_FULL: speed = "full"; break;
2498 case USB_SPEED_HIGH: speed = "high"; break; 2536 case USB_SPEED_HIGH: speed = "high"; break;
2537 case USB_SPEED_SUPER:
2538 speed = "super";
2539 break;
2499 case USB_SPEED_VARIABLE: 2540 case USB_SPEED_VARIABLE:
2500 speed = "variable"; 2541 speed = "variable";
2501 type = "Wireless "; 2542 type = "Wireless ";
2502 break; 2543 break;
2503 default: speed = "?"; break; 2544 default: speed = "?"; break;
2504 } 2545 }
2505 dev_info (&udev->dev, 2546 if (udev->speed != USB_SPEED_SUPER)
2506 "%s %s speed %sUSB device using %s and address %d\n", 2547 dev_info(&udev->dev,
2507 (udev->config) ? "reset" : "new", speed, type, 2548 "%s %s speed %sUSB device using %s and address %d\n",
2508 udev->bus->controller->driver->name, devnum); 2549 (udev->config) ? "reset" : "new", speed, type,
2550 udev->bus->controller->driver->name, devnum);
2509 2551
2510 /* Set up TT records, if needed */ 2552 /* Set up TT records, if needed */
2511 if (hdev->tt) { 2553 if (hdev->tt) {
@@ -2530,7 +2572,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2530 * value. 2572 * value.
2531 */ 2573 */
2532 for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) { 2574 for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
2533 if (USE_NEW_SCHEME(retry_counter)) { 2575 /*
2576 * An xHCI controller cannot send any packets to a device until
2577 * a set address command successfully completes.
2578 */
2579 if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3)) {
2534 struct usb_device_descriptor *buf; 2580 struct usb_device_descriptor *buf;
2535 int r = 0; 2581 int r = 0;
2536 2582
@@ -2596,7 +2642,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2596 * unauthorized address in the Connect Ack sequence; 2642 * unauthorized address in the Connect Ack sequence;
2597 * authorization will assign the final address. 2643 * authorization will assign the final address.
2598 */ 2644 */
2599 if (udev->wusb == 0) { 2645 if (udev->wusb == 0) {
2600 for (j = 0; j < SET_ADDRESS_TRIES; ++j) { 2646 for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
2601 retval = hub_set_address(udev, devnum); 2647 retval = hub_set_address(udev, devnum);
2602 if (retval >= 0) 2648 if (retval >= 0)
@@ -2609,13 +2655,20 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2609 devnum, retval); 2655 devnum, retval);
2610 goto fail; 2656 goto fail;
2611 } 2657 }
2658 if (udev->speed == USB_SPEED_SUPER) {
2659 devnum = udev->devnum;
2660 dev_info(&udev->dev,
2661 "%s SuperSpeed USB device using %s and address %d\n",
2662 (udev->config) ? "reset" : "new",
2663 udev->bus->controller->driver->name, devnum);
2664 }
2612 2665
2613 /* cope with hardware quirkiness: 2666 /* cope with hardware quirkiness:
2614 * - let SET_ADDRESS settle, some device hardware wants it 2667 * - let SET_ADDRESS settle, some device hardware wants it
2615 * - read ep0 maxpacket even for high and low speed, 2668 * - read ep0 maxpacket even for high and low speed,
2616 */ 2669 */
2617 msleep(10); 2670 msleep(10);
2618 if (USE_NEW_SCHEME(retry_counter)) 2671 if (USE_NEW_SCHEME(retry_counter) && !(hcd->driver->flags & HCD_USB3))
2619 break; 2672 break;
2620 } 2673 }
2621 2674
@@ -2634,8 +2687,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2634 if (retval) 2687 if (retval)
2635 goto fail; 2688 goto fail;
2636 2689
2637 i = udev->descriptor.bMaxPacketSize0 == 0xff? /* wusb device? */ 2690 if (udev->descriptor.bMaxPacketSize0 == 0xff ||
2638 512 : udev->descriptor.bMaxPacketSize0; 2691 udev->speed == USB_SPEED_SUPER)
2692 i = 512;
2693 else
2694 i = udev->descriptor.bMaxPacketSize0;
2639 if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) { 2695 if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) {
2640 if (udev->speed != USB_SPEED_FULL || 2696 if (udev->speed != USB_SPEED_FULL ||
2641 !(i == 8 || i == 16 || i == 32 || i == 64)) { 2697 !(i == 8 || i == 16 || i == 32 || i == 64)) {
@@ -2847,19 +2903,41 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
2847 } 2903 }
2848 2904
2849 usb_set_device_state(udev, USB_STATE_POWERED); 2905 usb_set_device_state(udev, USB_STATE_POWERED);
2850 udev->speed = USB_SPEED_UNKNOWN;
2851 udev->bus_mA = hub->mA_per_port; 2906 udev->bus_mA = hub->mA_per_port;
2852 udev->level = hdev->level + 1; 2907 udev->level = hdev->level + 1;
2853 udev->wusb = hub_is_wusb(hub); 2908 udev->wusb = hub_is_wusb(hub);
2854 2909
2855 /* set the address */ 2910 /*
2856 choose_address(udev); 2911 * USB 3.0 devices are reset automatically before the connect
2857 if (udev->devnum <= 0) { 2912 * port status change appears, and the root hub port status
2858 status = -ENOTCONN; /* Don't retry */ 2913 * shows the correct speed. We also get port change
2859 goto loop; 2914 * notifications for USB 3.0 devices from the USB 3.0 portion of
2915 * an external USB 3.0 hub, but this isn't handled correctly yet
2916 * FIXME.
2917 */
2918
2919 if (!(hcd->driver->flags & HCD_USB3))
2920 udev->speed = USB_SPEED_UNKNOWN;
2921 else if ((hdev->parent == NULL) &&
2922 (portstatus & (1 << USB_PORT_FEAT_SUPERSPEED)))
2923 udev->speed = USB_SPEED_SUPER;
2924 else
2925 udev->speed = USB_SPEED_UNKNOWN;
2926
2927 /*
2928 * xHCI needs to issue an address device command later
2929 * in the hub_port_init sequence for SS/HS/FS/LS devices.
2930 */
2931 if (!(hcd->driver->flags & HCD_USB3)) {
2932 /* set the address */
2933 choose_address(udev);
2934 if (udev->devnum <= 0) {
2935 status = -ENOTCONN; /* Don't retry */
2936 goto loop;
2937 }
2860 } 2938 }
2861 2939
2862 /* reset and get descriptor */ 2940 /* reset (non-USB 3.0 devices) and get descriptor */
2863 status = hub_port_init(hub, udev, port1, i); 2941 status = hub_port_init(hub, udev, port1, i);
2864 if (status < 0) 2942 if (status < 0)
2865 goto loop; 2943 goto loop;
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 2a116ce53c9b..889c0f32a40b 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -47,7 +47,10 @@
47#define USB_PORT_FEAT_L1 5 /* L1 suspend */ 47#define USB_PORT_FEAT_L1 5 /* L1 suspend */
48#define USB_PORT_FEAT_POWER 8 48#define USB_PORT_FEAT_POWER 8
49#define USB_PORT_FEAT_LOWSPEED 9 49#define USB_PORT_FEAT_LOWSPEED 9
50/* This value was never in Table 11-17 */
50#define USB_PORT_FEAT_HIGHSPEED 10 51#define USB_PORT_FEAT_HIGHSPEED 10
52/* This value is also fake */
53#define USB_PORT_FEAT_SUPERSPEED 11
51#define USB_PORT_FEAT_C_CONNECTION 16 54#define USB_PORT_FEAT_C_CONNECTION 16
52#define USB_PORT_FEAT_C_ENABLE 17 55#define USB_PORT_FEAT_C_ENABLE 17
53#define USB_PORT_FEAT_C_SUSPEND 18 56#define USB_PORT_FEAT_C_SUSPEND 18
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index b62628377654..2bed83caacb1 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -10,6 +10,7 @@
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/timer.h> 11#include <linux/timer.h>
12#include <linux/ctype.h> 12#include <linux/ctype.h>
13#include <linux/nls.h>
13#include <linux/device.h> 14#include <linux/device.h>
14#include <linux/scatterlist.h> 15#include <linux/scatterlist.h>
15#include <linux/usb/quirks.h> 16#include <linux/usb/quirks.h>
@@ -364,6 +365,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
364 int i; 365 int i;
365 int urb_flags; 366 int urb_flags;
366 int dma; 367 int dma;
368 int use_sg;
367 369
368 if (!io || !dev || !sg 370 if (!io || !dev || !sg
369 || usb_pipecontrol(pipe) 371 || usb_pipecontrol(pipe)
@@ -391,7 +393,19 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
391 if (io->entries <= 0) 393 if (io->entries <= 0)
392 return io->entries; 394 return io->entries;
393 395
394 io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags); 396 /* If we're running on an xHCI host controller, queue the whole scatter
397 * gather list with one call to urb_enqueue(). This is only for bulk,
398 * as that endpoint type does not care how the data gets broken up
399 * across frames.
400 */
401 if (usb_pipebulk(pipe) &&
402 bus_to_hcd(dev->bus)->driver->flags & HCD_USB3) {
403 io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
404 use_sg = true;
405 } else {
406 io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
407 use_sg = false;
408 }
395 if (!io->urbs) 409 if (!io->urbs)
396 goto nomem; 410 goto nomem;
397 411
@@ -401,62 +415,92 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
401 if (usb_pipein(pipe)) 415 if (usb_pipein(pipe))
402 urb_flags |= URB_SHORT_NOT_OK; 416 urb_flags |= URB_SHORT_NOT_OK;
403 417
404 for_each_sg(sg, sg, io->entries, i) { 418 if (use_sg) {
405 unsigned len; 419 io->urbs[0] = usb_alloc_urb(0, mem_flags);
406 420 if (!io->urbs[0]) {
407 io->urbs[i] = usb_alloc_urb(0, mem_flags); 421 io->entries = 0;
408 if (!io->urbs[i]) {
409 io->entries = i;
410 goto nomem; 422 goto nomem;
411 } 423 }
412 424
413 io->urbs[i]->dev = NULL; 425 io->urbs[0]->dev = NULL;
414 io->urbs[i]->pipe = pipe; 426 io->urbs[0]->pipe = pipe;
415 io->urbs[i]->interval = period; 427 io->urbs[0]->interval = period;
416 io->urbs[i]->transfer_flags = urb_flags; 428 io->urbs[0]->transfer_flags = urb_flags;
417 429
418 io->urbs[i]->complete = sg_complete; 430 io->urbs[0]->complete = sg_complete;
419 io->urbs[i]->context = io; 431 io->urbs[0]->context = io;
420 432 /* A length of zero means transfer the whole sg list */
421 /* 433 io->urbs[0]->transfer_buffer_length = length;
422 * Some systems need to revert to PIO when DMA is temporarily 434 if (length == 0) {
423 * unavailable. For their sakes, both transfer_buffer and 435 for_each_sg(sg, sg, io->entries, i) {
424 * transfer_dma are set when possible. However this can only 436 io->urbs[0]->transfer_buffer_length +=
425 * work on systems without: 437 sg_dma_len(sg);
426 * 438 }
427 * - HIGHMEM, since DMA buffers located in high memory are 439 }
428 * not directly addressable by the CPU for PIO; 440 io->urbs[0]->sg = io;
429 * 441 io->urbs[0]->num_sgs = io->entries;
430 * - IOMMU, since dma_map_sg() is allowed to use an IOMMU to 442 io->entries = 1;
431 * make virtually discontiguous buffers be "dma-contiguous" 443 } else {
432 * so that PIO and DMA need diferent numbers of URBs. 444 for_each_sg(sg, sg, io->entries, i) {
433 * 445 unsigned len;
434 * So when HIGHMEM or IOMMU are in use, transfer_buffer is NULL 446
435 * to prevent stale pointers and to help spot bugs. 447 io->urbs[i] = usb_alloc_urb(0, mem_flags);
436 */ 448 if (!io->urbs[i]) {
437 if (dma) { 449 io->entries = i;
438 io->urbs[i]->transfer_dma = sg_dma_address(sg); 450 goto nomem;
439 len = sg_dma_len(sg); 451 }
452
453 io->urbs[i]->dev = NULL;
454 io->urbs[i]->pipe = pipe;
455 io->urbs[i]->interval = period;
456 io->urbs[i]->transfer_flags = urb_flags;
457
458 io->urbs[i]->complete = sg_complete;
459 io->urbs[i]->context = io;
460
461 /*
462 * Some systems need to revert to PIO when DMA is
463 * temporarily unavailable. For their sakes, both
464 * transfer_buffer and transfer_dma are set when
465 * possible. However this can only work on systems
466 * without:
467 *
468 * - HIGHMEM, since DMA buffers located in high memory
469 * are not directly addressable by the CPU for PIO;
470 *
471 * - IOMMU, since dma_map_sg() is allowed to use an
472 * IOMMU to make virtually discontiguous buffers be
473 * "dma-contiguous" so that PIO and DMA need diferent
474 * numbers of URBs.
475 *
476 * So when HIGHMEM or IOMMU are in use, transfer_buffer
477 * is NULL to prevent stale pointers and to help spot
478 * bugs.
479 */
480 if (dma) {
481 io->urbs[i]->transfer_dma = sg_dma_address(sg);
482 len = sg_dma_len(sg);
440#if defined(CONFIG_HIGHMEM) || defined(CONFIG_GART_IOMMU) 483#if defined(CONFIG_HIGHMEM) || defined(CONFIG_GART_IOMMU)
441 io->urbs[i]->transfer_buffer = NULL; 484 io->urbs[i]->transfer_buffer = NULL;
442#else 485#else
443 io->urbs[i]->transfer_buffer = sg_virt(sg); 486 io->urbs[i]->transfer_buffer = sg_virt(sg);
444#endif 487#endif
445 } else { 488 } else {
446 /* hc may use _only_ transfer_buffer */ 489 /* hc may use _only_ transfer_buffer */
447 io->urbs[i]->transfer_buffer = sg_virt(sg); 490 io->urbs[i]->transfer_buffer = sg_virt(sg);
448 len = sg->length; 491 len = sg->length;
449 } 492 }
450 493
451 if (length) { 494 if (length) {
452 len = min_t(unsigned, len, length); 495 len = min_t(unsigned, len, length);
453 length -= len; 496 length -= len;
454 if (length == 0) 497 if (length == 0)
455 io->entries = i + 1; 498 io->entries = i + 1;
499 }
500 io->urbs[i]->transfer_buffer_length = len;
456 } 501 }
457 io->urbs[i]->transfer_buffer_length = len; 502 io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
458 } 503 }
459 io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
460 504
461 /* transaction state */ 505 /* transaction state */
462 io->count = io->entries; 506 io->count = io->entries;
@@ -509,6 +553,10 @@ EXPORT_SYMBOL_GPL(usb_sg_init);
509 * could be transferred. That capability is less useful for low or full 553 * could be transferred. That capability is less useful for low or full
510 * speed interrupt endpoints, which allow at most one packet per millisecond, 554 * speed interrupt endpoints, which allow at most one packet per millisecond,
511 * of at most 8 or 64 bytes (respectively). 555 * of at most 8 or 64 bytes (respectively).
556 *
557 * It is not necessary to call this function to reserve bandwidth for devices
558 * under an xHCI host controller, as the bandwidth is reserved when the
559 * configuration or interface alt setting is selected.
512 */ 560 */
513void usb_sg_wait(struct usb_sg_request *io) 561void usb_sg_wait(struct usb_sg_request *io)
514{ 562{
@@ -759,7 +807,7 @@ static int usb_string_sub(struct usb_device *dev, unsigned int langid,
759} 807}
760 808
761/** 809/**
762 * usb_string - returns ISO 8859-1 version of a string descriptor 810 * usb_string - returns UTF-8 version of a string descriptor
763 * @dev: the device whose string descriptor is being retrieved 811 * @dev: the device whose string descriptor is being retrieved
764 * @index: the number of the descriptor 812 * @index: the number of the descriptor
765 * @buf: where to put the string 813 * @buf: where to put the string
@@ -767,17 +815,10 @@ static int usb_string_sub(struct usb_device *dev, unsigned int langid,
767 * Context: !in_interrupt () 815 * Context: !in_interrupt ()
768 * 816 *
769 * This converts the UTF-16LE encoded strings returned by devices, from 817 * This converts the UTF-16LE encoded strings returned by devices, from
770 * usb_get_string_descriptor(), to null-terminated ISO-8859-1 encoded ones 818 * usb_get_string_descriptor(), to null-terminated UTF-8 encoded ones
771 * that are more usable in most kernel contexts. Note that all characters 819 * that are more usable in most kernel contexts. Note that this function
772 * in the chosen descriptor that can't be encoded using ISO-8859-1
773 * are converted to the question mark ("?") character, and this function
774 * chooses strings in the first language supported by the device. 820 * chooses strings in the first language supported by the device.
775 * 821 *
776 * The ASCII (or, redundantly, "US-ASCII") character set is the seven-bit
777 * subset of ISO 8859-1. ISO-8859-1 is the eight-bit subset of Unicode,
778 * and is appropriate for use many uses of English and several other
779 * Western European languages. (But it doesn't include the "Euro" symbol.)
780 *
781 * This call is synchronous, and may not be used in an interrupt context. 822 * This call is synchronous, and may not be used in an interrupt context.
782 * 823 *
783 * Returns length of the string (>= 0) or usb_control_msg status (< 0). 824 * Returns length of the string (>= 0) or usb_control_msg status (< 0).
@@ -786,7 +827,6 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
786{ 827{
787 unsigned char *tbuf; 828 unsigned char *tbuf;
788 int err; 829 int err;
789 unsigned int u, idx;
790 830
791 if (dev->state == USB_STATE_SUSPENDED) 831 if (dev->state == USB_STATE_SUSPENDED)
792 return -EHOSTUNREACH; 832 return -EHOSTUNREACH;
@@ -821,16 +861,9 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
821 goto errout; 861 goto errout;
822 862
823 size--; /* leave room for trailing NULL char in output buffer */ 863 size--; /* leave room for trailing NULL char in output buffer */
824 for (idx = 0, u = 2; u < err; u += 2) { 864 err = utf16s_to_utf8s((wchar_t *) &tbuf[2], (err - 2) / 2,
825 if (idx >= size) 865 UTF16_LITTLE_ENDIAN, buf, size);
826 break; 866 buf[err] = 0;
827 if (tbuf[u+1]) /* high byte */
828 buf[idx++] = '?'; /* non ISO-8859-1 character */
829 else
830 buf[idx++] = tbuf[u];
831 }
832 buf[idx] = 0;
833 err = idx;
834 867
835 if (tbuf[1] != USB_DT_STRING) 868 if (tbuf[1] != USB_DT_STRING)
836 dev_dbg(&dev->dev, 869 dev_dbg(&dev->dev,
@@ -843,6 +876,9 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
843} 876}
844EXPORT_SYMBOL_GPL(usb_string); 877EXPORT_SYMBOL_GPL(usb_string);
845 878
879/* one UTF-8-encoded 16-bit character has at most three bytes */
880#define MAX_USB_STRING_SIZE (127 * 3 + 1)
881
846/** 882/**
847 * usb_cache_string - read a string descriptor and cache it for later use 883 * usb_cache_string - read a string descriptor and cache it for later use
848 * @udev: the device whose string descriptor is being read 884 * @udev: the device whose string descriptor is being read
@@ -860,9 +896,9 @@ char *usb_cache_string(struct usb_device *udev, int index)
860 if (index <= 0) 896 if (index <= 0)
861 return NULL; 897 return NULL;
862 898
863 buf = kmalloc(256, GFP_KERNEL); 899 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL);
864 if (buf) { 900 if (buf) {
865 len = usb_string(udev, index, buf, 256); 901 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
866 if (len > 0) { 902 if (len > 0) {
867 smallbuf = kmalloc(++len, GFP_KERNEL); 903 smallbuf = kmalloc(++len, GFP_KERNEL);
868 if (!smallbuf) 904 if (!smallbuf)
@@ -1664,6 +1700,21 @@ free_interfaces:
1664 if (ret) 1700 if (ret)
1665 goto free_interfaces; 1701 goto free_interfaces;
1666 1702
1703 /* Make sure we have bandwidth (and available HCD resources) for this
1704 * configuration. Remove endpoints from the schedule if we're dropping
1705 * this configuration to set configuration 0. After this point, the
1706 * host controller will not allow submissions to dropped endpoints. If
1707 * this call fails, the device state is unchanged.
1708 */
1709 if (cp)
1710 ret = usb_hcd_check_bandwidth(dev, cp, NULL);
1711 else
1712 ret = usb_hcd_check_bandwidth(dev, NULL, NULL);
1713 if (ret < 0) {
1714 usb_autosuspend_device(dev);
1715 goto free_interfaces;
1716 }
1717
1667 /* if it's already configured, clear out old state first. 1718 /* if it's already configured, clear out old state first.
1668 * getting rid of old interfaces means unbinding their drivers. 1719 * getting rid of old interfaces means unbinding their drivers.
1669 */ 1720 */
@@ -1686,6 +1737,7 @@ free_interfaces:
1686 dev->actconfig = cp; 1737 dev->actconfig = cp;
1687 if (!cp) { 1738 if (!cp) {
1688 usb_set_device_state(dev, USB_STATE_ADDRESS); 1739 usb_set_device_state(dev, USB_STATE_ADDRESS);
1740 usb_hcd_check_bandwidth(dev, NULL, NULL);
1689 usb_autosuspend_device(dev); 1741 usb_autosuspend_device(dev);
1690 goto free_interfaces; 1742 goto free_interfaces;
1691 } 1743 }
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index c66789197927..b5c72e458943 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -552,8 +552,8 @@ static struct attribute *dev_string_attrs[] = {
552static mode_t dev_string_attrs_are_visible(struct kobject *kobj, 552static mode_t dev_string_attrs_are_visible(struct kobject *kobj,
553 struct attribute *a, int n) 553 struct attribute *a, int n)
554{ 554{
555 struct usb_device *udev = to_usb_device( 555 struct device *dev = container_of(kobj, struct device, kobj);
556 container_of(kobj, struct device, kobj)); 556 struct usb_device *udev = to_usb_device(dev);
557 557
558 if (a == &dev_attr_manufacturer.attr) { 558 if (a == &dev_attr_manufacturer.attr) {
559 if (udev->manufacturer == NULL) 559 if (udev->manufacturer == NULL)
@@ -585,8 +585,8 @@ static ssize_t
585read_descriptors(struct kobject *kobj, struct bin_attribute *attr, 585read_descriptors(struct kobject *kobj, struct bin_attribute *attr,
586 char *buf, loff_t off, size_t count) 586 char *buf, loff_t off, size_t count)
587{ 587{
588 struct usb_device *udev = to_usb_device( 588 struct device *dev = container_of(kobj, struct device, kobj);
589 container_of(kobj, struct device, kobj)); 589 struct usb_device *udev = to_usb_device(dev);
590 size_t nleft = count; 590 size_t nleft = count;
591 size_t srclen, n; 591 size_t srclen, n;
592 int cfgno; 592 int cfgno;
@@ -786,8 +786,8 @@ static struct attribute *intf_assoc_attrs[] = {
786static mode_t intf_assoc_attrs_are_visible(struct kobject *kobj, 786static mode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
787 struct attribute *a, int n) 787 struct attribute *a, int n)
788{ 788{
789 struct usb_interface *intf = to_usb_interface( 789 struct device *dev = container_of(kobj, struct device, kobj);
790 container_of(kobj, struct device, kobj)); 790 struct usb_interface *intf = to_usb_interface(dev);
791 791
792 if (intf->intf_assoc == NULL) 792 if (intf->intf_assoc == NULL)
793 return 0; 793 return 0;
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 3376055f36e7..0885d4abdc62 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -241,6 +241,12 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
241 * If the USB subsystem can't allocate sufficient bandwidth to perform 241 * If the USB subsystem can't allocate sufficient bandwidth to perform
242 * the periodic request, submitting such a periodic request should fail. 242 * the periodic request, submitting such a periodic request should fail.
243 * 243 *
244 * For devices under xHCI, the bandwidth is reserved at configuration time, or
245 * when the alt setting is selected. If there is not enough bus bandwidth, the
246 * configuration/alt setting request will fail. Therefore, submissions to
247 * periodic endpoints on devices under xHCI should never fail due to bandwidth
248 * constraints.
249 *
244 * Device drivers must explicitly request that repetition, by ensuring that 250 * Device drivers must explicitly request that repetition, by ensuring that
245 * some URB is always on the endpoint's queue (except possibly for short 251 * some URB is always on the endpoint's queue (except possibly for short
246 * periods during completion callacks). When there is no longer an urb 252 * periods during completion callacks). When there is no longer an urb
@@ -351,6 +357,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
351 if (xfertype == USB_ENDPOINT_XFER_ISOC) { 357 if (xfertype == USB_ENDPOINT_XFER_ISOC) {
352 int n, len; 358 int n, len;
353 359
360 /* FIXME SuperSpeed isoc endpoints have up to 16 bursts */
354 /* "high bandwidth" mode, 1-3 packets/uframe? */ 361 /* "high bandwidth" mode, 1-3 packets/uframe? */
355 if (dev->speed == USB_SPEED_HIGH) { 362 if (dev->speed == USB_SPEED_HIGH) {
356 int mult = 1 + ((max >> 11) & 0x03); 363 int mult = 1 + ((max >> 11) & 0x03);
@@ -426,6 +433,11 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
426 return -EINVAL; 433 return -EINVAL;
427 /* too big? */ 434 /* too big? */
428 switch (dev->speed) { 435 switch (dev->speed) {
436 case USB_SPEED_SUPER: /* units are 125us */
437 /* Handle up to 2^(16-1) microframes */
438 if (urb->interval > (1 << 15))
439 return -EINVAL;
440 max = 1 << 15;
429 case USB_SPEED_HIGH: /* units are microframes */ 441 case USB_SPEED_HIGH: /* units are microframes */
430 /* NOTE usb handles 2^15 */ 442 /* NOTE usb handles 2^15 */
431 if (urb->interval > (1024 * 8)) 443 if (urb->interval > (1024 * 8))
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 7eee400d3e32..a26f73880c32 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -34,6 +34,7 @@
34#include <linux/usb.h> 34#include <linux/usb.h>
35#include <linux/mutex.h> 35#include <linux/mutex.h>
36#include <linux/workqueue.h> 36#include <linux/workqueue.h>
37#include <linux/debugfs.h>
37 38
38#include <asm/io.h> 39#include <asm/io.h>
39#include <linux/scatterlist.h> 40#include <linux/scatterlist.h>
@@ -139,8 +140,7 @@ static int __find_interface(struct device *dev, void *data)
139 struct find_interface_arg *arg = data; 140 struct find_interface_arg *arg = data;
140 struct usb_interface *intf; 141 struct usb_interface *intf;
141 142
142 /* can't look at usb devices, only interfaces */ 143 if (!is_usb_interface(dev))
143 if (is_usb_device(dev))
144 return 0; 144 return 0;
145 145
146 intf = to_usb_interface(dev); 146 intf = to_usb_interface(dev);
@@ -184,11 +184,16 @@ EXPORT_SYMBOL_GPL(usb_find_interface);
184static void usb_release_dev(struct device *dev) 184static void usb_release_dev(struct device *dev)
185{ 185{
186 struct usb_device *udev; 186 struct usb_device *udev;
187 struct usb_hcd *hcd;
187 188
188 udev = to_usb_device(dev); 189 udev = to_usb_device(dev);
190 hcd = bus_to_hcd(udev->bus);
189 191
190 usb_destroy_configuration(udev); 192 usb_destroy_configuration(udev);
191 usb_put_hcd(bus_to_hcd(udev->bus)); 193 /* Root hubs aren't real devices, so don't free HCD resources */
194 if (hcd->driver->free_dev && udev->parent)
195 hcd->driver->free_dev(hcd, udev);
196 usb_put_hcd(hcd);
192 kfree(udev->product); 197 kfree(udev->product);
193 kfree(udev->manufacturer); 198 kfree(udev->manufacturer);
194 kfree(udev->serial); 199 kfree(udev->serial);
@@ -305,10 +310,21 @@ static struct dev_pm_ops usb_device_pm_ops = {
305 310
306#endif /* CONFIG_PM */ 311#endif /* CONFIG_PM */
307 312
313
314static char *usb_nodename(struct device *dev)
315{
316 struct usb_device *usb_dev;
317
318 usb_dev = to_usb_device(dev);
319 return kasprintf(GFP_KERNEL, "bus/usb/%03d/%03d",
320 usb_dev->bus->busnum, usb_dev->devnum);
321}
322
308struct device_type usb_device_type = { 323struct device_type usb_device_type = {
309 .name = "usb_device", 324 .name = "usb_device",
310 .release = usb_release_dev, 325 .release = usb_release_dev,
311 .uevent = usb_dev_uevent, 326 .uevent = usb_dev_uevent,
327 .nodename = usb_nodename,
312 .pm = &usb_device_pm_ops, 328 .pm = &usb_device_pm_ops,
313}; 329};
314 330
@@ -348,6 +364,13 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
348 kfree(dev); 364 kfree(dev);
349 return NULL; 365 return NULL;
350 } 366 }
367 /* Root hubs aren't true devices, so don't allocate HCD resources */
368 if (usb_hcd->driver->alloc_dev && parent &&
369 !usb_hcd->driver->alloc_dev(usb_hcd, dev)) {
370 usb_put_hcd(bus_to_hcd(bus));
371 kfree(dev);
372 return NULL;
373 }
351 374
352 device_initialize(&dev->dev); 375 device_initialize(&dev->dev);
353 dev->dev.bus = &usb_bus_type; 376 dev->dev.bus = &usb_bus_type;
@@ -375,18 +398,24 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
375 */ 398 */
376 if (unlikely(!parent)) { 399 if (unlikely(!parent)) {
377 dev->devpath[0] = '0'; 400 dev->devpath[0] = '0';
401 dev->route = 0;
378 402
379 dev->dev.parent = bus->controller; 403 dev->dev.parent = bus->controller;
380 dev_set_name(&dev->dev, "usb%d", bus->busnum); 404 dev_set_name(&dev->dev, "usb%d", bus->busnum);
381 root_hub = 1; 405 root_hub = 1;
382 } else { 406 } else {
383 /* match any labeling on the hubs; it's one-based */ 407 /* match any labeling on the hubs; it's one-based */
384 if (parent->devpath[0] == '0') 408 if (parent->devpath[0] == '0') {
385 snprintf(dev->devpath, sizeof dev->devpath, 409 snprintf(dev->devpath, sizeof dev->devpath,
386 "%d", port1); 410 "%d", port1);
387 else 411 /* Root ports are not counted in route string */
412 dev->route = 0;
413 } else {
388 snprintf(dev->devpath, sizeof dev->devpath, 414 snprintf(dev->devpath, sizeof dev->devpath,
389 "%s.%d", parent->devpath, port1); 415 "%s.%d", parent->devpath, port1);
416 dev->route = parent->route +
417 (port1 << ((parent->level - 1)*4));
418 }
390 419
391 dev->dev.parent = &parent->dev; 420 dev->dev.parent = &parent->dev;
392 dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath); 421 dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);
@@ -799,12 +828,12 @@ void usb_buffer_dmasync(struct urb *urb)
799 return; 828 return;
800 829
801 if (controller->dma_mask) { 830 if (controller->dma_mask) {
802 dma_sync_single(controller, 831 dma_sync_single_for_cpu(controller,
803 urb->transfer_dma, urb->transfer_buffer_length, 832 urb->transfer_dma, urb->transfer_buffer_length,
804 usb_pipein(urb->pipe) 833 usb_pipein(urb->pipe)
805 ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 834 ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
806 if (usb_pipecontrol(urb->pipe)) 835 if (usb_pipecontrol(urb->pipe))
807 dma_sync_single(controller, 836 dma_sync_single_for_cpu(controller,
808 urb->setup_dma, 837 urb->setup_dma,
809 sizeof(struct usb_ctrlrequest), 838 sizeof(struct usb_ctrlrequest),
810 DMA_TO_DEVICE); 839 DMA_TO_DEVICE);
@@ -922,8 +951,8 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
922 || !controller->dma_mask) 951 || !controller->dma_mask)
923 return; 952 return;
924 953
925 dma_sync_sg(controller, sg, n_hw_ents, 954 dma_sync_sg_for_cpu(controller, sg, n_hw_ents,
926 is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 955 is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
927} 956}
928EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg); 957EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
929#endif 958#endif
@@ -1001,6 +1030,35 @@ static struct notifier_block usb_bus_nb = {
1001 .notifier_call = usb_bus_notify, 1030 .notifier_call = usb_bus_notify,
1002}; 1031};
1003 1032
1033struct dentry *usb_debug_root;
1034EXPORT_SYMBOL_GPL(usb_debug_root);
1035
1036struct dentry *usb_debug_devices;
1037
1038static int usb_debugfs_init(void)
1039{
1040 usb_debug_root = debugfs_create_dir("usb", NULL);
1041 if (!usb_debug_root)
1042 return -ENOENT;
1043
1044 usb_debug_devices = debugfs_create_file("devices", 0444,
1045 usb_debug_root, NULL,
1046 &usbfs_devices_fops);
1047 if (!usb_debug_devices) {
1048 debugfs_remove(usb_debug_root);
1049 usb_debug_root = NULL;
1050 return -ENOENT;
1051 }
1052
1053 return 0;
1054}
1055
1056static void usb_debugfs_cleanup(void)
1057{
1058 debugfs_remove(usb_debug_devices);
1059 debugfs_remove(usb_debug_root);
1060}
1061
1004/* 1062/*
1005 * Init 1063 * Init
1006 */ 1064 */
@@ -1012,6 +1070,10 @@ static int __init usb_init(void)
1012 return 0; 1070 return 0;
1013 } 1071 }
1014 1072
1073 retval = usb_debugfs_init();
1074 if (retval)
1075 goto out;
1076
1015 retval = ksuspend_usb_init(); 1077 retval = ksuspend_usb_init();
1016 if (retval) 1078 if (retval)
1017 goto out; 1079 goto out;
@@ -1021,9 +1083,6 @@ static int __init usb_init(void)
1021 retval = bus_register_notifier(&usb_bus_type, &usb_bus_nb); 1083 retval = bus_register_notifier(&usb_bus_type, &usb_bus_nb);
1022 if (retval) 1084 if (retval)
1023 goto bus_notifier_failed; 1085 goto bus_notifier_failed;
1024 retval = usb_host_init();
1025 if (retval)
1026 goto host_init_failed;
1027 retval = usb_major_init(); 1086 retval = usb_major_init();
1028 if (retval) 1087 if (retval)
1029 goto major_init_failed; 1088 goto major_init_failed;
@@ -1053,8 +1112,6 @@ usb_devio_init_failed:
1053driver_register_failed: 1112driver_register_failed:
1054 usb_major_cleanup(); 1113 usb_major_cleanup();
1055major_init_failed: 1114major_init_failed:
1056 usb_host_cleanup();
1057host_init_failed:
1058 bus_unregister_notifier(&usb_bus_type, &usb_bus_nb); 1115 bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
1059bus_notifier_failed: 1116bus_notifier_failed:
1060 bus_unregister(&usb_bus_type); 1117 bus_unregister(&usb_bus_type);
@@ -1079,10 +1136,10 @@ static void __exit usb_exit(void)
1079 usb_deregister(&usbfs_driver); 1136 usb_deregister(&usbfs_driver);
1080 usb_devio_cleanup(); 1137 usb_devio_cleanup();
1081 usb_hub_cleanup(); 1138 usb_hub_cleanup();
1082 usb_host_cleanup();
1083 bus_unregister_notifier(&usb_bus_type, &usb_bus_nb); 1139 bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
1084 bus_unregister(&usb_bus_type); 1140 bus_unregister(&usb_bus_type);
1085 ksuspend_usb_cleanup(); 1141 ksuspend_usb_cleanup();
1142 usb_debugfs_cleanup();
1086} 1143}
1087 1144
1088subsys_initcall(usb_init); 1145subsys_initcall(usb_init);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 79d8a9ea559b..e2a8cfaade1d 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -41,8 +41,6 @@ extern int usb_hub_init(void);
41extern void usb_hub_cleanup(void); 41extern void usb_hub_cleanup(void);
42extern int usb_major_init(void); 42extern int usb_major_init(void);
43extern void usb_major_cleanup(void); 43extern void usb_major_cleanup(void);
44extern int usb_host_init(void);
45extern void usb_host_cleanup(void);
46 44
47#ifdef CONFIG_PM 45#ifdef CONFIG_PM
48 46
@@ -106,6 +104,7 @@ extern struct workqueue_struct *ksuspend_usb_wq;
106extern struct bus_type usb_bus_type; 104extern struct bus_type usb_bus_type;
107extern struct device_type usb_device_type; 105extern struct device_type usb_device_type;
108extern struct device_type usb_if_device_type; 106extern struct device_type usb_if_device_type;
107extern struct device_type usb_ep_device_type;
109extern struct usb_device_driver usb_generic_driver; 108extern struct usb_device_driver usb_generic_driver;
110 109
111static inline int is_usb_device(const struct device *dev) 110static inline int is_usb_device(const struct device *dev)
@@ -113,6 +112,16 @@ static inline int is_usb_device(const struct device *dev)
113 return dev->type == &usb_device_type; 112 return dev->type == &usb_device_type;
114} 113}
115 114
115static inline int is_usb_interface(const struct device *dev)
116{
117 return dev->type == &usb_if_device_type;
118}
119
120static inline int is_usb_endpoint(const struct device *dev)
121{
122 return dev->type == &usb_ep_device_type;
123}
124
116/* Do the same for device drivers and interface drivers. */ 125/* Do the same for device drivers and interface drivers. */
117 126
118static inline int is_usb_device_driver(struct device_driver *drv) 127static inline int is_usb_device_driver(struct device_driver *drv)
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 080bb1e4b847..5d1ddf485d1e 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -156,7 +156,7 @@ config USB_ATMEL_USBA
156 156
157config USB_GADGET_FSL_USB2 157config USB_GADGET_FSL_USB2
158 boolean "Freescale Highspeed USB DR Peripheral Controller" 158 boolean "Freescale Highspeed USB DR Peripheral Controller"
159 depends on FSL_SOC 159 depends on FSL_SOC || ARCH_MXC
160 select USB_GADGET_DUALSPEED 160 select USB_GADGET_DUALSPEED
161 help 161 help
162 Some of Freescale PowerPC processors have a High Speed 162 Some of Freescale PowerPC processors have a High Speed
@@ -253,7 +253,7 @@ config USB_PXA25X_SMALL
253 253
254config USB_GADGET_PXA27X 254config USB_GADGET_PXA27X
255 boolean "PXA 27x" 255 boolean "PXA 27x"
256 depends on ARCH_PXA && PXA27x 256 depends on ARCH_PXA && (PXA27x || PXA3xx)
257 select USB_OTG_UTILS 257 select USB_OTG_UTILS
258 help 258 help
259 Intel's PXA 27x series XScale ARM v5TE processors include 259 Intel's PXA 27x series XScale ARM v5TE processors include
@@ -272,6 +272,20 @@ config USB_PXA27X
272 default USB_GADGET 272 default USB_GADGET
273 select USB_GADGET_SELECTED 273 select USB_GADGET_SELECTED
274 274
275config USB_GADGET_S3C_HSOTG
276 boolean "S3C HS/OtG USB Device controller"
277 depends on S3C_DEV_USB_HSOTG
278 select USB_GADGET_S3C_HSOTG_PIO
279 help
280 The Samsung S3C64XX USB2.0 high-speed gadget controller
281 integrated into the S3C64XX series SoC.
282
283config USB_S3C_HSOTG
284 tristate
285 depends on USB_GADGET_S3C_HSOTG
286 default USB_GADGET
287 select USB_GADGET_SELECTED
288
275config USB_GADGET_S3C2410 289config USB_GADGET_S3C2410
276 boolean "S3C2410 USB Device Controller" 290 boolean "S3C2410 USB Device Controller"
277 depends on ARCH_S3C2410 291 depends on ARCH_S3C2410
@@ -460,6 +474,27 @@ config USB_GOKU
460 default USB_GADGET 474 default USB_GADGET
461 select USB_GADGET_SELECTED 475 select USB_GADGET_SELECTED
462 476
477config USB_GADGET_LANGWELL
478 boolean "Intel Langwell USB Device Controller"
479 depends on PCI
480 select USB_GADGET_DUALSPEED
481 help
482 Intel Langwell USB Device Controller is a High-Speed USB
483 On-The-Go device controller.
484
485 The number of programmable endpoints is different through
486 controller revision.
487
488 Say "y" to link the driver statically, or "m" to build a
489 dynamically linked module called "langwell_udc" and force all
490 gadget drivers to also be dynamically linked.
491
492config USB_LANGWELL
493 tristate
494 depends on USB_GADGET_LANGWELL
495 default USB_GADGET
496 select USB_GADGET_SELECTED
497
463 498
464# 499#
465# LAST -- dummy/emulated controller 500# LAST -- dummy/emulated controller
@@ -566,6 +601,20 @@ config USB_ZERO_HNPTEST
566 the "B-Peripheral" role, that device will use HNP to let this 601 the "B-Peripheral" role, that device will use HNP to let this
567 one serve as the USB host instead (in the "B-Host" role). 602 one serve as the USB host instead (in the "B-Host" role).
568 603
604config USB_AUDIO
605 tristate "Audio Gadget (EXPERIMENTAL)"
606 depends on SND
607 help
608 Gadget Audio is compatible with USB Audio Class specification 1.0.
609 It will include at least one AudioControl interface, zero or more
610 AudioStream interface and zero or more MIDIStream interface.
611
612 Gadget Audio will use on-board ALSA (CONFIG_SND) audio card to
613 playback or capture audio stream.
614
615 Say "y" to link the driver statically, or "m" to build a
616 dynamically linked module called "g_audio".
617
569config USB_ETH 618config USB_ETH
570 tristate "Ethernet Gadget (with CDC Ethernet support)" 619 tristate "Ethernet Gadget (with CDC Ethernet support)"
571 depends on NET 620 depends on NET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 39a51d746cb7..e6017e6bf6da 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -18,14 +18,21 @@ obj-$(CONFIG_USB_S3C2410) += s3c2410_udc.o
18obj-$(CONFIG_USB_AT91) += at91_udc.o 18obj-$(CONFIG_USB_AT91) += at91_udc.o
19obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o 19obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
20obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o 20obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
21fsl_usb2_udc-objs := fsl_udc_core.o
22ifeq ($(CONFIG_ARCH_MXC),y)
23fsl_usb2_udc-objs += fsl_mx3_udc.o
24endif
21obj-$(CONFIG_USB_M66592) += m66592-udc.o 25obj-$(CONFIG_USB_M66592) += m66592-udc.o
22obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o 26obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
23obj-$(CONFIG_USB_CI13XXX) += ci13xxx_udc.o 27obj-$(CONFIG_USB_CI13XXX) += ci13xxx_udc.o
28obj-$(CONFIG_USB_S3C_HSOTG) += s3c-hsotg.o
29obj-$(CONFIG_USB_LANGWELL) += langwell_udc.o
24 30
25# 31#
26# USB gadget drivers 32# USB gadget drivers
27# 33#
28g_zero-objs := zero.o 34g_zero-objs := zero.o
35g_audio-objs := audio.o
29g_ether-objs := ether.o 36g_ether-objs := ether.o
30g_serial-objs := serial.o 37g_serial-objs := serial.o
31g_midi-objs := gmidi.o 38g_midi-objs := gmidi.o
@@ -35,6 +42,7 @@ g_printer-objs := printer.o
35g_cdc-objs := cdc2.o 42g_cdc-objs := cdc2.o
36 43
37obj-$(CONFIG_USB_ZERO) += g_zero.o 44obj-$(CONFIG_USB_ZERO) += g_zero.o
45obj-$(CONFIG_USB_AUDIO) += g_audio.o
38obj-$(CONFIG_USB_ETH) += g_ether.o 46obj-$(CONFIG_USB_ETH) += g_ether.o
39obj-$(CONFIG_USB_GADGETFS) += gadgetfs.o 47obj-$(CONFIG_USB_GADGETFS) += gadgetfs.o
40obj-$(CONFIG_USB_FILE_STORAGE) += g_file_storage.o 48obj-$(CONFIG_USB_FILE_STORAGE) += g_file_storage.o
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 0b2bb8f0706d..72bae8f39d81 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -485,7 +485,7 @@ static int at91_ep_enable(struct usb_ep *_ep,
485 return -ESHUTDOWN; 485 return -ESHUTDOWN;
486 } 486 }
487 487
488 tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 488 tmp = usb_endpoint_type(desc);
489 switch (tmp) { 489 switch (tmp) {
490 case USB_ENDPOINT_XFER_CONTROL: 490 case USB_ENDPOINT_XFER_CONTROL:
491 DBG("only one control endpoint\n"); 491 DBG("only one control endpoint\n");
@@ -517,7 +517,7 @@ ok:
517 local_irq_save(flags); 517 local_irq_save(flags);
518 518
519 /* initialize endpoint to match this descriptor */ 519 /* initialize endpoint to match this descriptor */
520 ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0; 520 ep->is_in = usb_endpoint_dir_in(desc);
521 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC); 521 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
522 ep->stopped = 0; 522 ep->stopped = 0;
523 if (ep->is_in) 523 if (ep->is_in)
@@ -1574,7 +1574,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
1574 1574
1575 udc->driver = driver; 1575 udc->driver = driver;
1576 udc->gadget.dev.driver = &driver->driver; 1576 udc->gadget.dev.driver = &driver->driver;
1577 udc->gadget.dev.driver_data = &driver->driver; 1577 dev_set_drvdata(&udc->gadget.dev, &driver->driver);
1578 udc->enabled = 1; 1578 udc->enabled = 1;
1579 udc->selfpowered = 1; 1579 udc->selfpowered = 1;
1580 1580
@@ -1583,7 +1583,7 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
1583 DBG("driver->bind() returned %d\n", retval); 1583 DBG("driver->bind() returned %d\n", retval);
1584 udc->driver = NULL; 1584 udc->driver = NULL;
1585 udc->gadget.dev.driver = NULL; 1585 udc->gadget.dev.driver = NULL;
1586 udc->gadget.dev.driver_data = NULL; 1586 dev_set_drvdata(&udc->gadget.dev, NULL);
1587 udc->enabled = 0; 1587 udc->enabled = 0;
1588 udc->selfpowered = 0; 1588 udc->selfpowered = 0;
1589 return retval; 1589 return retval;
@@ -1613,7 +1613,7 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
1613 1613
1614 driver->unbind(&udc->gadget); 1614 driver->unbind(&udc->gadget);
1615 udc->gadget.dev.driver = NULL; 1615 udc->gadget.dev.driver = NULL;
1616 udc->gadget.dev.driver_data = NULL; 1616 dev_set_drvdata(&udc->gadget.dev, NULL);
1617 udc->driver = NULL; 1617 udc->driver = NULL;
1618 1618
1619 DBG("unbound from %s\n", driver->driver.name); 1619 DBG("unbound from %s\n", driver->driver.name);
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 05c913cc3658..4e970cf0e29a 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -326,13 +326,7 @@ static int vbus_is_present(struct usba_udc *udc)
326 return 1; 326 return 1;
327} 327}
328 328
329#if defined(CONFIG_AVR32) 329#if defined(CONFIG_ARCH_AT91SAM9RL)
330
331static void toggle_bias(int is_on)
332{
333}
334
335#elif defined(CONFIG_ARCH_AT91)
336 330
337#include <mach/at91_pmc.h> 331#include <mach/at91_pmc.h>
338 332
@@ -346,7 +340,13 @@ static void toggle_bias(int is_on)
346 at91_sys_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN)); 340 at91_sys_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
347} 341}
348 342
349#endif /* CONFIG_ARCH_AT91 */ 343#else
344
345static void toggle_bias(int is_on)
346{
347}
348
349#endif /* CONFIG_ARCH_AT91SAM9RL */
350 350
351static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req) 351static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
352{ 352{
@@ -550,12 +550,12 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
550 DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n", 550 DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
551 ep->ep.name, ept_cfg, maxpacket); 551 ep->ep.name, ept_cfg, maxpacket);
552 552
553 if ((desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) { 553 if (usb_endpoint_dir_in(desc)) {
554 ep->is_in = 1; 554 ep->is_in = 1;
555 ept_cfg |= USBA_EPT_DIR_IN; 555 ept_cfg |= USBA_EPT_DIR_IN;
556 } 556 }
557 557
558 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { 558 switch (usb_endpoint_type(desc)) {
559 case USB_ENDPOINT_XFER_CONTROL: 559 case USB_ENDPOINT_XFER_CONTROL:
560 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL); 560 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
561 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE); 561 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
new file mode 100644
index 000000000000..94de7e864614
--- /dev/null
+++ b/drivers/usb/gadget/audio.c
@@ -0,0 +1,302 @@
1/*
2 * audio.c -- Audio gadget driver
3 *
4 * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
5 * Copyright (C) 2008 Analog Devices, Inc
6 *
7 * Enter bugs at http://blackfin.uclinux.org/
8 *
9 * Licensed under the GPL-2 or later.
10 */
11
12/* #define VERBOSE_DEBUG */
13
14#include <linux/kernel.h>
15#include <linux/utsname.h>
16
17#include "u_audio.h"
18
19#define DRIVER_DESC "Linux USB Audio Gadget"
20#define DRIVER_VERSION "Dec 18, 2008"
21
22/*-------------------------------------------------------------------------*/
23
24/*
25 * Kbuild is not very cooperative with respect to linking separately
26 * compiled library objects into one module. So for now we won't use
27 * separate compilation ... ensuring init/exit sections work to shrink
28 * the runtime footprint, and giving us at least some parts of what
29 * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
30 */
31#include "composite.c"
32#include "usbstring.c"
33#include "config.c"
34#include "epautoconf.c"
35
36#include "u_audio.c"
37#include "f_audio.c"
38
39/*-------------------------------------------------------------------------*/
40
41/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
42 * Instead: allocate your own, using normal USB-IF procedures.
43 */
44
45/* Thanks to NetChip Technologies for donating this product ID. */
46#define AUDIO_VENDOR_NUM 0x0525 /* NetChip */
47#define AUDIO_PRODUCT_NUM 0xa4a1 /* Linux-USB Audio Gadget */
48
49/*-------------------------------------------------------------------------*/
50
51static struct usb_device_descriptor device_desc = {
52 .bLength = sizeof device_desc,
53 .bDescriptorType = USB_DT_DEVICE,
54
55 .bcdUSB = __constant_cpu_to_le16(0x200),
56
57 .bDeviceClass = USB_CLASS_PER_INTERFACE,
58 .bDeviceSubClass = 0,
59 .bDeviceProtocol = 0,
60 /* .bMaxPacketSize0 = f(hardware) */
61
62 /* Vendor and product id defaults change according to what configs
63 * we support. (As does bNumConfigurations.) These values can
64 * also be overridden by module parameters.
65 */
66 .idVendor = __constant_cpu_to_le16(AUDIO_VENDOR_NUM),
67 .idProduct = __constant_cpu_to_le16(AUDIO_PRODUCT_NUM),
68 /* .bcdDevice = f(hardware) */
69 /* .iManufacturer = DYNAMIC */
70 /* .iProduct = DYNAMIC */
71 /* NO SERIAL NUMBER */
72 .bNumConfigurations = 1,
73};
74
75static struct usb_otg_descriptor otg_descriptor = {
76 .bLength = sizeof otg_descriptor,
77 .bDescriptorType = USB_DT_OTG,
78
79 /* REVISIT SRP-only hardware is possible, although
80 * it would not be called "OTG" ...
81 */
82 .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
83};
84
85static const struct usb_descriptor_header *otg_desc[] = {
86 (struct usb_descriptor_header *) &otg_descriptor,
87 NULL,
88};
89
90/*-------------------------------------------------------------------------*/
91
92/**
93 * Handle USB audio endpoint set/get command in setup class request
94 */
95
96static int audio_set_endpoint_req(struct usb_configuration *c,
97 const struct usb_ctrlrequest *ctrl)
98{
99 struct usb_composite_dev *cdev = c->cdev;
100 int value = -EOPNOTSUPP;
101 u16 ep = le16_to_cpu(ctrl->wIndex);
102 u16 len = le16_to_cpu(ctrl->wLength);
103 u16 w_value = le16_to_cpu(ctrl->wValue);
104
105 DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
106 ctrl->bRequest, w_value, len, ep);
107
108 switch (ctrl->bRequest) {
109 case SET_CUR:
110 value = 0;
111 break;
112
113 case SET_MIN:
114 break;
115
116 case SET_MAX:
117 break;
118
119 case SET_RES:
120 break;
121
122 case SET_MEM:
123 break;
124
125 default:
126 break;
127 }
128
129 return value;
130}
131
132static int audio_get_endpoint_req(struct usb_configuration *c,
133 const struct usb_ctrlrequest *ctrl)
134{
135 struct usb_composite_dev *cdev = c->cdev;
136 int value = -EOPNOTSUPP;
137 u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
138 u16 len = le16_to_cpu(ctrl->wLength);
139 u16 w_value = le16_to_cpu(ctrl->wValue);
140
141 DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
142 ctrl->bRequest, w_value, len, ep);
143
144 switch (ctrl->bRequest) {
145 case GET_CUR:
146 case GET_MIN:
147 case GET_MAX:
148 case GET_RES:
149 value = 3;
150 break;
151 case GET_MEM:
152 break;
153 default:
154 break;
155 }
156
157 return value;
158}
159
160static int
161audio_setup(struct usb_configuration *c, const struct usb_ctrlrequest *ctrl)
162{
163 struct usb_composite_dev *cdev = c->cdev;
164 struct usb_request *req = cdev->req;
165 int value = -EOPNOTSUPP;
166 u16 w_index = le16_to_cpu(ctrl->wIndex);
167 u16 w_value = le16_to_cpu(ctrl->wValue);
168 u16 w_length = le16_to_cpu(ctrl->wLength);
169
170 /* composite driver infrastructure handles everything except
171 * Audio class messages; interface activation uses set_alt().
172 */
173 switch (ctrl->bRequestType) {
174 case USB_AUDIO_SET_ENDPOINT:
175 value = audio_set_endpoint_req(c, ctrl);
176 break;
177
178 case USB_AUDIO_GET_ENDPOINT:
179 value = audio_get_endpoint_req(c, ctrl);
180 break;
181
182 default:
183 ERROR(cdev, "Invalid control req%02x.%02x v%04x i%04x l%d\n",
184 ctrl->bRequestType, ctrl->bRequest,
185 w_value, w_index, w_length);
186 }
187
188 /* respond with data transfer or status phase? */
189 if (value >= 0) {
190 DBG(cdev, "Audio req%02x.%02x v%04x i%04x l%d\n",
191 ctrl->bRequestType, ctrl->bRequest,
192 w_value, w_index, w_length);
193 req->zero = 0;
194 req->length = value;
195 value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
196 if (value < 0)
197 ERROR(cdev, "Audio response on err %d\n", value);
198 }
199
200 /* device either stalls (value < 0) or reports success */
201 return value;
202}
203
204/*-------------------------------------------------------------------------*/
205
206static int __init audio_do_config(struct usb_configuration *c)
207{
208 /* FIXME alloc iConfiguration string, set it in c->strings */
209
210 if (gadget_is_otg(c->cdev->gadget)) {
211 c->descriptors = otg_desc;
212 c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
213 }
214
215 audio_bind_config(c);
216
217 return 0;
218}
219
220static struct usb_configuration audio_config_driver = {
221 .label = DRIVER_DESC,
222 .bind = audio_do_config,
223 .setup = audio_setup,
224 .bConfigurationValue = 1,
225 /* .iConfiguration = DYNAMIC */
226 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
227};
228
229/*-------------------------------------------------------------------------*/
230
231static int __init audio_bind(struct usb_composite_dev *cdev)
232{
233 int gcnum;
234 int status;
235
236 gcnum = usb_gadget_controller_number(cdev->gadget);
237 if (gcnum >= 0)
238 device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
239 else {
240 ERROR(cdev, "controller '%s' not recognized; trying %s\n",
241 cdev->gadget->name,
242 audio_config_driver.label);
243 device_desc.bcdDevice =
244 __constant_cpu_to_le16(0x0300 | 0x0099);
245 }
246
247 /* device descriptor strings: manufacturer, product */
248 snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
249 init_utsname()->sysname, init_utsname()->release,
250 cdev->gadget->name);
251 status = usb_string_id(cdev);
252 if (status < 0)
253 goto fail;
254 strings_dev[STRING_MANUFACTURER_IDX].id = status;
255 device_desc.iManufacturer = status;
256
257 status = usb_string_id(cdev);
258 if (status < 0)
259 goto fail;
260 strings_dev[STRING_PRODUCT_IDX].id = status;
261 device_desc.iProduct = status;
262
263 status = usb_add_config(cdev, &audio_config_driver);
264 if (status < 0)
265 goto fail;
266
267 INFO(cdev, "%s, version: %s\n", DRIVER_DESC, DRIVER_VERSION);
268 return 0;
269
270fail:
271 return status;
272}
273
274static int __exit audio_unbind(struct usb_composite_dev *cdev)
275{
276 return 0;
277}
278
279static struct usb_composite_driver audio_driver = {
280 .name = "g_audio",
281 .dev = &device_desc,
282 .strings = audio_strings,
283 .bind = audio_bind,
284 .unbind = __exit_p(audio_unbind),
285};
286
287static int __init init(void)
288{
289 return usb_composite_register(&audio_driver);
290}
291module_init(init);
292
293static void __exit cleanup(void)
294{
295 usb_composite_unregister(&audio_driver);
296}
297module_exit(cleanup);
298
299MODULE_DESCRIPTION(DRIVER_DESC);
300MODULE_AUTHOR("Bryan Wu <cooloney@kernel.org>");
301MODULE_LICENSE("GPL");
302
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 38e531ecae4d..c7cb87a6fee2 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -1977,9 +1977,9 @@ static int ep_enable(struct usb_ep *ep,
1977 if (!list_empty(&mEp->qh[mEp->dir].queue)) 1977 if (!list_empty(&mEp->qh[mEp->dir].queue))
1978 warn("enabling a non-empty endpoint!"); 1978 warn("enabling a non-empty endpoint!");
1979 1979
1980 mEp->dir = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? TX : RX; 1980 mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
1981 mEp->num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; 1981 mEp->num = usb_endpoint_num(desc);
1982 mEp->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 1982 mEp->type = usb_endpoint_type(desc);
1983 1983
1984 mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize); 1984 mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
1985 1985
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
new file mode 100644
index 000000000000..66527ba2d2ea
--- /dev/null
+++ b/drivers/usb/gadget/f_audio.c
@@ -0,0 +1,707 @@
1/*
2 * f_audio.c -- USB Audio class function driver
3 *
4 * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
5 * Copyright (C) 2008 Analog Devices, Inc
6 *
7 * Enter bugs at http://blackfin.uclinux.org/
8 *
9 * Licensed under the GPL-2 or later.
10 */
11
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/err.h>
#include <asm/atomic.h>

#include "u_audio.h"
17
#define OUT_EP_MAX_PACKET_SIZE	200

/* Size of each usb_request buffer queued on the ISO OUT endpoint. */
static int req_buf_size = OUT_EP_MAX_PACKET_SIZE;
module_param(req_buf_size, int, S_IRUGO);
MODULE_PARM_DESC(req_buf_size, "ISO OUT endpoint request buffer size");

/* Number of usb_requests kept queued on the ISO OUT endpoint. */
static int req_count = 256;
module_param(req_count, int, S_IRUGO);
MODULE_PARM_DESC(req_count, "ISO OUT endpoint request count");

/* Size of each intermediate copy buffer handed to the ALSA side. */
static int audio_buf_size = 48000;
module_param(audio_buf_size, int, S_IRUGO);
MODULE_PARM_DESC(audio_buf_size, "Audio buffer size");
30
/*
 * DESCRIPTORS ... most are static, but strings and full
 * configuration descriptors are built on demand.
 */

/*
 * We have two interfaces- AudioControl and AudioStreaming
 * TODO: only support playback currently
 */
#define F_AUDIO_AC_INTERFACE	0
#define F_AUDIO_AS_INTERFACE	1
#define F_AUDIO_NUM_INTERFACES	2

/* B.3.1  Standard AC Interface Descriptor */
static struct usb_interface_descriptor ac_interface_desc __initdata = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	0,
	.bInterfaceClass =	USB_CLASS_AUDIO,
	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
};

DECLARE_USB_AC_HEADER_DESCRIPTOR(2);

/* NOTE(review): "LENGH" typo is part of the macro name; kept as-is. */
#define USB_DT_AC_HEADER_LENGH	USB_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
/* B.3.2  Class-Specific AC Interface Descriptor */
static struct usb_ac_header_descriptor_2 ac_header_desc = {
	.bLength =		USB_DT_AC_HEADER_LENGH,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	HEADER,
	.bcdADC =		__constant_cpu_to_le16(0x0100),
	.wTotalLength =		__constant_cpu_to_le16(USB_DT_AC_HEADER_LENGH),
	.bInCollection =	F_AUDIO_NUM_INTERFACES,
	.baInterfaceNr = {
		[0] =		F_AUDIO_AC_INTERFACE,
		[1] =		F_AUDIO_AS_INTERFACE,
	}
};

/* Topology: streaming input terminal -> feature unit -> speaker output */
#define INPUT_TERMINAL_ID	1
static struct usb_input_terminal_descriptor input_terminal_desc = {
	.bLength =		USB_DT_AC_INPUT_TERMINAL_SIZE,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	INPUT_TERMINAL,
	.bTerminalID =		INPUT_TERMINAL_ID,
	.wTerminalType =	USB_AC_TERMINAL_STREAMING,
	.bAssocTerminal =	0,
	.wChannelConfig =	0x3,	/* presumably L+R front — confirm */
};

DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(0);

#define FEATURE_UNIT_ID		2
static struct usb_ac_feature_unit_descriptor_0 feature_unit_desc = {
	.bLength		= USB_DT_AC_FEATURE_UNIT_SIZE(0),
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubtype	= FEATURE_UNIT,
	.bUnitID		= FEATURE_UNIT_ID,
	.bSourceID		= INPUT_TERMINAL_ID,
	.bControlSize		= 2,
	.bmaControls[0]		= (FU_MUTE | FU_VOLUME),
};

static struct usb_audio_control mute_control = {
	.list = LIST_HEAD_INIT(mute_control.list),
	.name = "Mute Control",
	.type = MUTE_CONTROL,
	/* Todo: add real Mute control code */
	.set = generic_set_cmd,
	.get = generic_get_cmd,
};

static struct usb_audio_control volume_control = {
	.list = LIST_HEAD_INIT(volume_control.list),
	.name = "Volume Control",
	.type = VOLUME_CONTROL,
	/* Todo: add real Volume control code */
	.set = generic_set_cmd,
	.get = generic_get_cmd,
};

/* Selector grouping the two controls above; listed on f_audio.cs */
static struct usb_audio_control_selector feature_unit = {
	.list = LIST_HEAD_INIT(feature_unit.list),
	.id = FEATURE_UNIT_ID,
	.name = "Mute & Volume Control",
	.type = FEATURE_UNIT,
	.desc = (struct usb_descriptor_header *)&feature_unit_desc,
};

#define OUTPUT_TERMINAL_ID	3
static struct usb_output_terminal_descriptor output_terminal_desc = {
	.bLength		= USB_DT_AC_OUTPUT_TERMINAL_SIZE,
	.bDescriptorType	= USB_DT_CS_INTERFACE,
	.bDescriptorSubtype	= OUTPUT_TERMINAL,
	.bTerminalID		= OUTPUT_TERMINAL_ID,
	.wTerminalType		= USB_AC_OUTPUT_TERMINAL_SPEAKER,
	.bAssocTerminal		= FEATURE_UNIT_ID,
	.bSourceID		= FEATURE_UNIT_ID,
};

/* B.4.1  Standard AS Interface Descriptor */
static struct usb_interface_descriptor as_interface_alt_0_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bAlternateSetting =	0,
	.bNumEndpoints =	0,
	.bInterfaceClass =	USB_CLASS_AUDIO,
	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
};

static struct usb_interface_descriptor as_interface_alt_1_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bAlternateSetting =	1,
	.bNumEndpoints =	1,
	.bInterfaceClass =	USB_CLASS_AUDIO,
	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
};

/* B.4.2  Class-Specific AS Interface Descriptor */
static struct usb_as_header_descriptor as_header_desc = {
	.bLength =		USB_DT_AS_HEADER_SIZE,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	AS_GENERAL,
	.bTerminalLink =	INPUT_TERMINAL_ID,
	.bDelay =		1,
	.wFormatTag =		USB_AS_AUDIO_FORMAT_TYPE_I_PCM,
};

DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(1);

/* 16-bit PCM; channel count and sample rate are patched at bind time
 * by f_audio_build_desc() */
static struct usb_as_formate_type_i_discrete_descriptor_1 as_type_i_desc = {
	.bLength =		USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubtype =	FORMAT_TYPE,
	.bFormatType =		USB_AS_FORMAT_TYPE_I,
	.bSubframeSize =	2,
	.bBitResolution =	16,
	.bSamFreqType =		1,
};

/* Standard ISO OUT Endpoint Descriptor */
static struct usb_endpoint_descriptor as_out_ep_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_AS_ENDPOINT_ADAPTIVE
				| USB_ENDPOINT_XFER_ISOC,
	.wMaxPacketSize =	__constant_cpu_to_le16(OUT_EP_MAX_PACKET_SIZE),
	.bInterval =		4,
};

/* Class-specific AS ISO OUT Endpoint Descriptor */
static struct usb_as_iso_endpoint_descriptor as_iso_out_desc __initdata = {
	.bLength =		USB_AS_ISO_ENDPOINT_DESC_SIZE,
	.bDescriptorType =	USB_DT_CS_ENDPOINT,
	.bDescriptorSubtype =	EP_GENERAL,
	.bmAttributes = 	1,
	.bLockDelayUnits =	1,
	.wLockDelay =		__constant_cpu_to_le16(1),
};

/* Full descriptor set in configuration order; copied at bind time */
static struct usb_descriptor_header *f_audio_desc[] __initdata = {
	(struct usb_descriptor_header *)&ac_interface_desc,
	(struct usb_descriptor_header *)&ac_header_desc,

	(struct usb_descriptor_header *)&input_terminal_desc,
	(struct usb_descriptor_header *)&output_terminal_desc,
	(struct usb_descriptor_header *)&feature_unit_desc,

	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
	(struct usb_descriptor_header *)&as_header_desc,

	(struct usb_descriptor_header *)&as_type_i_desc,

	(struct usb_descriptor_header *)&as_out_ep_desc,
	(struct usb_descriptor_header *)&as_iso_out_desc,
	NULL,
};

/* string IDs are assigned dynamically */

#define STRING_MANUFACTURER_IDX		0
#define STRING_PRODUCT_IDX		1

static char manufacturer[50];

static struct usb_string strings_dev[] = {
	[STRING_MANUFACTURER_IDX].s = manufacturer,
	[STRING_PRODUCT_IDX].s = DRIVER_DESC,
	{  } /* end of list */
};

static struct usb_gadget_strings stringtab_dev = {
	.language	= 0x0409,	/* en-us */
	.strings	= strings_dev,
};

static struct usb_gadget_strings *audio_strings[] = {
	&stringtab_dev,
	NULL,
};
234
235/*
236 * This function is an ALSA sound card following USB Audio Class Spec 1.0.
237 */
238
239/*-------------------------------------------------------------------------*/
/* One chunk of captured audio on its way to the ALSA playback device. */
struct f_audio_buf {
	u8 *buf;		/* kzalloc'd data area */
	int actual;		/* bytes currently stored in @buf */
	struct list_head list;	/* link on f_audio.play_queue */
};
245
246static struct f_audio_buf *f_audio_buffer_alloc(int buf_size)
247{
248 struct f_audio_buf *copy_buf;
249
250 copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC);
251 if (!copy_buf)
252 return (struct f_audio_buf *)-ENOMEM;
253
254 copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC);
255 if (!copy_buf->buf) {
256 kfree(copy_buf);
257 return (struct f_audio_buf *)-ENOMEM;
258 }
259
260 return copy_buf;
261}
262
/* Release a buffer from f_audio_buffer_alloc().  @audio_buf must be a
 * valid (non-NULL, non-error) pointer — it is dereferenced here. */
static void f_audio_buffer_free(struct f_audio_buf *audio_buf)
{
	kfree(audio_buf->buf);
	kfree(audio_buf);
}
268/*-------------------------------------------------------------------------*/
269
/* Per-instance state of the audio function. */
struct f_audio {
	struct gaudio			card;	/* embeds the usb_function */

	/* endpoints handle full and/or high speeds */
	struct usb_ep			*out_ep;	/* ISO OUT data ep */
	struct usb_endpoint_descriptor	*out_desc;

	spinlock_t			lock;	/* protects play_queue */
	struct f_audio_buf *copy_buf;	/* buffer currently being filled */
	struct work_struct playback_work;	/* drains play_queue */
	struct list_head play_queue;	/* filled bufs awaiting playback */

	/* Control Set command */
	struct list_head cs;	/* usb_audio_control_selector entities */
	u8 set_cmd;	/* pending SET command (bRequest low nibble) */
	struct usb_audio_control *set_con;	/* control awaiting data stage */
};
287
/* Map the composite-layer usb_function back to its f_audio instance. */
static inline struct f_audio *func_to_audio(struct usb_function *f)
{
	return container_of(f, struct f_audio, card.func);
}
292
293/*-------------------------------------------------------------------------*/
294
295static void f_audio_playback_work(struct work_struct *data)
296{
297 struct f_audio *audio = container_of(data, struct f_audio,
298 playback_work);
299 struct f_audio_buf *play_buf;
300
301 spin_lock_irq(&audio->lock);
302 if (list_empty(&audio->play_queue)) {
303 spin_unlock_irq(&audio->lock);
304 return;
305 }
306 play_buf = list_first_entry(&audio->play_queue,
307 struct f_audio_buf, list);
308 list_del(&play_buf->list);
309 spin_unlock_irq(&audio->lock);
310
311 u_audio_playback(&audio->card, play_buf->buf, play_buf->actual);
312 f_audio_buffer_free(play_buf);
313
314 return;
315}
316
317static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
318{
319 struct f_audio *audio = req->context;
320 struct usb_composite_dev *cdev = audio->card.func.config->cdev;
321 struct f_audio_buf *copy_buf = audio->copy_buf;
322 int err;
323
324 if (!copy_buf)
325 return -EINVAL;
326
327 /* Copy buffer is full, add it to the play_queue */
328 if (audio_buf_size - copy_buf->actual < req->actual) {
329 list_add_tail(&copy_buf->list, &audio->play_queue);
330 schedule_work(&audio->playback_work);
331 copy_buf = f_audio_buffer_alloc(audio_buf_size);
332 if (copy_buf < 0)
333 return -ENOMEM;
334 }
335
336 memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual);
337 copy_buf->actual += req->actual;
338 audio->copy_buf = copy_buf;
339
340 err = usb_ep_queue(ep, req, GFP_ATOMIC);
341 if (err)
342 ERROR(cdev, "%s queue req: %d\n", ep->name, err);
343
344 return 0;
345
346}
347
348static void f_audio_complete(struct usb_ep *ep, struct usb_request *req)
349{
350 struct f_audio *audio = req->context;
351 int status = req->status;
352 u32 data = 0;
353 struct usb_ep *out_ep = audio->out_ep;
354
355 switch (status) {
356
357 case 0: /* normal completion? */
358 if (ep == out_ep)
359 f_audio_out_ep_complete(ep, req);
360 else if (audio->set_con) {
361 memcpy(&data, req->buf, req->length);
362 audio->set_con->set(audio->set_con, audio->set_cmd,
363 le16_to_cpu(data));
364 audio->set_con = NULL;
365 }
366 break;
367 default:
368 break;
369 }
370}
371
/*
 * Handle a class-specific SET request to the AudioControl interface.
 * The entity id (wIndex high byte) selects a control selector, the
 * selector byte (wValue high byte) a control within it, and the low
 * nibble of bRequest the command.  Only the target is recorded here;
 * the value arrives in the data stage and is applied by
 * f_audio_complete().  Returns wLength (the data-stage size).
 */
static int audio_set_intf_req(struct usb_function *f,
		const struct usb_ctrlrequest *ctrl)
{
	struct f_audio		*audio = func_to_audio(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	*req = cdev->req;
	u8			id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
	u16			len = le16_to_cpu(ctrl->wLength);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u8			con_sel = (w_value >> 8) & 0xFF;
	u8			cmd = (ctrl->bRequest & 0x0F);
	struct usb_audio_control_selector *cs;
	struct usb_audio_control *con;

	DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
			ctrl->bRequest, w_value, len, id);

	/* find the addressed entity, then the control of the requested
	 * type inside it; an unknown target simply leaves set_con as-is */
	list_for_each_entry(cs, &audio->cs, list) {
		if (cs->id == id) {
			list_for_each_entry(con, &cs->control, list) {
				if (con->type == con_sel) {
					audio->set_con = con;
					break;
				}
			}
			break;
		}
	}

	audio->set_cmd = cmd;
	req->context = audio;
	req->complete = f_audio_complete;

	return len;
}
407
408static int audio_get_intf_req(struct usb_function *f,
409 const struct usb_ctrlrequest *ctrl)
410{
411 struct f_audio *audio = func_to_audio(f);
412 struct usb_composite_dev *cdev = f->config->cdev;
413 struct usb_request *req = cdev->req;
414 int value = -EOPNOTSUPP;
415 u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
416 u16 len = le16_to_cpu(ctrl->wLength);
417 u16 w_value = le16_to_cpu(ctrl->wValue);
418 u8 con_sel = (w_value >> 8) & 0xFF;
419 u8 cmd = (ctrl->bRequest & 0x0F);
420 struct usb_audio_control_selector *cs;
421 struct usb_audio_control *con;
422
423 DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n",
424 ctrl->bRequest, w_value, len, id);
425
426 list_for_each_entry(cs, &audio->cs, list) {
427 if (cs->id == id) {
428 list_for_each_entry(con, &cs->control, list) {
429 if (con->type == con_sel && con->get) {
430 value = con->get(con, cmd);
431 break;
432 }
433 }
434 break;
435 }
436 }
437
438 req->context = audio;
439 req->complete = f_audio_complete;
440 memcpy(req->buf, &value, len);
441
442 return len;
443}
444
/*
 * Ep0 request dispatcher for the audio function.  The composite layer
 * has already handled standard requests; only Audio Class interface
 * SET/GET requests are expected here.  USB_AUDIO_SET_INTF and
 * USB_AUDIO_GET_INTF are presumably full-bmRequestType match values
 * from u_audio.h — TODO(review): confirm.
 */
static int
f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	*req = cdev->req;
	int			value = -EOPNOTSUPP;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	/* composite driver infrastructure handles everything except
	 * Audio class messages; interface activation uses set_alt().
	 */
	switch (ctrl->bRequestType) {
	case USB_AUDIO_SET_INTF:
		value = audio_set_intf_req(f, ctrl);
		break;

	case USB_AUDIO_GET_INTF:
		value = audio_get_intf_req(f, ctrl);
		break;

	default:
		ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		DBG(cdev, "audio req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		/* queue the data/status stage; f_audio_complete() runs
		 * when the transfer finishes */
		req->length = value;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0)
			ERROR(cdev, "audio response on err %d\n", value);
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}
488
489static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
490{
491 struct f_audio *audio = func_to_audio(f);
492 struct usb_composite_dev *cdev = f->config->cdev;
493 struct usb_ep *out_ep = audio->out_ep;
494 struct usb_request *req;
495 int i = 0, err = 0;
496
497 DBG(cdev, "intf %d, alt %d\n", intf, alt);
498
499 if (intf == 1) {
500 if (alt == 1) {
501 usb_ep_enable(out_ep, audio->out_desc);
502 out_ep->driver_data = audio;
503 audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
504
505 /*
506 * allocate a bunch of read buffers
507 * and queue them all at once.
508 */
509 for (i = 0; i < req_count && err == 0; i++) {
510 req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
511 if (req) {
512 req->buf = kzalloc(req_buf_size,
513 GFP_ATOMIC);
514 if (req->buf) {
515 req->length = req_buf_size;
516 req->context = audio;
517 req->complete =
518 f_audio_complete;
519 err = usb_ep_queue(out_ep,
520 req, GFP_ATOMIC);
521 if (err)
522 ERROR(cdev,
523 "%s queue req: %d\n",
524 out_ep->name, err);
525 } else
526 err = -ENOMEM;
527 } else
528 err = -ENOMEM;
529 }
530
531 } else {
532 struct f_audio_buf *copy_buf = audio->copy_buf;
533 if (copy_buf) {
534 list_add_tail(&copy_buf->list,
535 &audio->play_queue);
536 schedule_work(&audio->playback_work);
537 }
538 }
539 }
540
541 return err;
542}
543
/* No teardown implemented yet.  NOTE(review): should probably disable
 * out_ep and flush pending buffers — confirm against set_alt(alt=0). */
static void f_audio_disable(struct usb_function *f)
{
	return;
}
548
549/*-------------------------------------------------------------------------*/
550
551static void f_audio_build_desc(struct f_audio *audio)
552{
553 struct gaudio *card = &audio->card;
554 u8 *sam_freq;
555 int rate;
556
557 /* Set channel numbers */
558 input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card);
559 as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card);
560
561 /* Set sample rates */
562 rate = u_audio_get_playback_rate(card);
563 sam_freq = as_type_i_desc.tSamFreq[0];
564 memcpy(sam_freq, &rate, 3);
565
566 /* Todo: Set Sample bits and other parameters */
567
568 return;
569}
570
/* audio function driver setup/binding */
/*
 * Allocate interface ids and the ISO OUT endpoint for this instance,
 * patch the static descriptors accordingly, and hand the composite
 * layer a copy of the descriptor set.  Returns 0 or a negative errno.
 */
static int __init
f_audio_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_audio		*audio = func_to_audio(f);
	int			status;
	struct usb_ep		*ep;

	/* fill in channel count / sample rate before copying descriptors */
	f_audio_build_desc(audio);

	/* allocate instance-specific interface IDs, and patch descriptors */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	ac_interface_desc.bInterfaceNumber = status;

	/* both alt settings share the second interface number */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	as_interface_alt_0_desc.bInterfaceNumber = status;
	as_interface_alt_1_desc.bInterfaceNumber = status;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc);
	if (!ep)
		goto fail;
	audio->out_ep = ep;
	ep->driver_data = cdev;	/* claim */

	status = -ENOMEM;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */

	/* copy descriptors, and track endpoint copies.  Note: only one
	 * of hs_descriptors / descriptors is populated here. */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		c->highspeed = true;
		f->hs_descriptors = usb_copy_descriptors(f_audio_desc);
	} else
		f->descriptors = usb_copy_descriptors(f_audio_desc);

	return 0;

fail:

	return status;
}
623
624static void
625f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
626{
627 struct f_audio *audio = func_to_audio(f);
628
629 usb_free_descriptors(f->descriptors);
630 kfree(audio);
631}
632
633/*-------------------------------------------------------------------------*/
634
/* Todo: add more control selectors dynamically */
/*
 * Register the feature unit and its mute/volume controls on audio->cs
 * (consumed by audio_set_intf_req/audio_get_intf_req) and seed the
 * volume range.  The raw values look like UAC 1.0 s16 volume units —
 * TODO(review): confirm against generic_set_cmd/generic_get_cmd.
 */
int __init control_selector_init(struct f_audio *audio)
{
	INIT_LIST_HEAD(&audio->cs);
	list_add(&feature_unit.list, &audio->cs);

	INIT_LIST_HEAD(&feature_unit.control);
	list_add(&mute_control.list, &feature_unit.control);
	list_add(&volume_control.list, &feature_unit.control);

	volume_control.data[_CUR] = 0xffc0;	/* current */
	volume_control.data[_MIN] = 0xe3a0;	/* minimum */
	volume_control.data[_MAX] = 0xfff0;	/* maximum */
	volume_control.data[_RES] = 0x0030;	/* resolution */

	return 0;
}
652
/**
 * audio_bind_config - add USB audio function to a configuration
 * @c: the configuration to support the USB audio function
 * Context: single threaded during gadget setup
 *
 * Allocates one f_audio instance, sets up the ALSA backend and the
 * control selectors, then registers the function with the composite
 * framework.  Uses goto-based cleanup: gaudio is torn down and the
 * instance freed on any failure.
 *
 * Returns zero on success, else negative errno.
 */
int __init audio_bind_config(struct usb_configuration *c)
{
	struct f_audio *audio;
	int status;

	/* allocate and initialize one new instance */
	audio = kzalloc(sizeof *audio, GFP_KERNEL);
	if (!audio)
		return -ENOMEM;

	audio->card.func.name = "g_audio";
	audio->card.gadget = c->cdev->gadget;

	INIT_LIST_HEAD(&audio->play_queue);
	spin_lock_init(&audio->lock);

	/* set up ALSA audio devices */
	status = gaudio_setup(&audio->card);
	if (status < 0)
		goto setup_fail;

	audio->card.func.strings = audio_strings;
	audio->card.func.bind = f_audio_bind;
	audio->card.func.unbind = f_audio_unbind;
	audio->card.func.set_alt = f_audio_set_alt;
	audio->card.func.setup = f_audio_setup;
	audio->card.func.disable = f_audio_disable;
	audio->out_desc = &as_out_ep_desc;

	control_selector_init(audio);

	INIT_WORK(&audio->playback_work, f_audio_playback_work);

	status = usb_add_function(c, &audio->card.func);
	if (status)
		goto add_fail;

	INFO(c->cdev, "audio_buf_size %d, req_buf_size %d, req_count %d\n",
		audio_buf_size, req_buf_size, req_count);

	return status;

add_fail:
	gaudio_cleanup(&audio->card);
setup_fail:
	kfree(audio);
	return status;
}
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 3279a4726042..424a37c5773f 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -475,7 +475,9 @@ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
475 if (rndis->port.in_ep->driver_data) { 475 if (rndis->port.in_ep->driver_data) {
476 DBG(cdev, "reset rndis\n"); 476 DBG(cdev, "reset rndis\n");
477 gether_disconnect(&rndis->port); 477 gether_disconnect(&rndis->port);
478 } else { 478 }
479
480 if (!rndis->port.in) {
479 DBG(cdev, "init rndis\n"); 481 DBG(cdev, "init rndis\n");
480 rndis->port.in = ep_choose(cdev->gadget, 482 rndis->port.in = ep_choose(cdev->gadget,
481 rndis->hs.in, rndis->fs.in); 483 rndis->hs.in, rndis->fs.in);
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 381a53b3e11c..1e6aa504d58a 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -248,6 +248,8 @@
248#include <linux/freezer.h> 248#include <linux/freezer.h>
249#include <linux/utsname.h> 249#include <linux/utsname.h>
250 250
251#include <asm/unaligned.h>
252
251#include <linux/usb/ch9.h> 253#include <linux/usb/ch9.h>
252#include <linux/usb/gadget.h> 254#include <linux/usb/gadget.h>
253 255
@@ -799,29 +801,9 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
799 801
800/* Routines for unaligned data access */ 802/* Routines for unaligned data access */
801 803
802static u16 get_be16(u8 *buf) 804static u32 get_unaligned_be24(u8 *buf)
803{
804 return ((u16) buf[0] << 8) | ((u16) buf[1]);
805}
806
807static u32 get_be32(u8 *buf)
808{
809 return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) |
810 ((u32) buf[2] << 8) | ((u32) buf[3]);
811}
812
813static void put_be16(u8 *buf, u16 val)
814{
815 buf[0] = val >> 8;
816 buf[1] = val;
817}
818
819static void put_be32(u8 *buf, u32 val)
820{ 805{
821 buf[0] = val >> 24; 806 return 0xffffff & (u32) get_unaligned_be32(buf - 1);
822 buf[1] = val >> 16;
823 buf[2] = val >> 8;
824 buf[3] = val & 0xff;
825} 807}
826 808
827 809
@@ -1582,9 +1564,9 @@ static int do_read(struct fsg_dev *fsg)
1582 /* Get the starting Logical Block Address and check that it's 1564 /* Get the starting Logical Block Address and check that it's
1583 * not too big */ 1565 * not too big */
1584 if (fsg->cmnd[0] == SC_READ_6) 1566 if (fsg->cmnd[0] == SC_READ_6)
1585 lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]); 1567 lba = get_unaligned_be24(&fsg->cmnd[1]);
1586 else { 1568 else {
1587 lba = get_be32(&fsg->cmnd[2]); 1569 lba = get_unaligned_be32(&fsg->cmnd[2]);
1588 1570
1589 /* We allow DPO (Disable Page Out = don't save data in the 1571 /* We allow DPO (Disable Page Out = don't save data in the
1590 * cache) and FUA (Force Unit Access = don't read from the 1572 * cache) and FUA (Force Unit Access = don't read from the
@@ -1717,9 +1699,9 @@ static int do_write(struct fsg_dev *fsg)
1717 /* Get the starting Logical Block Address and check that it's 1699 /* Get the starting Logical Block Address and check that it's
1718 * not too big */ 1700 * not too big */
1719 if (fsg->cmnd[0] == SC_WRITE_6) 1701 if (fsg->cmnd[0] == SC_WRITE_6)
1720 lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]); 1702 lba = get_unaligned_be24(&fsg->cmnd[1]);
1721 else { 1703 else {
1722 lba = get_be32(&fsg->cmnd[2]); 1704 lba = get_unaligned_be32(&fsg->cmnd[2]);
1723 1705
1724 /* We allow DPO (Disable Page Out = don't save data in the 1706 /* We allow DPO (Disable Page Out = don't save data in the
1725 * cache) and FUA (Force Unit Access = write directly to the 1707 * cache) and FUA (Force Unit Access = write directly to the
@@ -1940,7 +1922,7 @@ static int do_verify(struct fsg_dev *fsg)
1940 1922
1941 /* Get the starting Logical Block Address and check that it's 1923 /* Get the starting Logical Block Address and check that it's
1942 * not too big */ 1924 * not too big */
1943 lba = get_be32(&fsg->cmnd[2]); 1925 lba = get_unaligned_be32(&fsg->cmnd[2]);
1944 if (lba >= curlun->num_sectors) { 1926 if (lba >= curlun->num_sectors) {
1945 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1927 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1946 return -EINVAL; 1928 return -EINVAL;
@@ -1953,7 +1935,7 @@ static int do_verify(struct fsg_dev *fsg)
1953 return -EINVAL; 1935 return -EINVAL;
1954 } 1936 }
1955 1937
1956 verification_length = get_be16(&fsg->cmnd[7]); 1938 verification_length = get_unaligned_be16(&fsg->cmnd[7]);
1957 if (unlikely(verification_length == 0)) 1939 if (unlikely(verification_length == 0))
1958 return -EIO; // No default reply 1940 return -EIO; // No default reply
1959 1941
@@ -2103,7 +2085,7 @@ static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2103 memset(buf, 0, 18); 2085 memset(buf, 0, 18);
2104 buf[0] = valid | 0x70; // Valid, current error 2086 buf[0] = valid | 0x70; // Valid, current error
2105 buf[2] = SK(sd); 2087 buf[2] = SK(sd);
2106 put_be32(&buf[3], sdinfo); // Sense information 2088 put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
2107 buf[7] = 18 - 8; // Additional sense length 2089 buf[7] = 18 - 8; // Additional sense length
2108 buf[12] = ASC(sd); 2090 buf[12] = ASC(sd);
2109 buf[13] = ASCQ(sd); 2091 buf[13] = ASCQ(sd);
@@ -2114,7 +2096,7 @@ static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2114static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2096static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2115{ 2097{
2116 struct lun *curlun = fsg->curlun; 2098 struct lun *curlun = fsg->curlun;
2117 u32 lba = get_be32(&fsg->cmnd[2]); 2099 u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
2118 int pmi = fsg->cmnd[8]; 2100 int pmi = fsg->cmnd[8];
2119 u8 *buf = (u8 *) bh->buf; 2101 u8 *buf = (u8 *) bh->buf;
2120 2102
@@ -2124,8 +2106,9 @@ static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2124 return -EINVAL; 2106 return -EINVAL;
2125 } 2107 }
2126 2108
2127 put_be32(&buf[0], curlun->num_sectors - 1); // Max logical block 2109 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
2128 put_be32(&buf[4], 512); // Block length 2110 /* Max logical block */
2111 put_unaligned_be32(512, &buf[4]); /* Block length */
2129 return 8; 2112 return 8;
2130} 2113}
2131 2114
@@ -2144,7 +2127,7 @@ static void store_cdrom_address(u8 *dest, int msf, u32 addr)
2144 dest[0] = 0; /* Reserved */ 2127 dest[0] = 0; /* Reserved */
2145 } else { 2128 } else {
2146 /* Absolute sector */ 2129 /* Absolute sector */
2147 put_be32(dest, addr); 2130 put_unaligned_be32(addr, dest);
2148 } 2131 }
2149} 2132}
2150 2133
@@ -2152,7 +2135,7 @@ static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2152{ 2135{
2153 struct lun *curlun = fsg->curlun; 2136 struct lun *curlun = fsg->curlun;
2154 int msf = fsg->cmnd[1] & 0x02; 2137 int msf = fsg->cmnd[1] & 0x02;
2155 u32 lba = get_be32(&fsg->cmnd[2]); 2138 u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
2156 u8 *buf = (u8 *) bh->buf; 2139 u8 *buf = (u8 *) bh->buf;
2157 2140
2158 if ((fsg->cmnd[1] & ~0x02) != 0) { /* Mask away MSF */ 2141 if ((fsg->cmnd[1] & ~0x02) != 0) { /* Mask away MSF */
@@ -2252,10 +2235,13 @@ static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2252 buf[2] = 0x04; // Write cache enable, 2235 buf[2] = 0x04; // Write cache enable,
2253 // Read cache not disabled 2236 // Read cache not disabled
2254 // No cache retention priorities 2237 // No cache retention priorities
2255 put_be16(&buf[4], 0xffff); // Don't disable prefetch 2238 put_unaligned_be16(0xffff, &buf[4]);
2256 // Minimum prefetch = 0 2239 /* Don't disable prefetch */
2257 put_be16(&buf[8], 0xffff); // Maximum prefetch 2240 /* Minimum prefetch = 0 */
2258 put_be16(&buf[10], 0xffff); // Maximum prefetch ceiling 2241 put_unaligned_be16(0xffff, &buf[8]);
2242 /* Maximum prefetch */
2243 put_unaligned_be16(0xffff, &buf[10]);
2244 /* Maximum prefetch ceiling */
2259 } 2245 }
2260 buf += 12; 2246 buf += 12;
2261 } 2247 }
@@ -2272,7 +2258,7 @@ static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2272 if (mscmnd == SC_MODE_SENSE_6) 2258 if (mscmnd == SC_MODE_SENSE_6)
2273 buf0[0] = len - 1; 2259 buf0[0] = len - 1;
2274 else 2260 else
2275 put_be16(buf0, len - 2); 2261 put_unaligned_be16(len - 2, buf0);
2276 return len; 2262 return len;
2277} 2263}
2278 2264
@@ -2360,9 +2346,10 @@ static int do_read_format_capacities(struct fsg_dev *fsg,
2360 buf[3] = 8; // Only the Current/Maximum Capacity Descriptor 2346 buf[3] = 8; // Only the Current/Maximum Capacity Descriptor
2361 buf += 4; 2347 buf += 4;
2362 2348
2363 put_be32(&buf[0], curlun->num_sectors); // Number of blocks 2349 put_unaligned_be32(curlun->num_sectors, &buf[0]);
2364 put_be32(&buf[4], 512); // Block length 2350 /* Number of blocks */
2365 buf[4] = 0x02; // Current capacity 2351 put_unaligned_be32(512, &buf[4]); /* Block length */
2352 buf[4] = 0x02; /* Current capacity */
2366 return 12; 2353 return 12;
2367} 2354}
2368 2355
@@ -2882,7 +2869,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2882 break; 2869 break;
2883 2870
2884 case SC_MODE_SELECT_10: 2871 case SC_MODE_SELECT_10:
2885 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]); 2872 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2886 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST, 2873 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2887 (1<<1) | (3<<7), 0, 2874 (1<<1) | (3<<7), 0,
2888 "MODE SELECT(10)")) == 0) 2875 "MODE SELECT(10)")) == 0)
@@ -2898,7 +2885,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2898 break; 2885 break;
2899 2886
2900 case SC_MODE_SENSE_10: 2887 case SC_MODE_SENSE_10:
2901 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]); 2888 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2902 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2889 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2903 (1<<1) | (1<<2) | (3<<7), 0, 2890 (1<<1) | (1<<2) | (3<<7), 0,
2904 "MODE SENSE(10)")) == 0) 2891 "MODE SENSE(10)")) == 0)
@@ -2923,7 +2910,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
2923 break; 2910 break;
2924 2911
2925 case SC_READ_10: 2912 case SC_READ_10:
2926 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9; 2913 fsg->data_size_from_cmnd =
2914 get_unaligned_be16(&fsg->cmnd[7]) << 9;
2927 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2915 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2928 (1<<1) | (0xf<<2) | (3<<7), 1, 2916 (1<<1) | (0xf<<2) | (3<<7), 1,
2929 "READ(10)")) == 0) 2917 "READ(10)")) == 0)
@@ -2931,7 +2919,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
2931 break; 2919 break;
2932 2920
2933 case SC_READ_12: 2921 case SC_READ_12:
2934 fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9; 2922 fsg->data_size_from_cmnd =
2923 get_unaligned_be32(&fsg->cmnd[6]) << 9;
2935 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST, 2924 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
2936 (1<<1) | (0xf<<2) | (0xf<<6), 1, 2925 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2937 "READ(12)")) == 0) 2926 "READ(12)")) == 0)
@@ -2949,7 +2938,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2949 case SC_READ_HEADER: 2938 case SC_READ_HEADER:
2950 if (!mod_data.cdrom) 2939 if (!mod_data.cdrom)
2951 goto unknown_cmnd; 2940 goto unknown_cmnd;
2952 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]); 2941 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2953 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2942 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2954 (3<<7) | (0x1f<<1), 1, 2943 (3<<7) | (0x1f<<1), 1,
2955 "READ HEADER")) == 0) 2944 "READ HEADER")) == 0)
@@ -2959,7 +2948,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2959 case SC_READ_TOC: 2948 case SC_READ_TOC:
2960 if (!mod_data.cdrom) 2949 if (!mod_data.cdrom)
2961 goto unknown_cmnd; 2950 goto unknown_cmnd;
2962 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]); 2951 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2963 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2952 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2964 (7<<6) | (1<<1), 1, 2953 (7<<6) | (1<<1), 1,
2965 "READ TOC")) == 0) 2954 "READ TOC")) == 0)
@@ -2967,7 +2956,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2967 break; 2956 break;
2968 2957
2969 case SC_READ_FORMAT_CAPACITIES: 2958 case SC_READ_FORMAT_CAPACITIES:
2970 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]); 2959 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2971 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST, 2960 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2972 (3<<7), 1, 2961 (3<<7), 1,
2973 "READ FORMAT CAPACITIES")) == 0) 2962 "READ FORMAT CAPACITIES")) == 0)
@@ -3025,7 +3014,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
3025 break; 3014 break;
3026 3015
3027 case SC_WRITE_10: 3016 case SC_WRITE_10:
3028 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9; 3017 fsg->data_size_from_cmnd =
3018 get_unaligned_be16(&fsg->cmnd[7]) << 9;
3029 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST, 3019 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
3030 (1<<1) | (0xf<<2) | (3<<7), 1, 3020 (1<<1) | (0xf<<2) | (3<<7), 1,
3031 "WRITE(10)")) == 0) 3021 "WRITE(10)")) == 0)
@@ -3033,7 +3023,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
3033 break; 3023 break;
3034 3024
3035 case SC_WRITE_12: 3025 case SC_WRITE_12:
3036 fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9; 3026 fsg->data_size_from_cmnd =
3027 get_unaligned_be32(&fsg->cmnd[6]) << 9;
3037 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST, 3028 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
3038 (1<<1) | (0xf<<2) | (0xf<<6), 1, 3029 (1<<1) | (0xf<<2) | (0xf<<6), 1,
3039 "WRITE(12)")) == 0) 3030 "WRITE(12)")) == 0)
diff --git a/drivers/usb/gadget/fsl_mx3_udc.c b/drivers/usb/gadget/fsl_mx3_udc.c
new file mode 100644
index 000000000000..4bc2bf3d602e
--- /dev/null
+++ b/drivers/usb/gadget/fsl_mx3_udc.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright (C) 2009
3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
4 *
5 * Description:
6 * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
7 * driver to function correctly on these systems.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/err.h>
17#include <linux/fsl_devices.h>
18#include <linux/platform_device.h>
19
20static struct clk *mxc_ahb_clk;
21static struct clk *mxc_usb_clk;
22
23int fsl_udc_clk_init(struct platform_device *pdev)
24{
25 struct fsl_usb2_platform_data *pdata;
26 unsigned long freq;
27 int ret;
28
29 pdata = pdev->dev.platform_data;
30
31 mxc_ahb_clk = clk_get(&pdev->dev, "usb_ahb");
32 if (IS_ERR(mxc_ahb_clk))
33 return PTR_ERR(mxc_ahb_clk);
34
35 ret = clk_enable(mxc_ahb_clk);
36 if (ret < 0) {
37 dev_err(&pdev->dev, "clk_enable(\"usb_ahb\") failed\n");
38 goto eenahb;
39 }
40
41 /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
42 mxc_usb_clk = clk_get(&pdev->dev, "usb");
43 if (IS_ERR(mxc_usb_clk)) {
44 dev_err(&pdev->dev, "clk_get(\"usb\") failed\n");
45 ret = PTR_ERR(mxc_usb_clk);
46 goto egusb;
47 }
48
49 freq = clk_get_rate(mxc_usb_clk);
50 if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
51 (freq < 59999000 || freq > 60001000)) {
52 dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
53 goto eclkrate;
54 }
55
56 ret = clk_enable(mxc_usb_clk);
57 if (ret < 0) {
58 dev_err(&pdev->dev, "clk_enable(\"usb_clk\") failed\n");
59 goto eenusb;
60 }
61
62 return 0;
63
64eenusb:
65eclkrate:
66 clk_put(mxc_usb_clk);
67 mxc_usb_clk = NULL;
68egusb:
69 clk_disable(mxc_ahb_clk);
70eenahb:
71 clk_put(mxc_ahb_clk);
72 return ret;
73}
74
75void fsl_udc_clk_finalize(struct platform_device *pdev)
76{
77 struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
78
79 /* ULPI transceivers don't need usbpll */
80 if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
81 clk_disable(mxc_usb_clk);
82 clk_put(mxc_usb_clk);
83 mxc_usb_clk = NULL;
84 }
85}
86
87void fsl_udc_clk_release(void)
88{
89 if (mxc_usb_clk) {
90 clk_disable(mxc_usb_clk);
91 clk_put(mxc_usb_clk);
92 }
93 clk_disable(mxc_ahb_clk);
94 clk_put(mxc_ahb_clk);
95}
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_udc_core.c
index 9d7b95d4e3d2..42a74b8a0bb8 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -38,6 +38,7 @@
38#include <linux/platform_device.h> 38#include <linux/platform_device.h>
39#include <linux/fsl_devices.h> 39#include <linux/fsl_devices.h>
40#include <linux/dmapool.h> 40#include <linux/dmapool.h>
41#include <linux/delay.h>
41 42
42#include <asm/byteorder.h> 43#include <asm/byteorder.h>
43#include <asm/io.h> 44#include <asm/io.h>
@@ -57,7 +58,9 @@ static const char driver_name[] = "fsl-usb2-udc";
57static const char driver_desc[] = DRIVER_DESC; 58static const char driver_desc[] = DRIVER_DESC;
58 59
59static struct usb_dr_device *dr_regs; 60static struct usb_dr_device *dr_regs;
61#ifndef CONFIG_ARCH_MXC
60static struct usb_sys_interface *usb_sys_regs; 62static struct usb_sys_interface *usb_sys_regs;
63#endif
61 64
62/* it is initialized in probe() */ 65/* it is initialized in probe() */
63static struct fsl_udc *udc_controller = NULL; 66static struct fsl_udc *udc_controller = NULL;
@@ -174,10 +177,34 @@ static void nuke(struct fsl_ep *ep, int status)
174 177
175static int dr_controller_setup(struct fsl_udc *udc) 178static int dr_controller_setup(struct fsl_udc *udc)
176{ 179{
177 unsigned int tmp = 0, portctrl = 0, ctrl = 0; 180 unsigned int tmp, portctrl;
181#ifndef CONFIG_ARCH_MXC
182 unsigned int ctrl;
183#endif
178 unsigned long timeout; 184 unsigned long timeout;
179#define FSL_UDC_RESET_TIMEOUT 1000 185#define FSL_UDC_RESET_TIMEOUT 1000
180 186
187 /* Config PHY interface */
188 portctrl = fsl_readl(&dr_regs->portsc1);
189 portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
190 switch (udc->phy_mode) {
191 case FSL_USB2_PHY_ULPI:
192 portctrl |= PORTSCX_PTS_ULPI;
193 break;
194 case FSL_USB2_PHY_UTMI_WIDE:
195 portctrl |= PORTSCX_PTW_16BIT;
196 /* fall through */
197 case FSL_USB2_PHY_UTMI:
198 portctrl |= PORTSCX_PTS_UTMI;
199 break;
200 case FSL_USB2_PHY_SERIAL:
201 portctrl |= PORTSCX_PTS_FSLS;
202 break;
203 default:
204 return -EINVAL;
205 }
206 fsl_writel(portctrl, &dr_regs->portsc1);
207
181 /* Stop and reset the usb controller */ 208 /* Stop and reset the usb controller */
182 tmp = fsl_readl(&dr_regs->usbcmd); 209 tmp = fsl_readl(&dr_regs->usbcmd);
183 tmp &= ~USB_CMD_RUN_STOP; 210 tmp &= ~USB_CMD_RUN_STOP;
@@ -215,31 +242,12 @@ static int dr_controller_setup(struct fsl_udc *udc)
215 udc->ep_qh, (int)tmp, 242 udc->ep_qh, (int)tmp,
216 fsl_readl(&dr_regs->endpointlistaddr)); 243 fsl_readl(&dr_regs->endpointlistaddr));
217 244
218 /* Config PHY interface */
219 portctrl = fsl_readl(&dr_regs->portsc1);
220 portctrl &= ~(PORTSCX_PHY_TYPE_SEL | PORTSCX_PORT_WIDTH);
221 switch (udc->phy_mode) {
222 case FSL_USB2_PHY_ULPI:
223 portctrl |= PORTSCX_PTS_ULPI;
224 break;
225 case FSL_USB2_PHY_UTMI_WIDE:
226 portctrl |= PORTSCX_PTW_16BIT;
227 /* fall through */
228 case FSL_USB2_PHY_UTMI:
229 portctrl |= PORTSCX_PTS_UTMI;
230 break;
231 case FSL_USB2_PHY_SERIAL:
232 portctrl |= PORTSCX_PTS_FSLS;
233 break;
234 default:
235 return -EINVAL;
236 }
237 fsl_writel(portctrl, &dr_regs->portsc1);
238
239 /* Config control enable i/o output, cpu endian register */ 245 /* Config control enable i/o output, cpu endian register */
246#ifndef CONFIG_ARCH_MXC
240 ctrl = __raw_readl(&usb_sys_regs->control); 247 ctrl = __raw_readl(&usb_sys_regs->control);
241 ctrl |= USB_CTRL_IOENB; 248 ctrl |= USB_CTRL_IOENB;
242 __raw_writel(ctrl, &usb_sys_regs->control); 249 __raw_writel(ctrl, &usb_sys_regs->control);
250#endif
243 251
244#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 252#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
245 /* Turn on cache snooping hardware, since some PowerPC platforms 253 /* Turn on cache snooping hardware, since some PowerPC platforms
@@ -2043,6 +2051,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
2043 size -= t; 2051 size -= t;
2044 next += t; 2052 next += t;
2045 2053
2054#ifndef CONFIG_ARCH_MXC
2046 tmp_reg = usb_sys_regs->snoop1; 2055 tmp_reg = usb_sys_regs->snoop1;
2047 t = scnprintf(next, size, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg); 2056 t = scnprintf(next, size, "Snoop1 Reg : = [0x%x]\n\n", tmp_reg);
2048 size -= t; 2057 size -= t;
@@ -2053,6 +2062,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count,
2053 tmp_reg); 2062 tmp_reg);
2054 size -= t; 2063 size -= t;
2055 next += t; 2064 next += t;
2065#endif
2056 2066
2057 /* ------fsl_udc, fsl_ep, fsl_request structure information ----- */ 2067 /* ------fsl_udc, fsl_ep, fsl_request structure information ----- */
2058 ep = &udc->eps[0]; 2068 ep = &udc->eps[0];
@@ -2263,14 +2273,21 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
2263 goto err_kfree; 2273 goto err_kfree;
2264 } 2274 }
2265 2275
2266 dr_regs = ioremap(res->start, res->end - res->start + 1); 2276 dr_regs = ioremap(res->start, resource_size(res));
2267 if (!dr_regs) { 2277 if (!dr_regs) {
2268 ret = -ENOMEM; 2278 ret = -ENOMEM;
2269 goto err_release_mem_region; 2279 goto err_release_mem_region;
2270 } 2280 }
2271 2281
2282#ifndef CONFIG_ARCH_MXC
2272 usb_sys_regs = (struct usb_sys_interface *) 2283 usb_sys_regs = (struct usb_sys_interface *)
2273 ((u32)dr_regs + USB_DR_SYS_OFFSET); 2284 ((u32)dr_regs + USB_DR_SYS_OFFSET);
2285#endif
2286
2287 /* Initialize USB clocks */
2288 ret = fsl_udc_clk_init(pdev);
2289 if (ret < 0)
2290 goto err_iounmap_noclk;
2274 2291
2275 /* Read Device Controller Capability Parameters register */ 2292 /* Read Device Controller Capability Parameters register */
2276 dccparams = fsl_readl(&dr_regs->dccparams); 2293 dccparams = fsl_readl(&dr_regs->dccparams);
@@ -2308,6 +2325,8 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
2308 * leave usbintr reg untouched */ 2325 * leave usbintr reg untouched */
2309 dr_controller_setup(udc_controller); 2326 dr_controller_setup(udc_controller);
2310 2327
2328 fsl_udc_clk_finalize(pdev);
2329
2311 /* Setup gadget structure */ 2330 /* Setup gadget structure */
2312 udc_controller->gadget.ops = &fsl_gadget_ops; 2331 udc_controller->gadget.ops = &fsl_gadget_ops;
2313 udc_controller->gadget.is_dualspeed = 1; 2332 udc_controller->gadget.is_dualspeed = 1;
@@ -2362,6 +2381,8 @@ err_unregister:
2362err_free_irq: 2381err_free_irq:
2363 free_irq(udc_controller->irq, udc_controller); 2382 free_irq(udc_controller->irq, udc_controller);
2364err_iounmap: 2383err_iounmap:
2384 fsl_udc_clk_release();
2385err_iounmap_noclk:
2365 iounmap(dr_regs); 2386 iounmap(dr_regs);
2366err_release_mem_region: 2387err_release_mem_region:
2367 release_mem_region(res->start, res->end - res->start + 1); 2388 release_mem_region(res->start, res->end - res->start + 1);
@@ -2384,6 +2405,8 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
2384 return -ENODEV; 2405 return -ENODEV;
2385 udc_controller->done = &done; 2406 udc_controller->done = &done;
2386 2407
2408 fsl_udc_clk_release();
2409
2387 /* DR has been stopped in usb_gadget_unregister_driver() */ 2410 /* DR has been stopped in usb_gadget_unregister_driver() */
2388 remove_proc_file(); 2411 remove_proc_file();
2389 2412
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index e63ef12645f5..20aeceed48c7 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -563,4 +563,22 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
563 * 2 + ((windex & USB_DIR_IN) ? 1 : 0)) 563 * 2 + ((windex & USB_DIR_IN) ? 1 : 0))
564#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP)) 564#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
565 565
566struct platform_device;
567#ifdef CONFIG_ARCH_MXC
568int fsl_udc_clk_init(struct platform_device *pdev);
569void fsl_udc_clk_finalize(struct platform_device *pdev);
570void fsl_udc_clk_release(void);
571#else
572static inline int fsl_udc_clk_init(struct platform_device *pdev)
573{
574 return 0;
575}
576static inline void fsl_udc_clk_finalize(struct platform_device *pdev)
577{
578}
579static inline void fsl_udc_clk_release(void)
580{
581}
582#endif
583
566#endif 584#endif
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index ec6d439a2aa5..8e0e9a0b7364 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -137,6 +137,12 @@
137#define gadget_is_musbhdrc(g) 0 137#define gadget_is_musbhdrc(g) 0
138#endif 138#endif
139 139
140#ifdef CONFIG_USB_GADGET_LANGWELL
141#define gadget_is_langwell(g) (!strcmp("langwell_udc", (g)->name))
142#else
143#define gadget_is_langwell(g) 0
144#endif
145
140/* from Montavista kernel (?) */ 146/* from Montavista kernel (?) */
141#ifdef CONFIG_USB_GADGET_MPC8272 147#ifdef CONFIG_USB_GADGET_MPC8272
142#define gadget_is_mpc8272(g) !strcmp("mpc8272_udc", (g)->name) 148#define gadget_is_mpc8272(g) !strcmp("mpc8272_udc", (g)->name)
@@ -231,6 +237,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
231 return 0x22; 237 return 0x22;
232 else if (gadget_is_ci13xxx(gadget)) 238 else if (gadget_is_ci13xxx(gadget))
233 return 0x23; 239 return 0x23;
240 else if (gadget_is_langwell(gadget))
241 return 0x24;
234 return -ENOENT; 242 return -ENOENT;
235} 243}
236 244
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index de010c939dbb..112bb40a427c 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -110,10 +110,10 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
110 return -EINVAL; 110 return -EINVAL;
111 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 111 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
112 return -ESHUTDOWN; 112 return -ESHUTDOWN;
113 if (ep->num != (desc->bEndpointAddress & 0x0f)) 113 if (ep->num != usb_endpoint_num(desc))
114 return -EINVAL; 114 return -EINVAL;
115 115
116 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { 116 switch (usb_endpoint_type(desc)) {
117 case USB_ENDPOINT_XFER_BULK: 117 case USB_ENDPOINT_XFER_BULK:
118 case USB_ENDPOINT_XFER_INT: 118 case USB_ENDPOINT_XFER_INT:
119 break; 119 break;
@@ -142,7 +142,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
142 /* ep1/ep2 dma direction is chosen early; it works in the other 142 /* ep1/ep2 dma direction is chosen early; it works in the other
143 * direction, with pio. be cautious with out-dma. 143 * direction, with pio. be cautious with out-dma.
144 */ 144 */
145 ep->is_in = (USB_DIR_IN & desc->bEndpointAddress) != 0; 145 ep->is_in = usb_endpoint_dir_in(desc);
146 if (ep->is_in) { 146 if (ep->is_in) {
147 mode |= 1; 147 mode |= 1;
148 ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT); 148 ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index 168658b4b4e2..c52a681f376c 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -415,6 +415,13 @@ static int write_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
415 u8 *buf; 415 u8 *buf;
416 int length, count, temp; 416 int length, count, temp;
417 417
418 if (unlikely(__raw_readl(imx_ep->imx_usb->base +
419 USB_EP_STAT(EP_NO(imx_ep))) & EPSTAT_ZLPS)) {
420 D_TRX(imx_ep->imx_usb->dev, "<%s> zlp still queued in EP %s\n",
421 __func__, imx_ep->ep.name);
422 return -1;
423 }
424
418 buf = req->req.buf + req->req.actual; 425 buf = req->req.buf + req->req.actual;
419 prefetch(buf); 426 prefetch(buf);
420 427
@@ -734,9 +741,12 @@ static struct usb_request *imx_ep_alloc_request
734{ 741{
735 struct imx_request *req; 742 struct imx_request *req;
736 743
744 if (!usb_ep)
745 return NULL;
746
737 req = kzalloc(sizeof *req, gfp_flags); 747 req = kzalloc(sizeof *req, gfp_flags);
738 if (!req || !usb_ep) 748 if (!req)
739 return 0; 749 return NULL;
740 750
741 INIT_LIST_HEAD(&req->queue); 751 INIT_LIST_HEAD(&req->queue);
742 req->in_use = 0; 752 req->in_use = 0;
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index d20937f28a19..7d33f50b5874 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -384,9 +384,8 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
384 return value; 384 return value;
385 385
386 /* halt any endpoint by doing a "wrong direction" i/o call */ 386 /* halt any endpoint by doing a "wrong direction" i/o call */
387 if (data->desc.bEndpointAddress & USB_DIR_IN) { 387 if (usb_endpoint_dir_in(&data->desc)) {
388 if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 388 if (usb_endpoint_xfer_isoc(&data->desc))
389 == USB_ENDPOINT_XFER_ISOC)
390 return -EINVAL; 389 return -EINVAL;
391 DBG (data->dev, "%s halt\n", data->name); 390 DBG (data->dev, "%s halt\n", data->name);
392 spin_lock_irq (&data->dev->lock); 391 spin_lock_irq (&data->dev->lock);
@@ -428,9 +427,8 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
428 return value; 427 return value;
429 428
430 /* halt any endpoint by doing a "wrong direction" i/o call */ 429 /* halt any endpoint by doing a "wrong direction" i/o call */
431 if (!(data->desc.bEndpointAddress & USB_DIR_IN)) { 430 if (!usb_endpoint_dir_in(&data->desc)) {
432 if ((data->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 431 if (usb_endpoint_xfer_isoc(&data->desc))
433 == USB_ENDPOINT_XFER_ISOC)
434 return -EINVAL; 432 return -EINVAL;
435 DBG (data->dev, "%s halt\n", data->name); 433 DBG (data->dev, "%s halt\n", data->name);
436 spin_lock_irq (&data->dev->lock); 434 spin_lock_irq (&data->dev->lock);
@@ -691,7 +689,7 @@ ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
691 struct ep_data *epdata = iocb->ki_filp->private_data; 689 struct ep_data *epdata = iocb->ki_filp->private_data;
692 char *buf; 690 char *buf;
693 691
694 if (unlikely(epdata->desc.bEndpointAddress & USB_DIR_IN)) 692 if (unlikely(usb_endpoint_dir_in(&epdata->desc)))
695 return -EINVAL; 693 return -EINVAL;
696 694
697 buf = kmalloc(iocb->ki_left, GFP_KERNEL); 695 buf = kmalloc(iocb->ki_left, GFP_KERNEL);
@@ -711,7 +709,7 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
711 size_t len = 0; 709 size_t len = 0;
712 int i = 0; 710 int i = 0;
713 711
714 if (unlikely(!(epdata->desc.bEndpointAddress & USB_DIR_IN))) 712 if (unlikely(!usb_endpoint_dir_in(&epdata->desc)))
715 return -EINVAL; 713 return -EINVAL;
716 714
717 buf = kmalloc(iocb->ki_left, GFP_KERNEL); 715 buf = kmalloc(iocb->ki_left, GFP_KERNEL);
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
new file mode 100644
index 000000000000..6829d5961359
--- /dev/null
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -0,0 +1,3373 @@
1/*
2 * Intel Langwell USB Device Controller driver
3 * Copyright (C) 2008-2009, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20
21/* #undef DEBUG */
22/* #undef VERBOSE */
23
24#if defined(CONFIG_USB_LANGWELL_OTG)
25#define OTG_TRANSCEIVER
26#endif
27
28
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/kernel.h>
33#include <linux/delay.h>
34#include <linux/ioport.h>
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/smp_lock.h>
38#include <linux/errno.h>
39#include <linux/init.h>
40#include <linux/timer.h>
41#include <linux/list.h>
42#include <linux/interrupt.h>
43#include <linux/moduleparam.h>
44#include <linux/device.h>
45#include <linux/usb/ch9.h>
46#include <linux/usb/gadget.h>
47#include <linux/usb/otg.h>
48#include <linux/pm.h>
49#include <linux/io.h>
50#include <linux/irq.h>
51#include <asm/system.h>
52#include <asm/unaligned.h>
53
54#include "langwell_udc.h"
55
56
57#define DRIVER_DESC "Intel Langwell USB Device Controller driver"
58#define DRIVER_VERSION "16 May 2009"
59
60static const char driver_name[] = "langwell_udc";
61static const char driver_desc[] = DRIVER_DESC;
62
63
64/* controller device global variable */
65static struct langwell_udc *the_controller;
66
67/* for endpoint 0 operations */
68static const struct usb_endpoint_descriptor
69langwell_ep0_desc = {
70 .bLength = USB_DT_ENDPOINT_SIZE,
71 .bDescriptorType = USB_DT_ENDPOINT,
72 .bEndpointAddress = 0,
73 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
74 .wMaxPacketSize = EP0_MAX_PKT_SIZE,
75};
76
77
78/*-------------------------------------------------------------------------*/
79/* debugging */
80
81#ifdef DEBUG
82#define DBG(dev, fmt, args...) \
83 pr_debug("%s %s: " fmt , driver_name, \
84 pci_name(dev->pdev), ## args)
85#else
86#define DBG(dev, fmt, args...) \
87 do { } while (0)
88#endif /* DEBUG */
89
90
91#ifdef VERBOSE
92#define VDBG DBG
93#else
94#define VDBG(dev, fmt, args...) \
95 do { } while (0)
96#endif /* VERBOSE */
97
98
99#define ERROR(dev, fmt, args...) \
100 pr_err("%s %s: " fmt , driver_name, \
101 pci_name(dev->pdev), ## args)
102
103#define WARNING(dev, fmt, args...) \
104 pr_warning("%s %s: " fmt , driver_name, \
105 pci_name(dev->pdev), ## args)
106
107#define INFO(dev, fmt, args...) \
108 pr_info("%s %s: " fmt , driver_name, \
109 pci_name(dev->pdev), ## args)
110
111
112#ifdef VERBOSE
113static inline void print_all_registers(struct langwell_udc *dev)
114{
115 int i;
116
117 /* Capability Registers */
118 printk(KERN_DEBUG "Capability Registers (offset: "
119 "0x%04x, length: 0x%08x)\n",
120 CAP_REG_OFFSET,
121 (u32)sizeof(struct langwell_cap_regs));
122 printk(KERN_DEBUG "caplength=0x%02x\n",
123 readb(&dev->cap_regs->caplength));
124 printk(KERN_DEBUG "hciversion=0x%04x\n",
125 readw(&dev->cap_regs->hciversion));
126 printk(KERN_DEBUG "hcsparams=0x%08x\n",
127 readl(&dev->cap_regs->hcsparams));
128 printk(KERN_DEBUG "hccparams=0x%08x\n",
129 readl(&dev->cap_regs->hccparams));
130 printk(KERN_DEBUG "dciversion=0x%04x\n",
131 readw(&dev->cap_regs->dciversion));
132 printk(KERN_DEBUG "dccparams=0x%08x\n",
133 readl(&dev->cap_regs->dccparams));
134
135 /* Operational Registers */
136 printk(KERN_DEBUG "Operational Registers (offset: "
137 "0x%04x, length: 0x%08x)\n",
138 OP_REG_OFFSET,
139 (u32)sizeof(struct langwell_op_regs));
140 printk(KERN_DEBUG "extsts=0x%08x\n",
141 readl(&dev->op_regs->extsts));
142 printk(KERN_DEBUG "extintr=0x%08x\n",
143 readl(&dev->op_regs->extintr));
144 printk(KERN_DEBUG "usbcmd=0x%08x\n",
145 readl(&dev->op_regs->usbcmd));
146 printk(KERN_DEBUG "usbsts=0x%08x\n",
147 readl(&dev->op_regs->usbsts));
148 printk(KERN_DEBUG "usbintr=0x%08x\n",
149 readl(&dev->op_regs->usbintr));
150 printk(KERN_DEBUG "frindex=0x%08x\n",
151 readl(&dev->op_regs->frindex));
152 printk(KERN_DEBUG "ctrldssegment=0x%08x\n",
153 readl(&dev->op_regs->ctrldssegment));
154 printk(KERN_DEBUG "deviceaddr=0x%08x\n",
155 readl(&dev->op_regs->deviceaddr));
156 printk(KERN_DEBUG "endpointlistaddr=0x%08x\n",
157 readl(&dev->op_regs->endpointlistaddr));
158 printk(KERN_DEBUG "ttctrl=0x%08x\n",
159 readl(&dev->op_regs->ttctrl));
160 printk(KERN_DEBUG "burstsize=0x%08x\n",
161 readl(&dev->op_regs->burstsize));
162 printk(KERN_DEBUG "txfilltuning=0x%08x\n",
163 readl(&dev->op_regs->txfilltuning));
164 printk(KERN_DEBUG "txttfilltuning=0x%08x\n",
165 readl(&dev->op_regs->txttfilltuning));
166 printk(KERN_DEBUG "ic_usb=0x%08x\n",
167 readl(&dev->op_regs->ic_usb));
168 printk(KERN_DEBUG "ulpi_viewport=0x%08x\n",
169 readl(&dev->op_regs->ulpi_viewport));
170 printk(KERN_DEBUG "configflag=0x%08x\n",
171 readl(&dev->op_regs->configflag));
172 printk(KERN_DEBUG "portsc1=0x%08x\n",
173 readl(&dev->op_regs->portsc1));
174 printk(KERN_DEBUG "devlc=0x%08x\n",
175 readl(&dev->op_regs->devlc));
176 printk(KERN_DEBUG "otgsc=0x%08x\n",
177 readl(&dev->op_regs->otgsc));
178 printk(KERN_DEBUG "usbmode=0x%08x\n",
179 readl(&dev->op_regs->usbmode));
180 printk(KERN_DEBUG "endptnak=0x%08x\n",
181 readl(&dev->op_regs->endptnak));
182 printk(KERN_DEBUG "endptnaken=0x%08x\n",
183 readl(&dev->op_regs->endptnaken));
184 printk(KERN_DEBUG "endptsetupstat=0x%08x\n",
185 readl(&dev->op_regs->endptsetupstat));
186 printk(KERN_DEBUG "endptprime=0x%08x\n",
187 readl(&dev->op_regs->endptprime));
188 printk(KERN_DEBUG "endptflush=0x%08x\n",
189 readl(&dev->op_regs->endptflush));
190 printk(KERN_DEBUG "endptstat=0x%08x\n",
191 readl(&dev->op_regs->endptstat));
192 printk(KERN_DEBUG "endptcomplete=0x%08x\n",
193 readl(&dev->op_regs->endptcomplete));
194
195 for (i = 0; i < dev->ep_max / 2; i++) {
196 printk(KERN_DEBUG "endptctrl[%d]=0x%08x\n",
197 i, readl(&dev->op_regs->endptctrl[i]));
198 }
199}
200#endif /* VERBOSE */
201
202
203/*-------------------------------------------------------------------------*/
204
205#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
206
207#define is_in(ep) (((ep)->ep_num == 0) ? ((ep)->dev->ep0_dir == \
208 USB_DIR_IN) : ((ep)->desc->bEndpointAddress \
209 & USB_DIR_IN) == USB_DIR_IN)
210
211
212#ifdef DEBUG
213static char *type_string(u8 bmAttributes)
214{
215 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
216 case USB_ENDPOINT_XFER_BULK:
217 return "bulk";
218 case USB_ENDPOINT_XFER_ISOC:
219 return "iso";
220 case USB_ENDPOINT_XFER_INT:
221 return "int";
222 };
223
224 return "control";
225}
226#endif
227
228
229/* configure endpoint control registers */
230static void ep_reset(struct langwell_ep *ep, unsigned char ep_num,
231 unsigned char is_in, unsigned char ep_type)
232{
233 struct langwell_udc *dev;
234 u32 endptctrl;
235
236 dev = ep->dev;
237 VDBG(dev, "---> %s()\n", __func__);
238
239 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
240 if (is_in) { /* TX */
241 if (ep_num)
242 endptctrl |= EPCTRL_TXR;
243 endptctrl |= EPCTRL_TXE;
244 endptctrl |= ep_type << EPCTRL_TXT_SHIFT;
245 } else { /* RX */
246 if (ep_num)
247 endptctrl |= EPCTRL_RXR;
248 endptctrl |= EPCTRL_RXE;
249 endptctrl |= ep_type << EPCTRL_RXT_SHIFT;
250 }
251
252 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
253
254 VDBG(dev, "<--- %s()\n", __func__);
255}
256
257
258/* reset ep0 dQH and endptctrl */
259static void ep0_reset(struct langwell_udc *dev)
260{
261 struct langwell_ep *ep;
262 int i;
263
264 VDBG(dev, "---> %s()\n", __func__);
265
266 /* ep0 in and out */
267 for (i = 0; i < 2; i++) {
268 ep = &dev->ep[i];
269 ep->dev = dev;
270
271 /* ep0 dQH */
272 ep->dqh = &dev->ep_dqh[i];
273
274 /* configure ep0 endpoint capabilities in dQH */
275 ep->dqh->dqh_ios = 1;
276 ep->dqh->dqh_mpl = EP0_MAX_PKT_SIZE;
277
278 /* FIXME: enable ep0-in HW zero length termination select */
279 if (is_in(ep))
280 ep->dqh->dqh_zlt = 0;
281 ep->dqh->dqh_mult = 0;
282
283 /* configure ep0 control registers */
284 ep_reset(&dev->ep[0], 0, i, USB_ENDPOINT_XFER_CONTROL);
285 }
286
287 VDBG(dev, "<--- %s()\n", __func__);
288 return;
289}
290
291
292/*-------------------------------------------------------------------------*/
293
294/* endpoints operations */
295
296/* configure endpoint, making it usable */
297static int langwell_ep_enable(struct usb_ep *_ep,
298 const struct usb_endpoint_descriptor *desc)
299{
300 struct langwell_udc *dev;
301 struct langwell_ep *ep;
302 u16 max = 0;
303 unsigned long flags;
304 int retval = 0;
305 unsigned char zlt, ios = 0, mult = 0;
306
307 ep = container_of(_ep, struct langwell_ep, ep);
308 dev = ep->dev;
309 VDBG(dev, "---> %s()\n", __func__);
310
311 if (!_ep || !desc || ep->desc
312 || desc->bDescriptorType != USB_DT_ENDPOINT)
313 return -EINVAL;
314
315 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
316 return -ESHUTDOWN;
317
318 max = le16_to_cpu(desc->wMaxPacketSize);
319
320 /*
321 * disable HW zero length termination select
322 * driver handles zero length packet through req->req.zero
323 */
324 zlt = 1;
325
326 /*
327 * sanity check type, direction, address, and then
328 * initialize the endpoint capabilities fields in dQH
329 */
330 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
331 case USB_ENDPOINT_XFER_CONTROL:
332 ios = 1;
333 break;
334 case USB_ENDPOINT_XFER_BULK:
335 if ((dev->gadget.speed == USB_SPEED_HIGH
336 && max != 512)
337 || (dev->gadget.speed == USB_SPEED_FULL
338 && max > 64)) {
339 goto done;
340 }
341 break;
342 case USB_ENDPOINT_XFER_INT:
343 if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
344 goto done;
345
346 switch (dev->gadget.speed) {
347 case USB_SPEED_HIGH:
348 if (max <= 1024)
349 break;
350 case USB_SPEED_FULL:
351 if (max <= 64)
352 break;
353 default:
354 if (max <= 8)
355 break;
356 goto done;
357 }
358 break;
359 case USB_ENDPOINT_XFER_ISOC:
360 if (strstr(ep->ep.name, "-bulk")
361 || strstr(ep->ep.name, "-int"))
362 goto done;
363
364 switch (dev->gadget.speed) {
365 case USB_SPEED_HIGH:
366 if (max <= 1024)
367 break;
368 case USB_SPEED_FULL:
369 if (max <= 1023)
370 break;
371 default:
372 goto done;
373 }
374 /*
375 * FIXME:
376 * calculate transactions needed for high bandwidth iso
377 */
378 mult = (unsigned char)(1 + ((max >> 11) & 0x03));
379 max = max & 0x8ff; /* bit 0~10 */
380 /* 3 transactions at most */
381 if (mult > 3)
382 goto done;
383 break;
384 default:
385 goto done;
386 }
387
388 spin_lock_irqsave(&dev->lock, flags);
389
390 /* configure endpoint capabilities in dQH */
391 ep->dqh->dqh_ios = ios;
392 ep->dqh->dqh_mpl = cpu_to_le16(max);
393 ep->dqh->dqh_zlt = zlt;
394 ep->dqh->dqh_mult = mult;
395
396 ep->ep.maxpacket = max;
397 ep->desc = desc;
398 ep->stopped = 0;
399 ep->ep_num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
400
401 /* ep_type */
402 ep->ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
403
404 /* configure endpoint control registers */
405 ep_reset(ep, ep->ep_num, is_in(ep), ep->ep_type);
406
407 DBG(dev, "enabled %s (ep%d%s-%s), max %04x\n",
408 _ep->name,
409 ep->ep_num,
410 DIR_STRING(desc->bEndpointAddress),
411 type_string(desc->bmAttributes),
412 max);
413
414 spin_unlock_irqrestore(&dev->lock, flags);
415done:
416 VDBG(dev, "<--- %s()\n", __func__);
417 return retval;
418}
419
420
421/*-------------------------------------------------------------------------*/
422
423/* retire a request */
424static void done(struct langwell_ep *ep, struct langwell_request *req,
425 int status)
426{
427 struct langwell_udc *dev = ep->dev;
428 unsigned stopped = ep->stopped;
429 struct langwell_dtd *curr_dtd, *next_dtd;
430 int i;
431
432 VDBG(dev, "---> %s()\n", __func__);
433
434 /* remove the req from ep->queue */
435 list_del_init(&req->queue);
436
437 if (req->req.status == -EINPROGRESS)
438 req->req.status = status;
439 else
440 status = req->req.status;
441
442 /* free dTD for the request */
443 next_dtd = req->head;
444 for (i = 0; i < req->dtd_count; i++) {
445 curr_dtd = next_dtd;
446 if (i != req->dtd_count - 1)
447 next_dtd = curr_dtd->next_dtd_virt;
448 dma_pool_free(dev->dtd_pool, curr_dtd, curr_dtd->dtd_dma);
449 }
450
451 if (req->mapped) {
452 dma_unmap_single(&dev->pdev->dev, req->req.dma, req->req.length,
453 is_in(ep) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
454 req->req.dma = DMA_ADDR_INVALID;
455 req->mapped = 0;
456 } else
457 dma_sync_single_for_cpu(&dev->pdev->dev, req->req.dma,
458 req->req.length,
459 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
460
461 if (status != -ESHUTDOWN)
462 DBG(dev, "complete %s, req %p, stat %d, len %u/%u\n",
463 ep->ep.name, &req->req, status,
464 req->req.actual, req->req.length);
465
466 /* don't modify queue heads during completion callback */
467 ep->stopped = 1;
468
469 spin_unlock(&dev->lock);
470 /* complete routine from gadget driver */
471 if (req->req.complete)
472 req->req.complete(&ep->ep, &req->req);
473
474 spin_lock(&dev->lock);
475 ep->stopped = stopped;
476
477 VDBG(dev, "<--- %s()\n", __func__);
478}
479
480
481static void langwell_ep_fifo_flush(struct usb_ep *_ep);
482
483/* delete all endpoint requests, called with spinlock held */
484static void nuke(struct langwell_ep *ep, int status)
485{
486 /* called with spinlock held */
487 ep->stopped = 1;
488
489 /* endpoint fifo flush */
490 if (&ep->ep && ep->desc)
491 langwell_ep_fifo_flush(&ep->ep);
492
493 while (!list_empty(&ep->queue)) {
494 struct langwell_request *req = NULL;
495 req = list_entry(ep->queue.next, struct langwell_request,
496 queue);
497 done(ep, req, status);
498 }
499}
500
501
502/*-------------------------------------------------------------------------*/
503
504/* endpoint is no longer usable */
505static int langwell_ep_disable(struct usb_ep *_ep)
506{
507 struct langwell_ep *ep;
508 unsigned long flags;
509 struct langwell_udc *dev;
510 int ep_num;
511 u32 endptctrl;
512
513 ep = container_of(_ep, struct langwell_ep, ep);
514 dev = ep->dev;
515 VDBG(dev, "---> %s()\n", __func__);
516
517 if (!_ep || !ep->desc)
518 return -EINVAL;
519
520 spin_lock_irqsave(&dev->lock, flags);
521
522 /* disable endpoint control register */
523 ep_num = ep->ep_num;
524 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
525 if (is_in(ep))
526 endptctrl &= ~EPCTRL_TXE;
527 else
528 endptctrl &= ~EPCTRL_RXE;
529 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
530
531 /* nuke all pending requests (does flush) */
532 nuke(ep, -ESHUTDOWN);
533
534 ep->desc = NULL;
535 ep->stopped = 1;
536
537 spin_unlock_irqrestore(&dev->lock, flags);
538
539 DBG(dev, "disabled %s\n", _ep->name);
540 VDBG(dev, "<--- %s()\n", __func__);
541
542 return 0;
543}
544
545
546/* allocate a request object to use with this endpoint */
547static struct usb_request *langwell_alloc_request(struct usb_ep *_ep,
548 gfp_t gfp_flags)
549{
550 struct langwell_ep *ep;
551 struct langwell_udc *dev;
552 struct langwell_request *req = NULL;
553
554 if (!_ep)
555 return NULL;
556
557 ep = container_of(_ep, struct langwell_ep, ep);
558 dev = ep->dev;
559 VDBG(dev, "---> %s()\n", __func__);
560
561 req = kzalloc(sizeof(*req), gfp_flags);
562 if (!req)
563 return NULL;
564
565 req->req.dma = DMA_ADDR_INVALID;
566 INIT_LIST_HEAD(&req->queue);
567
568 VDBG(dev, "alloc request for %s\n", _ep->name);
569 VDBG(dev, "<--- %s()\n", __func__);
570 return &req->req;
571}
572
573
574/* free a request object */
575static void langwell_free_request(struct usb_ep *_ep,
576 struct usb_request *_req)
577{
578 struct langwell_ep *ep;
579 struct langwell_udc *dev;
580 struct langwell_request *req = NULL;
581
582 ep = container_of(_ep, struct langwell_ep, ep);
583 dev = ep->dev;
584 VDBG(dev, "---> %s()\n", __func__);
585
586 if (!_ep || !_req)
587 return;
588
589 req = container_of(_req, struct langwell_request, req);
590 WARN_ON(!list_empty(&req->queue));
591
592 if (_req)
593 kfree(req);
594
595 VDBG(dev, "free request for %s\n", _ep->name);
596 VDBG(dev, "<--- %s()\n", __func__);
597}
598
599
600/*-------------------------------------------------------------------------*/
601
/*
 * queue dTD and PRIME endpoint
 *
 * Hooks a request's dTD chain into the endpoint's dQH and asks the
 * controller to (re)scan it.  If other requests are already queued,
 * the new chain is appended to the last dTD and the ATDTW ("add dTD
 * tripwire") semaphore is used to take a consistent snapshot of the
 * endpoint status while the hardware may be retiring dTDs.
 * Called with dev->lock held.  Always returns 0.
 */
static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
{
	u32			bit_mask, usbcmd, endptstat, dtd_dma;
	u8			dtd_status;
	int			i;
	struct langwell_dqh	*dqh;
	struct langwell_udc	*dev;

	dev = ep->dev;
	VDBG(dev, "---> %s()\n", __func__);

	/* dQH index: two entries (OUT, IN) per endpoint number */
	i = ep->ep_num * 2 + is_in(ep);
	dqh = &dev->ep_dqh[i];

	if (ep->ep_num)
		VDBG(dev, "%s\n", ep->name);
	else
		/* ep0 */
		VDBG(dev, "%s-%s\n", ep->name, is_in(ep) ? "in" : "out");

	VDBG(dev, "ep_dqh[%d] addr: 0x%08x\n", i, (u32)&(dev->ep_dqh[i]));

	/* per-endpoint bit in the prime/status registers:
	 * TX bits occupy the upper half-word, RX bits the lower */
	bit_mask = is_in(ep) ?
		(1 << (ep->ep_num + 16)) : (1 << (ep->ep_num));

	VDBG(dev, "bit_mask = 0x%08x\n", bit_mask);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		/* add dTD to the end of linked list */
		struct langwell_request	*lastreq;
		lastreq = list_entry(ep->queue.prev,
				struct langwell_request, queue);

		lastreq->tail->dtd_next =
			cpu_to_le32(req->head->dtd_dma & DTD_NEXT_MASK);

		/* read prime bit, if 1 goto out */
		if (readl(&dev->op_regs->endptprime) & bit_mask)
			goto out;

		/* ATDTW tripwire: retry until the bit survives the status
		 * read, guaranteeing the snapshot wasn't invalidated by a
		 * concurrent dTD retirement in hardware */
		do {
			/* set ATDTW bit in USBCMD */
			usbcmd = readl(&dev->op_regs->usbcmd);
			writel(usbcmd | CMD_ATDTW, &dev->op_regs->usbcmd);

			/* read correct status bit */
			endptstat = readl(&dev->op_regs->endptstat) & bit_mask;

		} while (!(readl(&dev->op_regs->usbcmd) & CMD_ATDTW));

		/* write ATDTW bit to 0 */
		usbcmd = readl(&dev->op_regs->usbcmd);
		writel(usbcmd & ~CMD_ATDTW, &dev->op_regs->usbcmd);

		/* endpoint still active: hardware will pick up the appended
		 * dTD by itself, no re-prime needed */
		if (endptstat)
			goto out;
	}

	/* write dQH next pointer and terminate bit to 0 */
	dtd_dma = req->head->dtd_dma & DTD_NEXT_MASK;
	dqh->dtd_next = cpu_to_le32(dtd_dma);

	/* clear active and halt bit */
	dtd_status = (u8) ~(DTD_STS_ACTIVE | DTD_STS_HALTED);
	dqh->dtd_status &= dtd_status;
	VDBG(dev, "dqh->dtd_status = 0x%x\n", dqh->dtd_status);

	/* write 1 to endptprime register to PRIME endpoint */
	bit_mask = is_in(ep) ? (1 << (ep->ep_num + 16)) : (1 << ep->ep_num);
	VDBG(dev, "endprime bit_mask = 0x%08x\n", bit_mask);
	writel(bit_mask, &dev->op_regs->endptprime);
out:
	VDBG(dev, "<--- %s()\n", __func__);
	return 0;
}
679
680
/*
 * fill in the dTD structure to build a transfer descriptor
 *
 * Allocates one dTD from the DMA pool and programs it for up to
 * DTD_MAX_TRANSFER_LENGTH bytes of req, advancing req->req.actual.
 * On return, *length is the byte count covered by this dTD, *dma its
 * bus address, and *is_last whether the request needs no further dTDs
 * (taking the req->req.zero short-packet rule into account).
 * Returns NULL if the pool allocation fails.
 */
static struct langwell_dtd *build_dtd(struct langwell_request *req,
		unsigned *length, dma_addr_t *dma, int *is_last)
{
	u32			buf_ptr;
	struct langwell_dtd	*dtd;
	struct langwell_udc	*dev;
	int			i;

	dev = req->ep->dev;
	VDBG(dev, "---> %s()\n", __func__);

	/* the maximum transfer length, up to 16k bytes */
	*length = min(req->req.length - req->req.actual,
			(unsigned)DTD_MAX_TRANSFER_LENGTH);

	/* create dTD dma_pool resource */
	dtd = dma_pool_alloc(dev->dtd_pool, GFP_KERNEL, dma);
	if (dtd == NULL)
		return dtd;
	dtd->dtd_dma = *dma;

	/* initialize buffer page pointers
	 * NOTE(review): pointers 1..4 are buf_ptr + i*PAGE_SIZE even when
	 * buf_ptr is not page aligned — assumes the controller uses only
	 * the page-offset bits of pointer 0; confirm against the datasheet.
	 */
	buf_ptr = (u32)(req->req.dma + req->req.actual);
	for (i = 0; i < 5; i++)
		dtd->dtd_buf[i] = cpu_to_le32(buf_ptr + i * PAGE_SIZE);

	req->req.actual += *length;

	/* fill in total bytes with transfer size */
	dtd->dtd_total = cpu_to_le16(*length);
	VDBG(dev, "dtd->dtd_total = %d\n", dtd->dtd_total);

	/* set is_last flag if req->req.zero is set or not */
	if (req->req.zero) {
		/* a trailing ZLP was requested: this dTD is only the last
		 * one if the transfer already ends on a short packet */
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual) {
		*is_last = 1;
	} else
		*is_last = 0;

	if (*is_last == 0)
		VDBG(dev, "multi-dtd request!\n");

	/* set interrupt on complete bit for the last dTD */
	if (*is_last && !req->req.no_interrupt)
		dtd->dtd_ioc = 1;

	/* set multiplier override 0 for non-ISO and non-TX endpoint */
	dtd->dtd_multo = 0;

	/* set the active bit of status field to 1 */
	dtd->dtd_status = DTD_STS_ACTIVE;
	VDBG(dev, "dtd->dtd_status = 0x%02x\n", dtd->dtd_status);

	VDBG(dev, "length = %d, dma addr= 0x%08x\n", *length, (int)*dma);
	VDBG(dev, "<--- %s()\n", __func__);
	return dtd;
}
743
744
745/* generate dTD linked list for a request */
746static int req_to_dtd(struct langwell_request *req)
747{
748 unsigned count;
749 int is_last, is_first = 1;
750 struct langwell_dtd *dtd, *last_dtd = NULL;
751 struct langwell_udc *dev;
752 dma_addr_t dma;
753
754 dev = req->ep->dev;
755 VDBG(dev, "---> %s()\n", __func__);
756 do {
757 dtd = build_dtd(req, &count, &dma, &is_last);
758 if (dtd == NULL)
759 return -ENOMEM;
760
761 if (is_first) {
762 is_first = 0;
763 req->head = dtd;
764 } else {
765 last_dtd->dtd_next = cpu_to_le32(dma);
766 last_dtd->next_dtd_virt = dtd;
767 }
768 last_dtd = dtd;
769 req->dtd_count++;
770 } while (!is_last);
771
772 /* set terminate bit to 1 for the last dTD */
773 dtd->dtd_next = DTD_TERM;
774
775 req->tail = dtd;
776
777 VDBG(dev, "<--- %s()\n", __func__);
778 return 0;
779}
780
781/*-------------------------------------------------------------------------*/
782
783/* queue (submits) an I/O requests to an endpoint */
784static int langwell_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
785 gfp_t gfp_flags)
786{
787 struct langwell_request *req;
788 struct langwell_ep *ep;
789 struct langwell_udc *dev;
790 unsigned long flags;
791 int is_iso = 0, zlflag = 0;
792
793 /* always require a cpu-view buffer */
794 req = container_of(_req, struct langwell_request, req);
795 ep = container_of(_ep, struct langwell_ep, ep);
796
797 if (!_req || !_req->complete || !_req->buf
798 || !list_empty(&req->queue)) {
799 return -EINVAL;
800 }
801
802 if (unlikely(!_ep || !ep->desc))
803 return -EINVAL;
804
805 dev = ep->dev;
806 req->ep = ep;
807 VDBG(dev, "---> %s()\n", __func__);
808
809 if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
810 if (req->req.length > ep->ep.maxpacket)
811 return -EMSGSIZE;
812 is_iso = 1;
813 }
814
815 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
816 return -ESHUTDOWN;
817
818 /* set up dma mapping in case the caller didn't */
819 if (_req->dma == DMA_ADDR_INVALID) {
820 /* WORKAROUND: WARN_ON(size == 0) */
821 if (_req->length == 0) {
822 VDBG(dev, "req->length: 0->1\n");
823 zlflag = 1;
824 _req->length++;
825 }
826
827 _req->dma = dma_map_single(&dev->pdev->dev,
828 _req->buf, _req->length,
829 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
830 if (zlflag && (_req->length == 1)) {
831 VDBG(dev, "req->length: 1->0\n");
832 zlflag = 0;
833 _req->length = 0;
834 }
835
836 req->mapped = 1;
837 VDBG(dev, "req->mapped = 1\n");
838 } else {
839 dma_sync_single_for_device(&dev->pdev->dev,
840 _req->dma, _req->length,
841 is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
842 req->mapped = 0;
843 VDBG(dev, "req->mapped = 0\n");
844 }
845
846 DBG(dev, "%s queue req %p, len %u, buf %p, dma 0x%08x\n",
847 _ep->name,
848 _req, _req->length, _req->buf, _req->dma);
849
850 _req->status = -EINPROGRESS;
851 _req->actual = 0;
852 req->dtd_count = 0;
853
854 spin_lock_irqsave(&dev->lock, flags);
855
856 /* build and put dTDs to endpoint queue */
857 if (!req_to_dtd(req)) {
858 queue_dtd(ep, req);
859 } else {
860 spin_unlock_irqrestore(&dev->lock, flags);
861 return -ENOMEM;
862 }
863
864 /* update ep0 state */
865 if (ep->ep_num == 0)
866 dev->ep0_state = DATA_STATE_XMIT;
867
868 if (likely(req != NULL)) {
869 list_add_tail(&req->queue, &ep->queue);
870 VDBG(dev, "list_add_tail() \n");
871 }
872
873 spin_unlock_irqrestore(&dev->lock, flags);
874
875 VDBG(dev, "<--- %s()\n", __func__);
876 return 0;
877}
878
879
/*
 * dequeue (cancels, unlinks) an I/O request from an endpoint
 *
 * Temporarily disables the endpoint so the DMA engine is quiesced,
 * unlinks the request from the dTD chain (patching either the dQH or
 * the previous request's tail dTD), retires it with -ECONNRESET and
 * re-enables the endpoint.  Returns 0, -EINVAL on bad arguments or if
 * the request is not queued here, -ESHUTDOWN with no bound driver.
 */
static int langwell_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct langwell_ep	*ep;
	struct langwell_udc	*dev;
	struct langwell_request	*req;
	unsigned long		flags;
	int			stopped, ep_num, retval = 0;
	u32			endptctrl;

	ep = container_of(_ep, struct langwell_ep, ep);
	dev = ep->dev;
	VDBG(dev, "---> %s()\n", __func__);

	if (!_ep || !ep->desc || !_req)
		return -EINVAL;

	if (!dev->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, flags);
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	ep->stopped = 1;
	ep_num = ep->ep_num;

	/* disable endpoint control register */
	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
	if (is_in(ep))
		endptctrl &= ~EPCTRL_TXE;
	else
		endptctrl &= ~EPCTRL_RXE;
	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}

	/* if _req was not found, req now aliases the list head sentinel */
	if (&req->req != _req) {
		retval = -EINVAL;
		goto done;
	}

	/* queue head may be partially complete. */
	if (ep->queue.next == &req->queue) {
		DBG(dev, "unlink (%s) dma\n", _ep->name);
		_req->status = -ECONNRESET;
		langwell_ep_fifo_flush(&ep->ep);

		/* not the last request in endpoint queue */
		/* NOTE(review): this condition duplicates the outer test and
		 * is always true here; it was presumably meant to check
		 * req->queue.next != &ep->queue — confirm intent. */
		if (likely(ep->queue.next == &req->queue)) {
			struct langwell_dqh	*dqh;
			struct langwell_request	*next_req;

			dqh = ep->dqh;
			next_req = list_entry(req->queue.next,
					struct langwell_request, queue);

			/* point the dQH to the first dTD of next request */
			/* NOTE(review): this stores the dTD's *virtual*
			 * address; the controller expects a DMA address
			 * (next_req->head->dtd_dma) — verify on hardware. */
			writel((u32) next_req->head, &dqh->dqh_current);
		}
	} else {
		struct langwell_request	*prev_req;

		/* unlink from the middle/tail: splice this request's
		 * successor dTD into the previous request's tail dTD */
		prev_req = list_entry(req->queue.prev,
				struct langwell_request, queue);
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);
	}

	done(ep, req, -ECONNRESET);

done:
	/* enable endpoint again */
	endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
	if (is_in(ep))
		endptctrl |= EPCTRL_TXE;
	else
		endptctrl |= EPCTRL_RXE;
	writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);

	ep->stopped = stopped;
	spin_unlock_irqrestore(&dev->lock, flags);

	VDBG(dev, "<--- %s()\n", __func__);
	return retval;
}
970
971
972/*-------------------------------------------------------------------------*/
973
974/* endpoint set/clear halt */
975static void ep_set_halt(struct langwell_ep *ep, int value)
976{
977 u32 endptctrl = 0;
978 int ep_num;
979 struct langwell_udc *dev = ep->dev;
980 VDBG(dev, "---> %s()\n", __func__);
981
982 ep_num = ep->ep_num;
983 endptctrl = readl(&dev->op_regs->endptctrl[ep_num]);
984
985 /* value: 1 - set halt, 0 - clear halt */
986 if (value) {
987 /* set the stall bit */
988 if (is_in(ep))
989 endptctrl |= EPCTRL_TXS;
990 else
991 endptctrl |= EPCTRL_RXS;
992 } else {
993 /* clear the stall bit and reset data toggle */
994 if (is_in(ep)) {
995 endptctrl &= ~EPCTRL_TXS;
996 endptctrl |= EPCTRL_TXR;
997 } else {
998 endptctrl &= ~EPCTRL_RXS;
999 endptctrl |= EPCTRL_RXR;
1000 }
1001 }
1002
1003 writel(endptctrl, &dev->op_regs->endptctrl[ep_num]);
1004
1005 VDBG(dev, "<--- %s()\n", __func__);
1006}
1007
1008
1009/* set the endpoint halt feature */
1010static int langwell_ep_set_halt(struct usb_ep *_ep, int value)
1011{
1012 struct langwell_ep *ep;
1013 struct langwell_udc *dev;
1014 unsigned long flags;
1015 int retval = 0;
1016
1017 ep = container_of(_ep, struct langwell_ep, ep);
1018 dev = ep->dev;
1019
1020 VDBG(dev, "---> %s()\n", __func__);
1021
1022 if (!_ep || !ep->desc)
1023 return -EINVAL;
1024
1025 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
1026 return -ESHUTDOWN;
1027
1028 if (ep->desc && (ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
1029 == USB_ENDPOINT_XFER_ISOC)
1030 return -EOPNOTSUPP;
1031
1032 spin_lock_irqsave(&dev->lock, flags);
1033
1034 /*
1035 * attempt to halt IN ep will fail if any transfer requests
1036 * are still queue
1037 */
1038 if (!list_empty(&ep->queue) && is_in(ep) && value) {
1039 /* IN endpoint FIFO holds bytes */
1040 DBG(dev, "%s FIFO holds bytes\n", _ep->name);
1041 retval = -EAGAIN;
1042 goto done;
1043 }
1044
1045 /* endpoint set/clear halt */
1046 if (ep->ep_num) {
1047 ep_set_halt(ep, value);
1048 } else { /* endpoint 0 */
1049 dev->ep0_state = WAIT_FOR_SETUP;
1050 dev->ep0_dir = USB_DIR_OUT;
1051 }
1052done:
1053 spin_unlock_irqrestore(&dev->lock, flags);
1054 DBG(dev, "%s %s halt\n", _ep->name, value ? "set" : "clear");
1055 VDBG(dev, "<--- %s()\n", __func__);
1056 return retval;
1057}
1058
1059
1060/* set the halt feature and ignores clear requests */
1061static int langwell_ep_set_wedge(struct usb_ep *_ep)
1062{
1063 struct langwell_ep *ep;
1064 struct langwell_udc *dev;
1065
1066 ep = container_of(_ep, struct langwell_ep, ep);
1067 dev = ep->dev;
1068
1069 VDBG(dev, "---> %s()\n", __func__);
1070
1071 if (!_ep || !ep->desc)
1072 return -EINVAL;
1073
1074 VDBG(dev, "<--- %s()\n", __func__);
1075 return usb_ep_set_halt(_ep);
1076}
1077
1078
1079/* flush contents of a fifo */
1080static void langwell_ep_fifo_flush(struct usb_ep *_ep)
1081{
1082 struct langwell_ep *ep;
1083 struct langwell_udc *dev;
1084 u32 flush_bit;
1085 unsigned long timeout;
1086
1087 ep = container_of(_ep, struct langwell_ep, ep);
1088 dev = ep->dev;
1089
1090 VDBG(dev, "---> %s()\n", __func__);
1091
1092 if (!_ep || !ep->desc) {
1093 VDBG(dev, "ep or ep->desc is NULL\n");
1094 VDBG(dev, "<--- %s()\n", __func__);
1095 return;
1096 }
1097
1098 VDBG(dev, "%s-%s fifo flush\n", _ep->name, is_in(ep) ? "in" : "out");
1099
1100 /* flush endpoint buffer */
1101 if (ep->ep_num == 0)
1102 flush_bit = (1 << 16) | 1;
1103 else if (is_in(ep))
1104 flush_bit = 1 << (ep->ep_num + 16); /* TX */
1105 else
1106 flush_bit = 1 << ep->ep_num; /* RX */
1107
1108 /* wait until flush complete */
1109 timeout = jiffies + FLUSH_TIMEOUT;
1110 do {
1111 writel(flush_bit, &dev->op_regs->endptflush);
1112 while (readl(&dev->op_regs->endptflush)) {
1113 if (time_after(jiffies, timeout)) {
1114 ERROR(dev, "ep flush timeout\n");
1115 goto done;
1116 }
1117 cpu_relax();
1118 }
1119 } while (readl(&dev->op_regs->endptstat) & flush_bit);
1120done:
1121 VDBG(dev, "<--- %s()\n", __func__);
1122}
1123
1124
1125/* endpoints operations structure */
1126static const struct usb_ep_ops langwell_ep_ops = {
1127
1128 /* configure endpoint, making it usable */
1129 .enable = langwell_ep_enable,
1130
1131 /* endpoint is no longer usable */
1132 .disable = langwell_ep_disable,
1133
1134 /* allocate a request object to use with this endpoint */
1135 .alloc_request = langwell_alloc_request,
1136
1137 /* free a request object */
1138 .free_request = langwell_free_request,
1139
1140 /* queue (submits) an I/O requests to an endpoint */
1141 .queue = langwell_ep_queue,
1142
1143 /* dequeue (cancels, unlinks) an I/O request from an endpoint */
1144 .dequeue = langwell_ep_dequeue,
1145
1146 /* set the endpoint halt feature */
1147 .set_halt = langwell_ep_set_halt,
1148
1149 /* set the halt feature and ignores clear requests */
1150 .set_wedge = langwell_ep_set_wedge,
1151
1152 /* flush contents of a fifo */
1153 .fifo_flush = langwell_ep_fifo_flush,
1154};
1155
1156
1157/*-------------------------------------------------------------------------*/
1158
1159/* device controller usb_gadget_ops structure */
1160
1161/* returns the current frame number */
1162static int langwell_get_frame(struct usb_gadget *_gadget)
1163{
1164 struct langwell_udc *dev;
1165 u16 retval;
1166
1167 if (!_gadget)
1168 return -ENODEV;
1169
1170 dev = container_of(_gadget, struct langwell_udc, gadget);
1171 VDBG(dev, "---> %s()\n", __func__);
1172
1173 retval = readl(&dev->op_regs->frindex) & FRINDEX_MASK;
1174
1175 VDBG(dev, "<--- %s()\n", __func__);
1176 return retval;
1177}
1178
1179
1180/* tries to wake up the host connected to this gadget */
1181static int langwell_wakeup(struct usb_gadget *_gadget)
1182{
1183 struct langwell_udc *dev;
1184 u32 portsc1, devlc;
1185 unsigned long flags;
1186
1187 if (!_gadget)
1188 return 0;
1189
1190 dev = container_of(_gadget, struct langwell_udc, gadget);
1191 VDBG(dev, "---> %s()\n", __func__);
1192
1193 /* Remote Wakeup feature not enabled by host */
1194 if (!dev->remote_wakeup)
1195 return -ENOTSUPP;
1196
1197 spin_lock_irqsave(&dev->lock, flags);
1198
1199 portsc1 = readl(&dev->op_regs->portsc1);
1200 if (!(portsc1 & PORTS_SUSP)) {
1201 spin_unlock_irqrestore(&dev->lock, flags);
1202 return 0;
1203 }
1204
1205 /* LPM L1 to L0, remote wakeup */
1206 if (dev->lpm && dev->lpm_state == LPM_L1) {
1207 portsc1 |= PORTS_SLP;
1208 writel(portsc1, &dev->op_regs->portsc1);
1209 }
1210
1211 /* force port resume */
1212 if (dev->usb_state == USB_STATE_SUSPENDED) {
1213 portsc1 |= PORTS_FPR;
1214 writel(portsc1, &dev->op_regs->portsc1);
1215 }
1216
1217 /* exit PHY low power suspend */
1218 devlc = readl(&dev->op_regs->devlc);
1219 VDBG(dev, "devlc = 0x%08x\n", devlc);
1220 devlc &= ~LPM_PHCD;
1221 writel(devlc, &dev->op_regs->devlc);
1222
1223 spin_unlock_irqrestore(&dev->lock, flags);
1224
1225 VDBG(dev, "<--- %s()\n", __func__);
1226 return 0;
1227}
1228
1229
1230/* notify controller that VBUS is powered or not */
1231static int langwell_vbus_session(struct usb_gadget *_gadget, int is_active)
1232{
1233 struct langwell_udc *dev;
1234 unsigned long flags;
1235 u32 usbcmd;
1236
1237 if (!_gadget)
1238 return -ENODEV;
1239
1240 dev = container_of(_gadget, struct langwell_udc, gadget);
1241 VDBG(dev, "---> %s()\n", __func__);
1242
1243 spin_lock_irqsave(&dev->lock, flags);
1244 VDBG(dev, "VBUS status: %s\n", is_active ? "on" : "off");
1245
1246 dev->vbus_active = (is_active != 0);
1247 if (dev->driver && dev->softconnected && dev->vbus_active) {
1248 usbcmd = readl(&dev->op_regs->usbcmd);
1249 usbcmd |= CMD_RUNSTOP;
1250 writel(usbcmd, &dev->op_regs->usbcmd);
1251 } else {
1252 usbcmd = readl(&dev->op_regs->usbcmd);
1253 usbcmd &= ~CMD_RUNSTOP;
1254 writel(usbcmd, &dev->op_regs->usbcmd);
1255 }
1256
1257 spin_unlock_irqrestore(&dev->lock, flags);
1258
1259 VDBG(dev, "<--- %s()\n", __func__);
1260 return 0;
1261}
1262
1263
1264/* constrain controller's VBUS power usage */
1265static int langwell_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1266{
1267 struct langwell_udc *dev;
1268
1269 if (!_gadget)
1270 return -ENODEV;
1271
1272 dev = container_of(_gadget, struct langwell_udc, gadget);
1273 VDBG(dev, "---> %s()\n", __func__);
1274
1275 if (dev->transceiver) {
1276 VDBG(dev, "otg_set_power\n");
1277 VDBG(dev, "<--- %s()\n", __func__);
1278 return otg_set_power(dev->transceiver, mA);
1279 }
1280
1281 VDBG(dev, "<--- %s()\n", __func__);
1282 return -ENOTSUPP;
1283}
1284
1285
1286/* D+ pullup, software-controlled connect/disconnect to USB host */
1287static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
1288{
1289 struct langwell_udc *dev;
1290 u32 usbcmd;
1291 unsigned long flags;
1292
1293 if (!_gadget)
1294 return -ENODEV;
1295
1296 dev = container_of(_gadget, struct langwell_udc, gadget);
1297
1298 VDBG(dev, "---> %s()\n", __func__);
1299
1300 spin_lock_irqsave(&dev->lock, flags);
1301 dev->softconnected = (is_on != 0);
1302
1303 if (dev->driver && dev->softconnected && dev->vbus_active) {
1304 usbcmd = readl(&dev->op_regs->usbcmd);
1305 usbcmd |= CMD_RUNSTOP;
1306 writel(usbcmd, &dev->op_regs->usbcmd);
1307 } else {
1308 usbcmd = readl(&dev->op_regs->usbcmd);
1309 usbcmd &= ~CMD_RUNSTOP;
1310 writel(usbcmd, &dev->op_regs->usbcmd);
1311 }
1312 spin_unlock_irqrestore(&dev->lock, flags);
1313
1314 VDBG(dev, "<--- %s()\n", __func__);
1315 return 0;
1316}
1317
1318
1319/* device controller usb_gadget_ops structure */
1320static const struct usb_gadget_ops langwell_ops = {
1321
1322 /* returns the current frame number */
1323 .get_frame = langwell_get_frame,
1324
1325 /* tries to wake up the host connected to this gadget */
1326 .wakeup = langwell_wakeup,
1327
1328 /* set the device selfpowered feature, always selfpowered */
1329 /* .set_selfpowered = langwell_set_selfpowered, */
1330
1331 /* notify controller that VBUS is powered or not */
1332 .vbus_session = langwell_vbus_session,
1333
1334 /* constrain controller's VBUS power usage */
1335 .vbus_draw = langwell_vbus_draw,
1336
1337 /* D+ pullup, software-controlled connect/disconnect to USB host */
1338 .pullup = langwell_pullup,
1339};
1340
1341
1342/*-------------------------------------------------------------------------*/
1343
1344/* device controller operations */
1345
/*
 * reset device controller
 *
 * Stops the controller, issues a core reset and waits for it to
 * complete, switches the dual-role core to device mode (with setup
 * lockout off so the setup tripwire in USBCMD is used instead),
 * configures LPM token handling and programs the dQH list base
 * address.  Returns 0 on success, -EINVAL for a NULL dev, -ETIMEDOUT
 * if the reset bit never clears within RESET_TIMEOUT.
 */
static int langwell_udc_reset(struct langwell_udc *dev)
{
	u32		usbcmd, usbmode, devlc, endpointlistaddr;
	unsigned long	timeout;

	if (!dev)
		return -EINVAL;

	DBG(dev, "---> %s()\n", __func__);

	/* set controller to stop state */
	usbcmd = readl(&dev->op_regs->usbcmd);
	usbcmd &= ~CMD_RUNSTOP;
	writel(usbcmd, &dev->op_regs->usbcmd);

	/* reset device controller */
	usbcmd = readl(&dev->op_regs->usbcmd);
	usbcmd |= CMD_RST;
	writel(usbcmd, &dev->op_regs->usbcmd);

	/* wait for reset to complete: hardware clears CMD_RST when done */
	timeout = jiffies + RESET_TIMEOUT;
	while (readl(&dev->op_regs->usbcmd) & CMD_RST) {
		if (time_after(jiffies, timeout)) {
			ERROR(dev, "device reset timeout\n");
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	/* set controller to device mode */
	usbmode = readl(&dev->op_regs->usbmode);
	usbmode |= MODE_DEVICE;

	/* turn setup lockout off, require setup tripwire in usbcmd */
	usbmode |= MODE_SLOM;

	writel(usbmode, &dev->op_regs->usbmode);
	/* read back to confirm the mode write took effect */
	usbmode = readl(&dev->op_regs->usbmode);
	VDBG(dev, "usbmode=0x%08x\n", usbmode);

	/* Write-Clear setup status */
	writel(0, &dev->op_regs->usbsts);

	/* if support USB LPM, ACK all LPM token */
	if (dev->lpm) {
		devlc = readl(&dev->op_regs->devlc);
		devlc &= ~LPM_STL;	/* don't STALL LPM token */
		devlc &= ~LPM_NYT_ACK;	/* ACK LPM token */
		writel(devlc, &dev->op_regs->devlc);
	}

	/* fill endpointlistaddr register with the dQH array's DMA base */
	endpointlistaddr = dev->ep_dqh_dma;
	endpointlistaddr &= ENDPOINTLISTADDR_MASK;
	writel(endpointlistaddr, &dev->op_regs->endpointlistaddr);

	VDBG(dev, "dQH base (vir: %p, phy: 0x%08x), endpointlistaddr=0x%08x\n",
			dev->ep_dqh, endpointlistaddr,
			readl(&dev->op_regs->endpointlistaddr));
	DBG(dev, "<--- %s()\n", __func__);
	return 0;
}
1410
1411
/*
 * reinitialize device controller endpoints
 *
 * Resets the software state of ep0 and of every other endpoint in
 * dev->ep[], linking the non-control endpoints into the gadget's
 * ep_list.  Always returns 0.
 */
static int eps_reinit(struct langwell_udc *dev)
{
	struct langwell_ep	*ep;
	char			name[14];
	int			i;

	VDBG(dev, "---> %s()\n", __func__);

	/* initialize ep0 */
	ep = &dev->ep[0];
	ep->dev = dev;
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &langwell_ep_ops;
	ep->stopped = 0;
	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
	ep->ep_num = 0;
	ep->desc = &langwell_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

	/* initialize other endpoints
	 * indices 0 and 1 correspond to ep0 OUT/IN, so the loop starts
	 * at 2; each index i maps to "ep%dout" (even) or "ep%din" (odd)
	 * with endpoint number i/2
	 */
	for (i = 2; i < dev->ep_max; i++) {
		ep = &dev->ep[i];
		if (i % 2)
			snprintf(name, sizeof(name), "ep%din", i / 2);
		else
			snprintf(name, sizeof(name), "ep%dout", i / 2);
		ep->dev = dev;
		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;

		ep->ep.ops = &langwell_ep_ops;
		ep->stopped = 0;
		/* placeholder; the real maxpacket comes from the descriptor
		 * when the endpoint is enabled */
		ep->ep.maxpacket = (unsigned short) ~0;
		ep->ep_num = i / 2;

		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);

		/* NOTE(review): ep[0].dqh is never assigned in this
		 * function — confirm it is set elsewhere before ep0
		 * traffic starts */
		ep->dqh = &dev->ep_dqh[i];
	}

	VDBG(dev, "<--- %s()\n", __func__);
	return 0;
}
1460
1461
1462/* enable interrupt and set controller to run state */
1463static void langwell_udc_start(struct langwell_udc *dev)
1464{
1465 u32 usbintr, usbcmd;
1466 DBG(dev, "---> %s()\n", __func__);
1467
1468 /* enable interrupts */
1469 usbintr = INTR_ULPIE /* ULPI */
1470 | INTR_SLE /* suspend */
1471 /* | INTR_SRE SOF received */
1472 | INTR_URE /* USB reset */
1473 | INTR_AAE /* async advance */
1474 | INTR_SEE /* system error */
1475 | INTR_FRE /* frame list rollover */
1476 | INTR_PCE /* port change detect */
1477 | INTR_UEE /* USB error interrupt */
1478 | INTR_UE; /* USB interrupt */
1479 writel(usbintr, &dev->op_regs->usbintr);
1480
1481 /* clear stopped bit */
1482 dev->stopped = 0;
1483
1484 /* set controller to run */
1485 usbcmd = readl(&dev->op_regs->usbcmd);
1486 usbcmd |= CMD_RUNSTOP;
1487 writel(usbcmd, &dev->op_regs->usbcmd);
1488
1489 DBG(dev, "<--- %s()\n", __func__);
1490 return;
1491}
1492
1493
1494/* disable interrupt and set controller to stop state */
1495static void langwell_udc_stop(struct langwell_udc *dev)
1496{
1497 u32 usbcmd;
1498
1499 DBG(dev, "---> %s()\n", __func__);
1500
1501 /* disable all interrupts */
1502 writel(0, &dev->op_regs->usbintr);
1503
1504 /* set stopped bit */
1505 dev->stopped = 1;
1506
1507 /* set controller to stop state */
1508 usbcmd = readl(&dev->op_regs->usbcmd);
1509 usbcmd &= ~CMD_RUNSTOP;
1510 writel(usbcmd, &dev->op_regs->usbcmd);
1511
1512 DBG(dev, "<--- %s()\n", __func__);
1513 return;
1514}
1515
1516
1517/* stop all USB activities */
1518static void stop_activity(struct langwell_udc *dev,
1519 struct usb_gadget_driver *driver)
1520{
1521 struct langwell_ep *ep;
1522 DBG(dev, "---> %s()\n", __func__);
1523
1524 nuke(&dev->ep[0], -ESHUTDOWN);
1525
1526 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1527 nuke(ep, -ESHUTDOWN);
1528 }
1529
1530 /* report disconnect; the driver is already quiesced */
1531 if (driver) {
1532 spin_unlock(&dev->lock);
1533 driver->disconnect(&dev->gadget);
1534 spin_lock(&dev->lock);
1535 }
1536
1537 DBG(dev, "<--- %s()\n", __func__);
1538}
1539
1540
1541/*-------------------------------------------------------------------------*/
1542
1543/* device "function" sysfs attribute file */
1544static ssize_t show_function(struct device *_dev,
1545 struct device_attribute *attr, char *buf)
1546{
1547 struct langwell_udc *dev = the_controller;
1548
1549 if (!dev->driver || !dev->driver->function
1550 || strlen(dev->driver->function) > PAGE_SIZE)
1551 return 0;
1552
1553 return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
1554}
1555static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
1556
1557
/* device "langwell_udc" sysfs attribute file */
/*
 * Debug dump for the read-only "langwell_udc" sysfs attribute: prints
 * driver info, the controller's operational registers, and the state
 * of every configured endpoint with its request queue.  The whole dump
 * is produced under dev->lock and bounded to one page by tracking the
 * remaining space in @size after each scnprintf().
 */
static ssize_t show_langwell_udc(struct device *_dev,
		struct device_attribute *attr, char *buf)
{
	struct langwell_udc	*dev = the_controller;
	struct langwell_request *req;
	struct langwell_ep	*ep = NULL;
	char			*next;
	unsigned		size;
	unsigned		t;
	unsigned		i;
	unsigned long		flags;
	u32			tmp_reg;

	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	/* driver basic information */
	t = scnprintf(next, size,
			DRIVER_DESC "\n"
			"%s version: %s\n"
			"Gadget driver: %s\n\n",
			driver_name, DRIVER_VERSION,
			dev->driver ? dev->driver->driver.name : "(none)");
	size -= t;
	next += t;

	/* device registers */
	tmp_reg = readl(&dev->op_regs->usbcmd);
	t = scnprintf(next, size,
			"USBCMD reg:\n"
			"SetupTW: %d\n"
			"Run/Stop: %s\n\n",
			(tmp_reg & CMD_SUTW) ? 1 : 0,
			(tmp_reg & CMD_RUNSTOP) ? "Run" : "Stop");
	size -= t;
	next += t;

	tmp_reg = readl(&dev->op_regs->usbsts);
	t = scnprintf(next, size,
			"USB Status Reg:\n"
			"Device Suspend: %d\n"
			"Reset Received: %d\n"
			"System Error: %s\n"
			"USB Error Interrupt: %s\n\n",
			(tmp_reg & STS_SLI) ? 1 : 0,
			(tmp_reg & STS_URI) ? 1 : 0,
			(tmp_reg & STS_SEI) ? "Error" : "No error",
			(tmp_reg & STS_UEI) ? "Error detected" : "No error");
	size -= t;
	next += t;

	tmp_reg = readl(&dev->op_regs->usbintr);
	t = scnprintf(next, size,
			"USB Intrrupt Enable Reg:\n"
			"Sleep Enable: %d\n"
			"SOF Received Enable: %d\n"
			"Reset Enable: %d\n"
			"System Error Enable: %d\n"
			"Port Change Dectected Enable: %d\n"
			"USB Error Intr Enable: %d\n"
			"USB Intr Enable: %d\n\n",
			(tmp_reg & INTR_SLE) ? 1 : 0,
			(tmp_reg & INTR_SRE) ? 1 : 0,
			(tmp_reg & INTR_URE) ? 1 : 0,
			(tmp_reg & INTR_SEE) ? 1 : 0,
			(tmp_reg & INTR_PCE) ? 1 : 0,
			(tmp_reg & INTR_UEE) ? 1 : 0,
			(tmp_reg & INTR_UE) ? 1 : 0);
	size -= t;
	next += t;

	tmp_reg = readl(&dev->op_regs->frindex);
	t = scnprintf(next, size,
			"USB Frame Index Reg:\n"
			"Frame Number is 0x%08x\n\n",
			(tmp_reg & FRINDEX_MASK));
	size -= t;
	next += t;

	tmp_reg = readl(&dev->op_regs->deviceaddr);
	t = scnprintf(next, size,
			"USB Device Address Reg:\n"
			"Device Addr is 0x%x\n\n",
			USBADR(tmp_reg));
	size -= t;
	next += t;

	tmp_reg = readl(&dev->op_regs->endpointlistaddr);
	t = scnprintf(next, size,
			"USB Endpoint List Address Reg:\n"
			"Endpoint List Pointer is 0x%x\n\n",
			EPBASE(tmp_reg));
	size -= t;
	next += t;

	tmp_reg = readl(&dev->op_regs->portsc1);
	t = scnprintf(next, size,
		"USB Port Status & Control Reg:\n"
		"Port Reset: %s\n"
		"Port Suspend Mode: %s\n"
		"Over-current Change: %s\n"
		"Port Enable/Disable Change: %s\n"
		"Port Enabled/Disabled: %s\n"
		"Current Connect Status: %s\n\n",
		(tmp_reg & PORTS_PR) ? "Reset" : "Not Reset",
		(tmp_reg & PORTS_SUSP) ? "Suspend " : "Not Suspend",
		(tmp_reg & PORTS_OCC) ? "Detected" : "No",
		(tmp_reg & PORTS_PEC) ? "Changed" : "Not Changed",
		(tmp_reg & PORTS_PE) ? "Enable" : "Not Correct",
		(tmp_reg & PORTS_CCS) ? "Attached" : "Not Attached");
	size -= t;
	next += t;

	tmp_reg = readl(&dev->op_regs->devlc);
	t = scnprintf(next, size,
		"Device LPM Control Reg:\n"
		"Parallel Transceiver : %d\n"
		"Serial Transceiver : %d\n"
		"Port Speed: %s\n"
		"Port Force Full Speed Connenct: %s\n"
		"PHY Low Power Suspend Clock Disable: %s\n"
		"BmAttributes: %d\n\n",
		LPM_PTS(tmp_reg),
		(tmp_reg & LPM_STS) ? 1 : 0,
		/* GCC statement expression: decode the port speed field */
		({
			char	*s;
			switch (LPM_PSPD(tmp_reg)) {
			case LPM_SPEED_FULL:
				s = "Full Speed"; break;
			case LPM_SPEED_LOW:
				s = "Low Speed"; break;
			case LPM_SPEED_HIGH:
				s = "High Speed"; break;
			default:
				s = "Unknown Speed"; break;
			}
			s;
		}),
		(tmp_reg & LPM_PFSC) ? "Force Full Speed" : "Not Force",
		(tmp_reg & LPM_PHCD) ? "Disabled" : "Enabled",
		LPM_BA(tmp_reg));
	size -= t;
	next += t;

	tmp_reg = readl(&dev->op_regs->usbmode);
	t = scnprintf(next, size,
		"USB Mode Reg:\n"
		"Controller Mode is : %s\n\n", ({
			char *s;
			switch (MODE_CM(tmp_reg)) {
			case MODE_IDLE:
				s = "Idle"; break;
			case MODE_DEVICE:
				s = "Device Controller"; break;
			case MODE_HOST:
				s = "Host Controller"; break;
			default:
				s = "None"; break;
			}
			s;
		}));
	size -= t;
	next += t;

	tmp_reg = readl(&dev->op_regs->endptsetupstat);
	t = scnprintf(next, size,
		"Endpoint Setup Status Reg:\n"
		"SETUP on ep 0x%04x\n\n",
		tmp_reg & SETUPSTAT_MASK);
	size -= t;
	next += t;

	/* one endptctrl register covers an RX/TX endpoint pair */
	for (i = 0; i < dev->ep_max / 2; i++) {
		tmp_reg = readl(&dev->op_regs->endptctrl[i]);
		t = scnprintf(next, size, "EP Ctrl Reg [%d]: 0x%08x\n",
				i, tmp_reg);
		size -= t;
		next += t;
	}
	tmp_reg = readl(&dev->op_regs->endptprime);
	t = scnprintf(next, size, "EP Prime Reg: 0x%08x\n\n", tmp_reg);
	size -= t;
	next += t;

	/* langwell_udc, langwell_ep, langwell_request structure information */
	ep = &dev->ep[0];
	t = scnprintf(next, size, "%s MaxPacketSize: 0x%x, ep_num: %d\n",
			ep->ep.name, ep->ep.maxpacket, ep->ep_num);
	size -= t;
	next += t;

	if (list_empty(&ep->queue)) {
		t = scnprintf(next, size, "its req queue is empty\n\n");
		size -= t;
		next += t;
	} else {
		list_for_each_entry(req, &ep->queue, queue) {
			t = scnprintf(next, size,
				"req %p actual 0x%x length 0x%x buf %p\n",
				&req->req, req->req.actual,
				req->req.length, req->req.buf);
			size -= t;
			next += t;
		}
	}
	/* other gadget->eplist ep */
	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
		/* only endpoints with a descriptor are configured/in use */
		if (ep->desc) {
			t = scnprintf(next, size,
					"\n%s MaxPacketSize: 0x%x, "
					"ep_num: %d\n",
					ep->ep.name, ep->ep.maxpacket,
					ep->ep_num);
			size -= t;
			next += t;

			if (list_empty(&ep->queue)) {
				t = scnprintf(next, size,
						"its req queue is empty\n\n");
				size -= t;
				next += t;
			} else {
				list_for_each_entry(req, &ep->queue, queue) {
					t = scnprintf(next, size,
						"req %p actual 0x%x length "
						"0x%x buf %p\n",
						&req->req, req->req.actual,
						req->req.length, req->req.buf);
					size -= t;
					next += t;
				}
			}
		}
	}

	spin_unlock_irqrestore(&dev->lock, flags);
	/* number of bytes actually written into the page */
	return PAGE_SIZE - size;
}
static DEVICE_ATTR(langwell_udc, S_IRUGO, show_langwell_udc, NULL);
1799
1800
1801/*-------------------------------------------------------------------------*/
1802
1803/*
1804 * when a driver is successfully registered, it will receive
1805 * control requests including set_configuration(), which enables
1806 * non-control requests. then usb traffic follows until a
1807 * disconnect is reported. then a host may connect again, or
1808 * the driver might get unbound.
1809 */
1810
1811int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1812{
1813 struct langwell_udc *dev = the_controller;
1814 unsigned long flags;
1815 int retval;
1816
1817 if (!dev)
1818 return -ENODEV;
1819
1820 DBG(dev, "---> %s()\n", __func__);
1821
1822 if (dev->driver)
1823 return -EBUSY;
1824
1825 spin_lock_irqsave(&dev->lock, flags);
1826
1827 /* hook up the driver ... */
1828 driver->driver.bus = NULL;
1829 dev->driver = driver;
1830 dev->gadget.dev.driver = &driver->driver;
1831
1832 spin_unlock_irqrestore(&dev->lock, flags);
1833
1834 retval = driver->bind(&dev->gadget);
1835 if (retval) {
1836 DBG(dev, "bind to driver %s --> %d\n",
1837 driver->driver.name, retval);
1838 dev->driver = NULL;
1839 dev->gadget.dev.driver = NULL;
1840 return retval;
1841 }
1842
1843 retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
1844 if (retval)
1845 goto err_unbind;
1846
1847 dev->usb_state = USB_STATE_ATTACHED;
1848 dev->ep0_state = WAIT_FOR_SETUP;
1849 dev->ep0_dir = USB_DIR_OUT;
1850
1851 /* enable interrupt and set controller to run state */
1852 if (dev->got_irq)
1853 langwell_udc_start(dev);
1854
1855 VDBG(dev, "After langwell_udc_start(), print all registers:\n");
1856#ifdef VERBOSE
1857 print_all_registers(dev);
1858#endif
1859
1860 INFO(dev, "register driver: %s\n", driver->driver.name);
1861 VDBG(dev, "<--- %s()\n", __func__);
1862 return 0;
1863
1864err_unbind:
1865 driver->unbind(&dev->gadget);
1866 dev->gadget.dev.driver = NULL;
1867 dev->driver = NULL;
1868
1869 DBG(dev, "<--- %s()\n", __func__);
1870 return retval;
1871}
1872EXPORT_SYMBOL(usb_gadget_register_driver);
1873
1874
/* unregister gadget driver */
/*
 * Detach the bound gadget driver: release the OTG transceiver, stop
 * the controller, flush every queued request, call the driver's
 * unbind() hook and remove the "function" sysfs attribute.
 *
 * Returns 0 on success, -ENODEV without a controller, or -EINVAL for
 * an invalid driver.
 *
 * NOTE(review): @driver is never compared against dev->driver, so a
 * caller could in principle pass a driver other than the bound one —
 * confirm all callers pass the registered driver.
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	struct langwell_udc	*dev = the_controller;
	unsigned long		flags;

	if (!dev)
		return -ENODEV;

	DBG(dev, "---> %s()\n", __func__);

	if (unlikely(!driver || !driver->bind || !driver->unbind))
		return -EINVAL;

	/* unbind OTG transceiver */
	if (dev->transceiver)
		(void)otg_set_peripheral(dev->transceiver, 0);

	/* disable interrupt and set controller to stop state */
	langwell_udc_stop(dev);

	dev->usb_state = USB_STATE_ATTACHED;
	dev->ep0_state = WAIT_FOR_SETUP;
	dev->ep0_dir = USB_DIR_OUT;

	spin_lock_irqsave(&dev->lock, flags);

	/* stop all usb activities */
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(dev, driver);
	spin_unlock_irqrestore(&dev->lock, flags);

	/* unbind gadget driver; called without dev->lock held */
	driver->unbind(&dev->gadget);
	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;

	device_remove_file(&dev->pdev->dev, &dev_attr_function);

	INFO(dev, "unregistered driver '%s'\n", driver->driver.name);
	DBG(dev, "<--- %s()\n", __func__);
	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
1919
1920
1921/*-------------------------------------------------------------------------*/
1922
1923/*
1924 * setup tripwire is used as a semaphore to ensure that the setup data
1925 * payload is extracted from a dQH without being corrupted
1926 */
1927static void setup_tripwire(struct langwell_udc *dev)
1928{
1929 u32 usbcmd,
1930 endptsetupstat;
1931 unsigned long timeout;
1932 struct langwell_dqh *dqh;
1933
1934 VDBG(dev, "---> %s()\n", __func__);
1935
1936 /* ep0 OUT dQH */
1937 dqh = &dev->ep_dqh[EP_DIR_OUT];
1938
1939 /* Write-Clear endptsetupstat */
1940 endptsetupstat = readl(&dev->op_regs->endptsetupstat);
1941 writel(endptsetupstat, &dev->op_regs->endptsetupstat);
1942
1943 /* wait until endptsetupstat is cleared */
1944 timeout = jiffies + SETUPSTAT_TIMEOUT;
1945 while (readl(&dev->op_regs->endptsetupstat)) {
1946 if (time_after(jiffies, timeout)) {
1947 ERROR(dev, "setup_tripwire timeout\n");
1948 break;
1949 }
1950 cpu_relax();
1951 }
1952
1953 /* while a hazard exists when setup packet arrives */
1954 do {
1955 /* set setup tripwire bit */
1956 usbcmd = readl(&dev->op_regs->usbcmd);
1957 writel(usbcmd | CMD_SUTW, &dev->op_regs->usbcmd);
1958
1959 /* copy the setup packet to local buffer */
1960 memcpy(&dev->local_setup_buff, &dqh->dqh_setup, 8);
1961 } while (!(readl(&dev->op_regs->usbcmd) & CMD_SUTW));
1962
1963 /* Write-Clear setup tripwire bit */
1964 usbcmd = readl(&dev->op_regs->usbcmd);
1965 writel(usbcmd & ~CMD_SUTW, &dev->op_regs->usbcmd);
1966
1967 VDBG(dev, "<--- %s()\n", __func__);
1968}
1969
1970
1971/* protocol ep0 stall, will automatically be cleared on new transaction */
1972static void ep0_stall(struct langwell_udc *dev)
1973{
1974 u32 endptctrl;
1975
1976 VDBG(dev, "---> %s()\n", __func__);
1977
1978 /* set TX and RX to stall */
1979 endptctrl = readl(&dev->op_regs->endptctrl[0]);
1980 endptctrl |= EPCTRL_TXS | EPCTRL_RXS;
1981 writel(endptctrl, &dev->op_regs->endptctrl[0]);
1982
1983 /* update ep0 state */
1984 dev->ep0_state = WAIT_FOR_SETUP;
1985 dev->ep0_dir = USB_DIR_OUT;
1986
1987 VDBG(dev, "<--- %s()\n", __func__);
1988}
1989
1990
/* PRIME a status phase for ep0 */
/*
 * Queue a zero-length transfer on ep0 in direction @dir (EP_DIR_IN or
 * EP_DIR_OUT) to serve as the STATUS phase of a control transfer,
 * reusing the per-device dev->status_req.  Returns 0 on success,
 * -ENOMEM if no dTD could be built, or the queue_dtd() error.
 */
static int prime_status_phase(struct langwell_udc *dev, int dir)
{
	struct langwell_request	*req;
	struct langwell_ep	*ep;
	int			status = 0;

	VDBG(dev, "---> %s()\n", __func__);

	if (dir == EP_DIR_IN)
		dev->ep0_dir = USB_DIR_IN;
	else
		dev->ep0_dir = USB_DIR_OUT;

	ep = &dev->ep[0];
	dev->ep0_state = WAIT_FOR_OUT_STATUS;

	/* borrow the per-device status request; no completion callback
	 * and zero length — a pure status handshake */
	req = dev->status_req;

	req->ep = ep;
	req->req.length = 0;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->req.complete = NULL;
	req->dtd_count = 0;

	if (!req_to_dtd(req))
		status = queue_dtd(ep, req);
	else
		return -ENOMEM;

	if (status)
		ERROR(dev, "can't queue ep0 status request\n");

	/* NOTE(review): the request is appended even when queue_dtd()
	 * failed above — confirm this is intentional */
	list_add_tail(&req->queue, &ep->queue);

	VDBG(dev, "<--- %s()\n", __func__);
	return status;
}
2030
2031
/* SET_ADDRESS request routine */
/*
 * Record the address assigned by the host; it is written into the
 * deviceaddr register later, when the status phase completes (see
 * ep0_req_complete()).  @index and @length are unused here.
 */
static void set_address(struct langwell_udc *dev, u16 value,
		u16 index, u16 length)
{
	VDBG(dev, "---> %s()\n", __func__);

	/* save the new address to device struct */
	dev->dev_addr = (u8) value;
	VDBG(dev, "dev->dev_addr = %d\n", dev->dev_addr);

	/* update usb state */
	dev->usb_state = USB_STATE_ADDRESS;

	/* STATUS phase: SET_ADDRESS has no data phase, answer with IN */
	if (prime_status_phase(dev, EP_DIR_IN))
		ep0_stall(dev);

	VDBG(dev, "<--- %s()\n", __func__);
}
2051
2052
2053/* return endpoint by windex */
2054static struct langwell_ep *get_ep_by_windex(struct langwell_udc *dev,
2055 u16 wIndex)
2056{
2057 struct langwell_ep *ep;
2058 VDBG(dev, "---> %s()\n", __func__);
2059
2060 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2061 return &dev->ep[0];
2062
2063 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
2064 u8 bEndpointAddress;
2065 if (!ep->desc)
2066 continue;
2067
2068 bEndpointAddress = ep->desc->bEndpointAddress;
2069 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2070 continue;
2071
2072 if ((wIndex & USB_ENDPOINT_NUMBER_MASK)
2073 == (bEndpointAddress & USB_ENDPOINT_NUMBER_MASK))
2074 return ep;
2075 }
2076
2077 VDBG(dev, "<--- %s()\n", __func__);
2078 return NULL;
2079}
2080
2081
2082/* return whether endpoint is stalled, 0: not stalled; 1: stalled */
2083static int ep_is_stall(struct langwell_ep *ep)
2084{
2085 struct langwell_udc *dev = ep->dev;
2086 u32 endptctrl;
2087 int retval;
2088
2089 VDBG(dev, "---> %s()\n", __func__);
2090
2091 endptctrl = readl(&dev->op_regs->endptctrl[ep->ep_num]);
2092 if (is_in(ep))
2093 retval = endptctrl & EPCTRL_TXS ? 1 : 0;
2094 else
2095 retval = endptctrl & EPCTRL_RXS ? 1 : 0;
2096
2097 VDBG(dev, "<--- %s()\n", __func__);
2098 return retval;
2099}
2100
2101
/* GET_STATUS request routine */
/*
 * Answer a standard GET_STATUS control request by priming a 2-byte IN
 * data phase on ep0 with the device, interface, or endpoint status,
 * depending on the recipient bits of @request_type.  Stalls ep0 on any
 * failure.  @value and @length are unused.
 */
static void get_status(struct langwell_udc *dev, u8 request_type, u16 value,
		u16 index, u16 length)
{
	struct langwell_request	*req;
	struct langwell_ep	*ep;
	u16	status_data = 0;	/* 16 bits cpu view status data */
	int	status = 0;

	VDBG(dev, "---> %s()\n", __func__);

	ep = &dev->ep[0];

	if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		/* get device status: always reports self-powered, plus
		 * the current remote-wakeup setting */
		status_data = 1 << USB_DEVICE_SELF_POWERED;
		status_data |= dev->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
		/* get interface status */
		status_data = 0;
	} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
		/* get endpoint status */
		struct langwell_ep	*epn;
		epn = get_ep_by_windex(dev, index);
		/* stall if endpoint doesn't exist */
		if (!epn)
			goto stall;

		status_data = ep_is_stall(epn) << USB_ENDPOINT_HALT;
	}

	dev->ep0_dir = USB_DIR_IN;

	/* borrow the per device status_req */
	req = dev->status_req;

	/* fill in the reqest structure; status goes on the wire LE */
	*((u16 *) req->req.buf) = cpu_to_le16(status_data);
	req->ep = ep;
	req->req.length = 2;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->req.complete = NULL;
	req->dtd_count = 0;

	/* prime the data phase */
	if (!req_to_dtd(req))
		status = queue_dtd(ep, req);
	else			/* no mem */
		goto stall;

	if (status) {
		ERROR(dev, "response error on GET_STATUS request\n");
		goto stall;
	}

	list_add_tail(&req->queue, &ep->queue);
	dev->ep0_state = DATA_STATE_XMIT;

	VDBG(dev, "<--- %s()\n", __func__);
	return;
stall:
	ep0_stall(dev);
	VDBG(dev, "<--- %s()\n", __func__);
}
2167
2168
/* setup packet interrupt handler */
/*
 * Decode and dispatch a SETUP packet received on ep0.  Standard
 * requests that the UDC must answer itself (GET_STATUS, SET_ADDRESS,
 * SET/CLEAR_FEATURE) are handled here; everything else is delegated to
 * the gadget driver's setup() callback.  Called with dev->lock held;
 * the lock is dropped around calls into the gadget driver.
 */
static void handle_setup_packet(struct langwell_udc *dev,
		struct usb_ctrlrequest *setup)
{
	u16	wValue = le16_to_cpu(setup->wValue);
	u16	wIndex = le16_to_cpu(setup->wIndex);
	u16	wLength = le16_to_cpu(setup->wLength);

	VDBG(dev, "---> %s()\n", __func__);

	/* ep0 fifo flush: a new SETUP supersedes any transfer in flight */
	nuke(&dev->ep[0], -ESHUTDOWN);

	DBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			wValue, wIndex, wLength);

	/* RNDIS gadget delegate */
	if ((setup->bRequestType == 0x21) && (setup->bRequest == 0x00)) {
		/* USB_CDC_SEND_ENCAPSULATED_COMMAND */
		goto delegate;
	}

	/* USB_CDC_GET_ENCAPSULATED_RESPONSE */
	if ((setup->bRequestType == 0xa1) && (setup->bRequest == 0x01)) {
		/* USB_CDC_GET_ENCAPSULATED_RESPONSE */
		goto delegate;
	}

	/* We process some standard setup requests here */
	switch (setup->bRequest) {
	case USB_REQ_GET_STATUS:
		DBG(dev, "SETUP: USB_REQ_GET_STATUS\n");
		/* get status, DATA and STATUS phase */
		if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
					!= (USB_DIR_IN | USB_TYPE_STANDARD))
			break;
		get_status(dev, setup->bRequestType, wValue, wIndex, wLength);
		goto end;

	case USB_REQ_SET_ADDRESS:
		DBG(dev, "SETUP: USB_REQ_SET_ADDRESS\n");
		/* STATUS phase */
		if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD
						| USB_RECIP_DEVICE))
			break;
		set_address(dev, wValue, wIndex, wLength);
		goto end;

	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		/* STATUS phase */
	{
		int rc = -EOPNOTSUPP;
		if (setup->bRequest == USB_REQ_SET_FEATURE)
			DBG(dev, "SETUP: USB_REQ_SET_FEATURE\n");
		else if (setup->bRequest == USB_REQ_CLEAR_FEATURE)
			DBG(dev, "SETUP: USB_REQ_CLEAR_FEATURE\n");

		if ((setup->bRequestType & (USB_RECIP_MASK | USB_TYPE_MASK))
				== (USB_RECIP_ENDPOINT | USB_TYPE_STANDARD)) {
			/* endpoint halt feature: set or clear the stall */
			struct langwell_ep	*epn;
			epn = get_ep_by_windex(dev, wIndex);
			/* stall if endpoint doesn't exist */
			if (!epn) {
				ep0_stall(dev);
				goto end;
			}

			if (wValue != 0 || wLength != 0
					|| epn->ep_num > dev->ep_max)
				break;

			/* drop the lock for the ep_ops call, which may
			 * sleep or re-enter driver code */
			spin_unlock(&dev->lock);
			rc = langwell_ep_set_halt(&epn->ep,
					(setup->bRequest == USB_REQ_SET_FEATURE)
						? 1 : 0);
			spin_lock(&dev->lock);

		} else if ((setup->bRequestType & (USB_RECIP_MASK
				| USB_TYPE_MASK)) == (USB_RECIP_DEVICE
				| USB_TYPE_STANDARD)) {
			/* OTG HNP feature selectors */
			if (!gadget_is_otg(&dev->gadget))
				break;
			else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) {
				dev->gadget.b_hnp_enable = 1;
#ifdef	OTG_TRANSCEIVER
				if (!dev->lotg->otg.default_a)
					dev->lotg->hsm.b_hnp_enable = 1;
#endif
			} else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
				dev->gadget.a_hnp_support = 1;
			else if (setup->bRequest ==
					USB_DEVICE_A_ALT_HNP_SUPPORT)
				dev->gadget.a_alt_hnp_support = 1;
			else
				break;
			rc = 0;
		} else
			break;

		/* feature handled: answer with an IN status phase */
		if (rc == 0) {
			if (prime_status_phase(dev, EP_DIR_IN))
				ep0_stall(dev);
		}
		goto end;
	}

	case USB_REQ_GET_DESCRIPTOR:
		DBG(dev, "SETUP: USB_REQ_GET_DESCRIPTOR\n");
		goto delegate;

	case USB_REQ_SET_DESCRIPTOR:
		DBG(dev, "SETUP: USB_REQ_SET_DESCRIPTOR unsupported\n");
		goto delegate;

	case USB_REQ_GET_CONFIGURATION:
		DBG(dev, "SETUP: USB_REQ_GET_CONFIGURATION\n");
		goto delegate;

	case USB_REQ_SET_CONFIGURATION:
		DBG(dev, "SETUP: USB_REQ_SET_CONFIGURATION\n");
		goto delegate;

	case USB_REQ_GET_INTERFACE:
		DBG(dev, "SETUP: USB_REQ_GET_INTERFACE\n");
		goto delegate;

	case USB_REQ_SET_INTERFACE:
		DBG(dev, "SETUP: USB_REQ_SET_INTERFACE\n");
		goto delegate;

	case USB_REQ_SYNCH_FRAME:
		DBG(dev, "SETUP: USB_REQ_SYNCH_FRAME unsupported\n");
		goto delegate;

	default:
		/* delegate USB standard requests to the gadget driver */
		goto delegate;
delegate:
		/* USB requests handled by gadget */
		if (wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			dev->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? USB_DIR_IN : USB_DIR_OUT;
			VDBG(dev, "dev->ep0_dir = 0x%x, wLength = %d\n",
					dev->ep0_dir, wLength);
			spin_unlock(&dev->lock);
			if (dev->driver->setup(&dev->gadget,
					&dev->local_setup_buff) < 0)
				ep0_stall(dev);
			spin_lock(&dev->lock);
			dev->ep0_state = (setup->bRequestType & USB_DIR_IN)
					?  DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			dev->ep0_dir = USB_DIR_IN;
			VDBG(dev, "dev->ep0_dir = 0x%x, wLength = %d\n",
					dev->ep0_dir, wLength);
			spin_unlock(&dev->lock);
			if (dev->driver->setup(&dev->gadget,
					&dev->local_setup_buff) < 0)
				ep0_stall(dev);
			spin_lock(&dev->lock);
			dev->ep0_state = WAIT_FOR_OUT_STATUS;
		}
		break;
	}
end:
	VDBG(dev, "<--- %s()\n", __func__);
	return;
}
2341
2342
/* transfer completion, process endpoint request and free the completed dTDs
 * for this request
 */
/*
 * Walk the dTD chain of @curr_req for dQH @index and decide whether
 * the request finished.  Computes the actual transfer length by
 * subtracting each dTD's remaining byte count from the requested
 * length.  Returns 0 when the request completed successfully, 1 when
 * it is still in flight, or a negative errno (-EPROTO, -EPIPE,
 * -EILSEQ) on a transfer error.  Called with dev->lock held.
 */
static int process_ep_req(struct langwell_udc *dev, int index,
		struct langwell_request *curr_req)
{
	struct langwell_dtd	*curr_dtd;
	struct langwell_dqh	*curr_dqh;
	int			td_complete, actual, remaining_length;
	int			i, dir;
	u8			dtd_status = 0;
	int			retval = 0;

	curr_dqh = &dev->ep_dqh[index];
	/* even dQH indexes are OUT (RX), odd are IN (TX) */
	dir = index % 2;

	curr_dtd = curr_req->head;
	td_complete = 0;
	actual = curr_req->req.length;

	VDBG(dev, "---> %s()\n", __func__);

	for (i = 0; i < curr_req->dtd_count; i++) {
		/* bytes the hardware did NOT transfer for this dTD */
		remaining_length = le16_to_cpu(curr_dtd->dtd_total);
		actual -= remaining_length;

		/* command execution states by dTD */
		dtd_status = curr_dtd->dtd_status;

		if (!dtd_status) {
			/* transfers completed successfully */
			if (!remaining_length) {
				td_complete++;
				VDBG(dev, "dTD transmitted successfully\n");
			} else {
				if (dir) {
					/* IN transfer with leftover data
					 * is a protocol error */
					VDBG(dev, "TX dTD remains data\n");
					retval = -EPROTO;
					break;

				} else {
					/* short OUT packet: request done */
					td_complete++;
					break;
				}
			}
		} else {
			/* transfers completed with errors */
			if (dtd_status & DTD_STS_ACTIVE) {
				DBG(dev, "request not completed\n");
				retval = 1;
				return retval;
			} else if (dtd_status & DTD_STS_HALTED) {
				ERROR(dev, "dTD error %08x dQH[%d]\n",
						dtd_status, index);
				/* clear the errors and halt condition */
				curr_dqh->dtd_status = 0;
				retval = -EPIPE;
				break;
			} else if (dtd_status & DTD_STS_DBE) {
				DBG(dev, "data buffer (overflow) error\n");
				retval = -EPROTO;
				break;
			} else if (dtd_status & DTD_STS_TRE) {
				DBG(dev, "transaction(ISO) error\n");
				retval = -EILSEQ;
				break;
			} else
				ERROR(dev, "unknown error (0x%x)!\n",
						dtd_status);
		}

		/* advance to the next dTD except after the last one */
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct langwell_dtd *)
				curr_dtd->next_dtd_virt;
	}

	if (retval)
		return retval;

	curr_req->req.actual = actual;

	VDBG(dev, "<--- %s()\n", __func__);
	return 0;
}
2427
2428
/* complete DATA or STATUS phase of ep0 prime status phase if needed */
/*
 * Finish an ep0 request: latch a pending SET_ADDRESS into the
 * deviceaddr register, complete @req, then advance the ep0 state
 * machine — priming the opposite-direction STATUS phase after a DATA
 * phase, or returning to WAIT_FOR_SETUP after a STATUS phase.
 */
static void ep0_req_complete(struct langwell_udc *dev,
		struct langwell_ep *ep0, struct langwell_request *req)
{
	u32	new_addr;
	VDBG(dev, "---> %s()\n", __func__);

	if (dev->usb_state == USB_STATE_ADDRESS) {
		/* set the new address recorded by set_address() */
		new_addr = (u32)dev->dev_addr;
		writel(new_addr << USBADR_SHIFT, &dev->op_regs->deviceaddr);

		/* read back for verification in the debug log */
		new_addr = USBADR(readl(&dev->op_regs->deviceaddr));
		VDBG(dev, "new_addr = %d\n", new_addr);
	}

	done(ep0, req, 0);

	switch (dev->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (prime_status_phase(dev, EP_DIR_OUT))
			ep0_stall(dev);
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (prime_status_phase(dev, EP_DIR_IN))
			ep0_stall(dev);
		break;
	case WAIT_FOR_OUT_STATUS:
		dev->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		ERROR(dev, "unexpect ep0 packets\n");
		break;
	default:
		ep0_stall(dev);
		break;
	}

	VDBG(dev, "<--- %s()\n", __func__);
}
2471
2472
/* USB transfer completion interrupt */
/*
 * Service the endptcomplete register: for every endpoint with its
 * completion bit set, walk the request queue and retire finished
 * requests, stopping at the first still-active one.  ep0 completions
 * go through ep0_req_complete() to drive the control state machine.
 * Called with dev->lock held from the IRQ handler.
 */
static void handle_trans_complete(struct langwell_udc *dev)
{
	u32			complete_bits;
	int			i, ep_num, dir, bit_mask, status;
	struct langwell_ep	*epn;
	struct langwell_request	*curr_req, *temp_req;

	VDBG(dev, "---> %s()\n", __func__);

	complete_bits = readl(&dev->op_regs->endptcomplete);
	VDBG(dev, "endptcomplete register: 0x%08x\n", complete_bits);

	/* Write-Clear the bits in endptcomplete register */
	writel(complete_bits, &dev->op_regs->endptcomplete);

	if (!complete_bits) {
		DBG(dev, "complete_bits = 0\n");
		goto done;
	}

	for (i = 0; i < dev->ep_max; i++) {
		/* even indexes are OUT (low 16 bits), odd are IN (high) */
		ep_num = i / 2;
		dir = i % 2;

		bit_mask = 1 << (ep_num + 16 * dir);

		if (!(complete_bits & bit_mask))
			continue;

		/* ep0 */
		if (i == 1)
			epn = &dev->ep[0];
		else
			epn = &dev->ep[i];

		if (epn->name == NULL) {
			WARNING(dev, "invalid endpoint\n");
			continue;
		}

		if (i < 2)
			/* ep0 in and out */
			DBG(dev, "%s-%s transfer completed\n",
					epn->name,
					is_in(epn) ? "in" : "out");
		else
			DBG(dev, "%s transfer completed\n", epn->name);

		/* process the req queue until an uncomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
				&epn->queue, queue) {
			status = process_ep_req(dev, i, curr_req);
			VDBG(dev, "%s req status: %d\n", epn->name, status);

			/* still in flight or errored: leave the rest */
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(dev, epn, curr_req);
				break;
			} else {
				done(epn, curr_req, status);
			}
		}
	}
done:
	VDBG(dev, "<--- %s()\n", __func__);
	return;
}
2547
2548
/* port change detect interrupt handler */
/*
 * React to a port-change interrupt: read the negotiated bus speed
 * once a bus reset has finished, and track LPM L0<->L1 transitions
 * from the portsc1 suspend/sleep bits.  Called with dev->lock held.
 */
static void handle_port_change(struct langwell_udc *dev)
{
	u32	portsc1, devlc;
	u32	speed;

	VDBG(dev, "---> %s()\n", __func__);

	/* a port change after a bus reset means the reset completed */
	if (dev->bus_reset)
		dev->bus_reset = 0;

	portsc1 = readl(&dev->op_regs->portsc1);
	devlc = readl(&dev->op_regs->devlc);
	VDBG(dev, "portsc1 = 0x%08x, devlc = 0x%08x\n",
			portsc1, devlc);

	/* bus reset is finished */
	if (!(portsc1 & PORTS_PR)) {
		/* get the speed negotiated during reset */
		speed = LPM_PSPD(devlc);
		switch (speed) {
		case LPM_SPEED_HIGH:
			dev->gadget.speed = USB_SPEED_HIGH;
			break;
		case LPM_SPEED_FULL:
			dev->gadget.speed = USB_SPEED_FULL;
			break;
		case LPM_SPEED_LOW:
			dev->gadget.speed = USB_SPEED_LOW;
			break;
		default:
			dev->gadget.speed = USB_SPEED_UNKNOWN;
			break;
		}
		VDBG(dev, "speed = %d, dev->gadget.speed = %d\n",
				speed, dev->gadget.speed);
	}

	/* LPM L0 to L1: port suspended with the sleep bit set */
	if (dev->lpm && dev->lpm_state == LPM_L0)
		if (portsc1 & PORTS_SUSP && portsc1 & PORTS_SLP) {
			INFO(dev, "LPM L0 to L1\n");
			dev->lpm_state = LPM_L1;
		}

	/* LPM L1 to L0, force resume or remote wakeup finished */
	if (dev->lpm && dev->lpm_state == LPM_L1)
		if (!(portsc1 & PORTS_SUSP)) {
			if (portsc1 & PORTS_SLP)
				INFO(dev, "LPM L1 to L0, force resume\n");
			else
				INFO(dev, "LPM L1 to L0, remote wakeup\n");

			dev->lpm_state = LPM_L0;
		}

	/* update USB state unless we are mid-resume */
	if (!dev->resume_state)
		dev->usb_state = USB_STATE_DEFAULT;

	VDBG(dev, "<--- %s()\n", __func__);
}
2611
2612
/* USB reset interrupt handler */
/*
 * Reset the device side after a USB reset interrupt: clear the device
 * address, reset ep0 and LPM state, flush all primed/completed
 * endpoint state, and stop all activity.  If PORTS_PR is still set
 * this was a bus reset in progress; otherwise the controller itself
 * is reset and restarted.  Called with dev->lock held.
 */
static void handle_usb_reset(struct langwell_udc *dev)
{
	u32		deviceaddr,
			endptsetupstat,
			endptcomplete;
	unsigned long	timeout;

	VDBG(dev, "---> %s()\n", __func__);

	/* Write-Clear the device address */
	deviceaddr = readl(&dev->op_regs->deviceaddr);
	writel(deviceaddr & ~USBADR_MASK, &dev->op_regs->deviceaddr);

	dev->dev_addr = 0;

	/* clear usb state */
	dev->resume_state = 0;

	/* LPM L1 to L0, reset */
	if (dev->lpm)
		dev->lpm_state = LPM_L0;

	/* ep0 back to its initial state; reset clears remote wakeup
	 * and all OTG HNP flags per the USB/OTG specs */
	dev->ep0_dir = USB_DIR_OUT;
	dev->ep0_state = WAIT_FOR_SETUP;
	dev->remote_wakeup = 0;		/* default to 0 on reset */
	dev->gadget.b_hnp_enable = 0;
	dev->gadget.a_hnp_support = 0;
	dev->gadget.a_alt_hnp_support = 0;

	/* Write-Clear all the setup token semaphores */
	endptsetupstat = readl(&dev->op_regs->endptsetupstat);
	writel(endptsetupstat, &dev->op_regs->endptsetupstat);

	/* Write-Clear all the endpoint complete status bits */
	endptcomplete = readl(&dev->op_regs->endptcomplete);
	writel(endptcomplete, &dev->op_regs->endptcomplete);

	/* wait until all endptprime bits cleared; give up after a
	 * timeout rather than spinning forever */
	timeout = jiffies + PRIME_TIMEOUT;
	while (readl(&dev->op_regs->endptprime)) {
		if (time_after(jiffies, timeout)) {
			ERROR(dev, "USB reset timeout\n");
			break;
		}
		cpu_relax();
	}

	/* write 1s to endptflush register to clear any primed buffers */
	writel((u32) ~0, &dev->op_regs->endptflush);

	if (readl(&dev->op_regs->portsc1) & PORTS_PR) {
		VDBG(dev, "USB bus reset\n");
		/* bus is reseting */
		dev->bus_reset = 1;

		/* reset all the queues, stop all USB activities */
		stop_activity(dev, dev->driver);
		dev->usb_state = USB_STATE_DEFAULT;
	} else {
		VDBG(dev, "device controller reset\n");
		/* controller reset */
		langwell_udc_reset(dev);

		/* reset all the queues, stop all USB activities */
		stop_activity(dev, dev->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(dev);

		/* enable interrupt and set controller to run state */
		langwell_udc_start(dev);

		dev->usb_state = USB_STATE_ATTACHED;
	}

#ifdef OTG_TRANSCEIVER
	/* refer to USB OTG 6.6.2.3 b_hnp_en is cleared */
	if (!dev->lotg->otg.default_a)
		dev->lotg->hsm.b_hnp_enable = 0;
#endif

	VDBG(dev, "<--- %s()\n", __func__);
}
2697
2698
/* USB bus suspend/resume interrupt */
/*
 * Handle a bus suspend: save the pre-suspend USB state, notify the
 * OTG state machine (when built with OTG_TRANSCEIVER), report suspend
 * to the gadget driver, and put the PHY into low-power suspend.
 * Called with dev->lock held; the lock is dropped around the gadget
 * driver's suspend() callback.
 */
static void handle_bus_suspend(struct langwell_udc *dev)
{
	u32		devlc;
	DBG(dev, "---> %s()\n", __func__);

	/* remember where we were so resume can restore it */
	dev->resume_state = dev->usb_state;
	dev->usb_state = USB_STATE_SUSPENDED;

#ifdef OTG_TRANSCEIVER
	if (dev->lotg->otg.default_a) {
		if (dev->lotg->hsm.b_bus_suspend_vld == 1) {
			dev->lotg->hsm.b_bus_suspend = 1;
			/* notify transceiver the state changes */
			if (spin_trylock(&dev->lotg->wq_lock)) {
				langwell_update_transceiver();
				spin_unlock(&dev->lotg->wq_lock);
			}
		}
		dev->lotg->hsm.b_bus_suspend_vld++;
	} else {
		if (!dev->lotg->hsm.a_bus_suspend) {
			dev->lotg->hsm.a_bus_suspend = 1;
			/* notify transceiver the state changes */
			if (spin_trylock(&dev->lotg->wq_lock)) {
				langwell_update_transceiver();
				spin_unlock(&dev->lotg->wq_lock);
			}
		}
	}
#endif

	/* report suspend to the driver */
	if (dev->driver) {
		if (dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
			DBG(dev, "suspend %s\n", dev->driver->driver.name);
		}
	}

	/* enter PHY low power suspend */
	devlc = readl(&dev->op_regs->devlc);
	VDBG(dev, "devlc = 0x%08x\n", devlc);
	devlc |= LPM_PHCD;
	writel(devlc, &dev->op_regs->devlc);

	DBG(dev, "<--- %s()\n", __func__);
}
2749
2750
/*
 * Handle a bus resume: restore the pre-suspend USB state, take the
 * PHY out of low-power suspend, clear the OTG suspend flag (when
 * built with OTG_TRANSCEIVER), and report resume to the gadget
 * driver.  Mirrors handle_bus_suspend(); called with dev->lock held,
 * dropped around the driver's resume() callback.
 */
static void handle_bus_resume(struct langwell_udc *dev)
{
	u32		devlc;
	DBG(dev, "---> %s()\n", __func__);

	dev->usb_state = dev->resume_state;
	dev->resume_state = 0;

	/* exit PHY low power suspend */
	devlc = readl(&dev->op_regs->devlc);
	VDBG(dev, "devlc = 0x%08x\n", devlc);
	devlc &= ~LPM_PHCD;
	writel(devlc, &dev->op_regs->devlc);

#ifdef OTG_TRANSCEIVER
	if (dev->lotg->otg.default_a == 0)
		dev->lotg->hsm.a_bus_suspend = 0;
#endif

	/* report resume to the driver */
	if (dev->driver) {
		if (dev->driver->resume) {
			spin_unlock(&dev->lock);
			dev->driver->resume(&dev->gadget);
			spin_lock(&dev->lock);
			DBG(dev, "resume %s\n", dev->driver->driver.name);
		}
	}

	DBG(dev, "<--- %s()\n", __func__);
}
2782
2783
/*
 * USB device controller interrupt handler.
 *
 * The IRQ line is requested with IRQF_SHARED, so IRQ_NONE is returned
 * both while the controller is stopped and when none of the enabled
 * status bits is set.  All register access and event dispatch happens
 * under dev->lock; the per-event handlers below are called with that
 * lock held (and drop it themselves around gadget-driver callbacks).
 */
static irqreturn_t langwell_irq(int irq, void *_dev)
{
	struct langwell_udc	*dev = _dev;
	u32			usbsts,
				usbintr,
				irq_sts,
				portsc1;

	VDBG(dev, "---> %s()\n", __func__);

	/* shared IRQ: not ours while the controller is stopped */
	if (dev->stopped) {
		VDBG(dev, "handle IRQ_NONE\n");
		VDBG(dev, "<--- %s()\n", __func__);
		return IRQ_NONE;
	}

	spin_lock(&dev->lock);

	/* USB status */
	usbsts = readl(&dev->op_regs->usbsts);

	/* USB interrupt enable */
	usbintr = readl(&dev->op_regs->usbintr);

	/* only handle status bits whose interrupt is actually enabled */
	irq_sts = usbsts & usbintr;
	VDBG(dev, "usbsts = 0x%08x, usbintr = 0x%08x, irq_sts = 0x%08x\n",
			usbsts, usbintr, irq_sts);

	if (!irq_sts) {
		VDBG(dev, "handle IRQ_NONE\n");
		VDBG(dev, "<--- %s()\n", __func__);
		spin_unlock(&dev->lock);
		return IRQ_NONE;
	}

	/* Write-Clear interrupt status bits (write-1-to-clear register) */
	writel(irq_sts, &dev->op_regs->usbsts);

	/* resume from suspend: port no longer reports the suspend bit */
	portsc1 = readl(&dev->op_regs->portsc1);
	if (dev->usb_state == USB_STATE_SUSPENDED)
		if (!(portsc1 & PORTS_SUSP))
			handle_bus_resume(dev);

	/* USB interrupt */
	if (irq_sts & STS_UI) {
		VDBG(dev, "USB interrupt\n");

		/* setup packet received from ep0 */
		if (readl(&dev->op_regs->endptsetupstat)
				& EP0SETUPSTAT_MASK) {
			VDBG(dev, "USB SETUP packet received interrupt\n");
			/* setup tripwire semaphone */
			setup_tripwire(dev);
			handle_setup_packet(dev, &dev->local_setup_buff);
		}

		/* USB transfer completion */
		if (readl(&dev->op_regs->endptcomplete)) {
			VDBG(dev, "USB transfer completion interrupt\n");
			handle_trans_complete(dev);
		}
	}

	/* SOF received interrupt (for ISO transfer) */
	if (irq_sts & STS_SRI) {
		/* FIXME */
		/* VDBG(dev, "SOF received interrupt\n"); */
	}

	/* port change detect interrupt */
	if (irq_sts & STS_PCI) {
		VDBG(dev, "port change detect interrupt\n");
		handle_port_change(dev);
	}

	/* suspend interrupt */
	if (irq_sts & STS_SLI) {
		VDBG(dev, "suspend interrupt\n");
		handle_bus_suspend(dev);
	}

	/* USB reset interrupt */
	if (irq_sts & STS_URI) {
		VDBG(dev, "USB reset interrupt\n");
		handle_usb_reset(dev);
	}

	/* USB error or system error interrupt */
	if (irq_sts & (STS_UEI | STS_SEI)) {
		/* FIXME */
		WARNING(dev, "error IRQ, irq_sts: %x\n", irq_sts);
	}

	spin_unlock(&dev->lock);

	VDBG(dev, "<--- %s()\n", __func__);
	return IRQ_HANDLED;
}
2884
2885
2886/*-------------------------------------------------------------------------*/
2887
2888/* release device structure */
2889static void gadget_release(struct device *_dev)
2890{
2891 struct langwell_udc *dev = the_controller;
2892
2893 DBG(dev, "---> %s()\n", __func__);
2894
2895 complete(dev->done);
2896
2897 DBG(dev, "<--- %s()\n", __func__);
2898 kfree(dev);
2899}
2900
2901
/*
 * Tear down the binding between this driver and the pci device.
 *
 * Frees everything allocated by langwell_udc_probe() (in roughly the
 * reverse order), unregisters the gadget device, and then blocks until
 * gadget_release() signals via dev->done that the device structure has
 * really been released and freed.
 */
static void langwell_udc_remove(struct pci_dev *pdev)
{
	struct langwell_udc	*dev = the_controller;

	DECLARE_COMPLETION(done);

	/* a gadget driver must already have been unbound */
	BUG_ON(dev->driver);
	DBG(dev, "---> %s()\n", __func__);

	/* gadget_release() completes this when dev is finally released */
	dev->done = &done;

	/* free memory allocated in probe */
	if (dev->dtd_pool)
		dma_pool_destroy(dev->dtd_pool);

	if (dev->status_req) {
		kfree(dev->status_req->req.buf);
		kfree(dev->status_req);
	}

	if (dev->ep_dqh)
		dma_free_coherent(&pdev->dev, dev->ep_dqh_size,
			dev->ep_dqh, dev->ep_dqh_dma);

	kfree(dev->ep);

	/* disable IRQ handler */
	if (dev->got_irq)
		free_irq(pdev->irq, dev);

#ifndef	OTG_TRANSCEIVER
	if (dev->cap_regs)
		iounmap(dev->cap_regs);

	if (dev->region)
		release_mem_region(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0));

	if (dev->enabled)
		pci_disable_device(pdev);
#else
	/* in OTG mode the transceiver driver owns the PCI resources */
	if (dev->transceiver) {
		otg_put_transceiver(dev->transceiver);
		dev->transceiver = NULL;
		dev->lotg = NULL;
	}
#endif

	dev->cap_regs = NULL;

	INFO(dev, "unbind\n");
	DBG(dev, "<--- %s()\n", __func__);

	device_unregister(&dev->gadget.dev);
	device_remove_file(&pdev->dev, &dev_attr_langwell_udc);

#ifndef	OTG_TRANSCEIVER
	pci_set_drvdata(pdev, NULL);
#endif

	/* free dev, wait for the release() finished */
	wait_for_completion(&done);

	the_controller = NULL;
}
2968
2969
2970/*
2971 * wrap this driver around the specified device, but
2972 * don't respond over USB until a gadget driver binds to us.
2973 */
2974static int langwell_udc_probe(struct pci_dev *pdev,
2975 const struct pci_device_id *id)
2976{
2977 struct langwell_udc *dev;
2978#ifndef OTG_TRANSCEIVER
2979 unsigned long resource, len;
2980#endif
2981 void __iomem *base = NULL;
2982 size_t size;
2983 int retval;
2984
2985 if (the_controller) {
2986 dev_warn(&pdev->dev, "ignoring\n");
2987 return -EBUSY;
2988 }
2989
2990 /* alloc, and start init */
2991 dev = kzalloc(sizeof *dev, GFP_KERNEL);
2992 if (dev == NULL) {
2993 retval = -ENOMEM;
2994 goto error;
2995 }
2996
2997 /* initialize device spinlock */
2998 spin_lock_init(&dev->lock);
2999
3000 dev->pdev = pdev;
3001 DBG(dev, "---> %s()\n", __func__);
3002
3003#ifdef OTG_TRANSCEIVER
3004 /* PCI device is already enabled by otg_transceiver driver */
3005 dev->enabled = 1;
3006
3007 /* mem region and register base */
3008 dev->region = 1;
3009 dev->transceiver = otg_get_transceiver();
3010 dev->lotg = otg_to_langwell(dev->transceiver);
3011 base = dev->lotg->regs;
3012#else
3013 pci_set_drvdata(pdev, dev);
3014
3015 /* now all the pci goodies ... */
3016 if (pci_enable_device(pdev) < 0) {
3017 retval = -ENODEV;
3018 goto error;
3019 }
3020 dev->enabled = 1;
3021
3022 /* control register: BAR 0 */
3023 resource = pci_resource_start(pdev, 0);
3024 len = pci_resource_len(pdev, 0);
3025 if (!request_mem_region(resource, len, driver_name)) {
3026 ERROR(dev, "controller already in use\n");
3027 retval = -EBUSY;
3028 goto error;
3029 }
3030 dev->region = 1;
3031
3032 base = ioremap_nocache(resource, len);
3033#endif
3034 if (base == NULL) {
3035 ERROR(dev, "can't map memory\n");
3036 retval = -EFAULT;
3037 goto error;
3038 }
3039
3040 dev->cap_regs = (struct langwell_cap_regs __iomem *) base;
3041 VDBG(dev, "dev->cap_regs: %p\n", dev->cap_regs);
3042 dev->op_regs = (struct langwell_op_regs __iomem *)
3043 (base + OP_REG_OFFSET);
3044 VDBG(dev, "dev->op_regs: %p\n", dev->op_regs);
3045
3046 /* irq setup after old hardware is cleaned up */
3047 if (!pdev->irq) {
3048 ERROR(dev, "No IRQ. Check PCI setup!\n");
3049 retval = -ENODEV;
3050 goto error;
3051 }
3052
3053#ifndef OTG_TRANSCEIVER
3054 INFO(dev, "irq %d, io mem: 0x%08lx, len: 0x%08lx, pci mem 0x%p\n",
3055 pdev->irq, resource, len, base);
3056 /* enables bus-mastering for device dev */
3057 pci_set_master(pdev);
3058
3059 if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED,
3060 driver_name, dev) != 0) {
3061 ERROR(dev, "request interrupt %d failed\n", pdev->irq);
3062 retval = -EBUSY;
3063 goto error;
3064 }
3065 dev->got_irq = 1;
3066#endif
3067
3068 /* set stopped bit */
3069 dev->stopped = 1;
3070
3071 /* capabilities and endpoint number */
3072 dev->lpm = (readl(&dev->cap_regs->hccparams) & HCC_LEN) ? 1 : 0;
3073 dev->dciversion = readw(&dev->cap_regs->dciversion);
3074 dev->devcap = (readl(&dev->cap_regs->dccparams) & DEVCAP) ? 1 : 0;
3075 VDBG(dev, "dev->lpm: %d\n", dev->lpm);
3076 VDBG(dev, "dev->dciversion: 0x%04x\n", dev->dciversion);
3077 VDBG(dev, "dccparams: 0x%08x\n", readl(&dev->cap_regs->dccparams));
3078 VDBG(dev, "dev->devcap: %d\n", dev->devcap);
3079 if (!dev->devcap) {
3080 ERROR(dev, "can't support device mode\n");
3081 retval = -ENODEV;
3082 goto error;
3083 }
3084
3085 /* a pair of endpoints (out/in) for each address */
3086 dev->ep_max = DEN(readl(&dev->cap_regs->dccparams)) * 2;
3087 VDBG(dev, "dev->ep_max: %d\n", dev->ep_max);
3088
3089 /* allocate endpoints memory */
3090 dev->ep = kzalloc(sizeof(struct langwell_ep) * dev->ep_max,
3091 GFP_KERNEL);
3092 if (!dev->ep) {
3093 ERROR(dev, "allocate endpoints memory failed\n");
3094 retval = -ENOMEM;
3095 goto error;
3096 }
3097
3098 /* allocate device dQH memory */
3099 size = dev->ep_max * sizeof(struct langwell_dqh);
3100 VDBG(dev, "orig size = %d\n", size);
3101 if (size < DQH_ALIGNMENT)
3102 size = DQH_ALIGNMENT;
3103 else if ((size % DQH_ALIGNMENT) != 0) {
3104 size += DQH_ALIGNMENT + 1;
3105 size &= ~(DQH_ALIGNMENT - 1);
3106 }
3107 dev->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
3108 &dev->ep_dqh_dma, GFP_KERNEL);
3109 if (!dev->ep_dqh) {
3110 ERROR(dev, "allocate dQH memory failed\n");
3111 retval = -ENOMEM;
3112 goto error;
3113 }
3114 dev->ep_dqh_size = size;
3115 VDBG(dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
3116
3117 /* initialize ep0 status request structure */
3118 dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL);
3119 if (!dev->status_req) {
3120 ERROR(dev, "allocate status_req memory failed\n");
3121 retval = -ENOMEM;
3122 goto error;
3123 }
3124 INIT_LIST_HEAD(&dev->status_req->queue);
3125
3126 /* allocate a small amount of memory to get valid address */
3127 dev->status_req->req.buf = kmalloc(8, GFP_KERNEL);
3128 dev->status_req->req.dma = virt_to_phys(dev->status_req->req.buf);
3129
3130 dev->resume_state = USB_STATE_NOTATTACHED;
3131 dev->usb_state = USB_STATE_POWERED;
3132 dev->ep0_dir = USB_DIR_OUT;
3133 dev->remote_wakeup = 0; /* default to 0 on reset */
3134
3135#ifndef OTG_TRANSCEIVER
3136 /* reset device controller */
3137 langwell_udc_reset(dev);
3138#endif
3139
3140 /* initialize gadget structure */
3141 dev->gadget.ops = &langwell_ops; /* usb_gadget_ops */
3142 dev->gadget.ep0 = &dev->ep[0].ep; /* gadget ep0 */
3143 INIT_LIST_HEAD(&dev->gadget.ep_list); /* ep_list */
3144 dev->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
3145 dev->gadget.is_dualspeed = 1; /* support dual speed */
3146#ifdef OTG_TRANSCEIVER
3147 dev->gadget.is_otg = 1; /* support otg mode */
3148#endif
3149
3150 /* the "gadget" abstracts/virtualizes the controller */
3151 dev_set_name(&dev->gadget.dev, "gadget");
3152 dev->gadget.dev.parent = &pdev->dev;
3153 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
3154 dev->gadget.dev.release = gadget_release;
3155 dev->gadget.name = driver_name; /* gadget name */
3156
3157 /* controller endpoints reinit */
3158 eps_reinit(dev);
3159
3160#ifndef OTG_TRANSCEIVER
3161 /* reset ep0 dQH and endptctrl */
3162 ep0_reset(dev);
3163#endif
3164
3165 /* create dTD dma_pool resource */
3166 dev->dtd_pool = dma_pool_create("langwell_dtd",
3167 &dev->pdev->dev,
3168 sizeof(struct langwell_dtd),
3169 DTD_ALIGNMENT,
3170 DMA_BOUNDARY);
3171
3172 if (!dev->dtd_pool) {
3173 retval = -ENOMEM;
3174 goto error;
3175 }
3176
3177 /* done */
3178 INFO(dev, "%s\n", driver_desc);
3179 INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
3180 INFO(dev, "Driver version: " DRIVER_VERSION "\n");
3181 INFO(dev, "Support (max) %d endpoints\n", dev->ep_max);
3182 INFO(dev, "Device interface version: 0x%04x\n", dev->dciversion);
3183 INFO(dev, "Controller mode: %s\n", dev->devcap ? "Device" : "Host");
3184 INFO(dev, "Support USB LPM: %s\n", dev->lpm ? "Yes" : "No");
3185
3186 VDBG(dev, "After langwell_udc_probe(), print all registers:\n");
3187#ifdef VERBOSE
3188 print_all_registers(dev);
3189#endif
3190
3191 the_controller = dev;
3192
3193 retval = device_register(&dev->gadget.dev);
3194 if (retval)
3195 goto error;
3196
3197 retval = device_create_file(&pdev->dev, &dev_attr_langwell_udc);
3198 if (retval)
3199 goto error;
3200
3201 VDBG(dev, "<--- %s()\n", __func__);
3202 return 0;
3203
3204error:
3205 if (dev) {
3206 DBG(dev, "<--- %s()\n", __func__);
3207 langwell_udc_remove(pdev);
3208 }
3209
3210 return retval;
3211}
3212
3213
/*
 * Device controller suspend (PCI PM callback).
 *
 * Stops the controller, releases the IRQ, saves PCI state, drops the
 * device to D3hot and finally puts the PHY into low power suspend.
 * Always returns 0; langwell_udc_resume() undoes each step.
 */
static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct langwell_udc	*dev = the_controller;
	u32			devlc;

	DBG(dev, "---> %s()\n", __func__);

	/* disable interrupt and set controller to stop state */
	langwell_udc_stop(dev);

	/* disable IRQ handler; resume re-requests it */
	if (dev->got_irq)
		free_irq(pdev->irq, dev);
	dev->got_irq = 0;


	/* save PCI state */
	pci_save_state(pdev);

	/* set device power state */
	pci_set_power_state(pdev, PCI_D3hot);

	/* enter PHY low power suspend */
	devlc = readl(&dev->op_regs->devlc);
	VDBG(dev, "devlc = 0x%08x\n", devlc);
	devlc |= LPM_PHCD;
	writel(devlc, &dev->op_regs->devlc);

	DBG(dev, "<--- %s()\n", __func__);
	return 0;
}
3246
3247
3248/* device controller resume */
3249static int langwell_udc_resume(struct pci_dev *pdev)
3250{
3251 struct langwell_udc *dev = the_controller;
3252 u32 devlc;
3253
3254 DBG(dev, "---> %s()\n", __func__);
3255
3256 /* exit PHY low power suspend */
3257 devlc = readl(&dev->op_regs->devlc);
3258 VDBG(dev, "devlc = 0x%08x\n", devlc);
3259 devlc &= ~LPM_PHCD;
3260 writel(devlc, &dev->op_regs->devlc);
3261
3262 /* set device D0 power state */
3263 pci_set_power_state(pdev, PCI_D0);
3264
3265 /* restore PCI state */
3266 pci_restore_state(pdev);
3267
3268 /* enable IRQ handler */
3269 if (request_irq(pdev->irq, langwell_irq, IRQF_SHARED, driver_name, dev)
3270 != 0) {
3271 ERROR(dev, "request interrupt %d failed\n", pdev->irq);
3272 return -1;
3273 }
3274 dev->got_irq = 1;
3275
3276 /* reset and start controller to run state */
3277 if (dev->stopped) {
3278 /* reset device controller */
3279 langwell_udc_reset(dev);
3280
3281 /* reset ep0 dQH and endptctrl */
3282 ep0_reset(dev);
3283
3284 /* start device if gadget is loaded */
3285 if (dev->driver)
3286 langwell_udc_start(dev);
3287 }
3288
3289 /* reset USB status */
3290 dev->usb_state = USB_STATE_ATTACHED;
3291 dev->ep0_state = WAIT_FOR_SETUP;
3292 dev->ep0_dir = USB_DIR_OUT;
3293
3294 DBG(dev, "<--- %s()\n", __func__);
3295 return 0;
3296}
3297
3298
/* pci driver shutdown: quiesce the controller before reboot/poweroff */
static void langwell_udc_shutdown(struct pci_dev *pdev)
{
	struct langwell_udc	*dev = the_controller;
	u32			usbmode;

	DBG(dev, "---> %s()\n", __func__);

	/* reset controller mode to IDLE */
	usbmode = readl(&dev->op_regs->usbmode);
	DBG(dev, "usbmode = 0x%08x\n", usbmode);
	/*
	 * NOTE(review): "(~3 | MODE_IDLE)" only clears the low two mode
	 * bits completely when MODE_IDLE == 0; any bit set in MODE_IDLE
	 * leaks back into the mask.  Presumably MODE_IDLE is 0 — confirm
	 * against langwell_udc.h; "usbmode &= ~3; usbmode |= MODE_IDLE;"
	 * would state the intent unambiguously.
	 */
	usbmode &= (~3 | MODE_IDLE);
	writel(usbmode, &dev->op_regs->usbmode);

	DBG(dev, "<--- %s()\n", __func__);
}
3315
3316/*-------------------------------------------------------------------------*/
3317
/*
 * PCI IDs this driver binds to: the Intel Langwell USB device
 * controller (vendor 0x8086, device 0x0811), additionally matched on
 * class: serial bus / USB with programming interface 0xfe.
 */
static const struct pci_device_id pci_ids[] = { {
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	0x8086,
	.device =	0x0811,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
}, { /* end: all zeroes */ }
};
3327
3328
3329MODULE_DEVICE_TABLE(pci, pci_ids);
3330
3331
/* PCI driver glue: probe/remove, PM callbacks, and shutdown hook */
static struct pci_driver langwell_pci_driver = {
	.name =		(char *) driver_name,
	.id_table =	pci_ids,

	.probe =	langwell_udc_probe,
	.remove =	langwell_udc_remove,

	/* device controller suspend/resume */
	.suspend =	langwell_udc_suspend,
	.resume =	langwell_udc_resume,

	.shutdown =	langwell_udc_shutdown,
};
3345
3346
3347MODULE_DESCRIPTION(DRIVER_DESC);
3348MODULE_AUTHOR("Xiaochen Shen <xiaochen.shen@intel.com>");
3349MODULE_VERSION(DRIVER_VERSION);
3350MODULE_LICENSE("GPL");
3351
3352
/*
 * Module entry point: when built with OTG support, register the PCI
 * driver through the Langwell OTG transceiver; otherwise register it
 * with the PCI core directly.  Returns 0 or a negative errno.
 */
static int __init init(void)
{
#ifdef	OTG_TRANSCEIVER
	return langwell_register_peripheral(&langwell_pci_driver);
#else
	return pci_register_driver(&langwell_pci_driver);
#endif
}
3361module_init(init);
3362
3363
3364static void __exit cleanup(void)
3365{
3366#ifdef OTG_TRANSCEIVER
3367 return langwell_unregister_peripheral(&langwell_pci_driver);
3368#else
3369 pci_unregister_driver(&langwell_pci_driver);
3370#endif
3371}
3372module_exit(cleanup);
3373
diff --git a/drivers/usb/gadget/langwell_udc.h b/drivers/usb/gadget/langwell_udc.h
new file mode 100644
index 000000000000..9719934e1c08
--- /dev/null
+++ b/drivers/usb/gadget/langwell_udc.h
@@ -0,0 +1,228 @@
1/*
2 * Intel Langwell USB Device Controller driver
3 * Copyright (C) 2008-2009, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19
20#include <linux/usb/langwell_udc.h>
21
22#if defined(CONFIG_USB_LANGWELL_OTG)
23#include <linux/usb/langwell_otg.h>
24#endif
25
26
27/*-------------------------------------------------------------------------*/
28
29/* driver data structures and utilities */
30
31/*
32 * dTD: Device Endpoint Transfer Descriptor
33 * describe to the device controller the location and quantity of
34 * data to be send/received for given transfer
35 */
36struct langwell_dtd {
37 u32 dtd_next;
38/* bits 31:5, next transfer element pointer */
39#define DTD_NEXT(d) (((d)>>5)&0x7ffffff)
40#define DTD_NEXT_MASK (0x7ffffff << 5)
41/* terminate */
42#define DTD_TERM BIT(0)
43 /* bits 7:0, execution back states */
44 u32 dtd_status:8;
45#define DTD_STATUS(d) (((d)>>0)&0xff)
46#define DTD_STS_ACTIVE BIT(7) /* active */
47#define DTD_STS_HALTED BIT(6) /* halted */
48#define DTD_STS_DBE BIT(5) /* data buffer error */
49#define DTD_STS_TRE BIT(3) /* transaction error */
50 /* bits 9:8 */
51 u32 dtd_res0:2;
52 /* bits 11:10, multipier override */
53 u32 dtd_multo:2;
54#define DTD_MULTO (BIT(11) | BIT(10))
55 /* bits 14:12 */
56 u32 dtd_res1:3;
57 /* bit 15, interrupt on complete */
58 u32 dtd_ioc:1;
59#define DTD_IOC BIT(15)
60 /* bits 30:16, total bytes */
61 u32 dtd_total:15;
62#define DTD_TOTAL(d) (((d)>>16)&0x7fff)
63#define DTD_MAX_TRANSFER_LENGTH 0x4000
64 /* bit 31 */
65 u32 dtd_res2:1;
66 /* dTD buffer pointer page 0 to 4 */
67 u32 dtd_buf[5];
68#define DTD_OFFSET_MASK 0xfff
69/* bits 31:12, buffer pointer */
70#define DTD_BUFFER(d) (((d)>>12)&0x3ff)
71/* bits 11:0, current offset */
72#define DTD_C_OFFSET(d) (((d)>>0)&0xfff)
73/* bits 10:0, frame number */
74#define DTD_FRAME(d) (((d)>>0)&0x7ff)
75
76 /* driver-private parts */
77
78 /* dtd dma address */
79 dma_addr_t dtd_dma;
80 /* next dtd virtual address */
81 struct langwell_dtd *next_dtd_virt;
82};
83
84
85/*
86 * dQH: Device Endpoint Queue Head
87 * describe where all transfers are managed
88 * 48-byte data structure, aligned on 64-byte boundary
89 *
90 * These are associated with dTD structure
91 */
92struct langwell_dqh {
93 /* endpoint capabilities and characteristics */
94 u32 dqh_res0:15; /* bits 14:0 */
95 u32 dqh_ios:1; /* bit 15, interrupt on setup */
96#define DQH_IOS BIT(15)
97 u32 dqh_mpl:11; /* bits 26:16, maximum packet length */
98#define DQH_MPL (0x7ff << 16)
99 u32 dqh_res1:2; /* bits 28:27 */
100 u32 dqh_zlt:1; /* bit 29, zero length termination */
101#define DQH_ZLT BIT(29)
102 u32 dqh_mult:2; /* bits 31:30 */
103#define DQH_MULT (BIT(30) | BIT(31))
104
105 /* current dTD pointer */
106 u32 dqh_current; /* locate the transfer in progress */
107#define DQH_C_DTD(e) \
108 (((e)>>5)&0x7ffffff) /* bits 31:5, current dTD pointer */
109
110 /* transfer overlay, hardware parts of a struct langwell_dtd */
111 u32 dtd_next;
112 u32 dtd_status:8; /* bits 7:0, execution back states */
113 u32 dtd_res0:2; /* bits 9:8 */
114 u32 dtd_multo:2; /* bits 11:10, multipier override */
115 u32 dtd_res1:3; /* bits 14:12 */
116 u32 dtd_ioc:1; /* bit 15, interrupt on complete */
117 u32 dtd_total:15; /* bits 30:16, total bytes */
118 u32 dtd_res2:1; /* bit 31 */
119 u32 dtd_buf[5]; /* dTD buffer pointer page 0 to 4 */
120
121 u32 dqh_res2;
122 struct usb_ctrlrequest dqh_setup; /* setup packet buffer */
123} __attribute__ ((aligned(64)));
124
125
126/* endpoint data structure */
127struct langwell_ep {
128 struct usb_ep ep;
129 dma_addr_t dma;
130 struct langwell_udc *dev;
131 unsigned long irqs;
132 struct list_head queue;
133 struct langwell_dqh *dqh;
134 const struct usb_endpoint_descriptor *desc;
135 char name[14];
136 unsigned stopped:1,
137 ep_type:2,
138 ep_num:8;
139};
140
141
142/* request data structure */
143struct langwell_request {
144 struct usb_request req;
145 struct langwell_dtd *dtd, *head, *tail;
146 struct langwell_ep *ep;
147 dma_addr_t dtd_dma;
148 struct list_head queue;
149 unsigned dtd_count;
150 unsigned mapped:1;
151};
152
153
154/* ep0 transfer state */
155enum ep0_state {
156 WAIT_FOR_SETUP,
157 DATA_STATE_XMIT,
158 DATA_STATE_NEED_ZLP,
159 WAIT_FOR_OUT_STATUS,
160 DATA_STATE_RECV,
161};
162
163
164/* device suspend state */
165enum lpm_state {
166 LPM_L0, /* on */
167 LPM_L1, /* LPM L1 sleep */
168 LPM_L2, /* suspend */
169 LPM_L3, /* off */
170};
171
172
173/* device data structure */
174struct langwell_udc {
175 /* each pci device provides one gadget, several endpoints */
176 struct usb_gadget gadget;
177 spinlock_t lock; /* device lock */
178 struct langwell_ep *ep;
179 struct usb_gadget_driver *driver;
180 struct otg_transceiver *transceiver;
181 u8 dev_addr;
182 u32 usb_state;
183 u32 resume_state;
184 u32 bus_reset;
185 enum lpm_state lpm_state;
186 enum ep0_state ep0_state;
187 u32 ep0_dir;
188 u16 dciversion;
189 unsigned ep_max;
190 unsigned devcap:1,
191 enabled:1,
192 region:1,
193 got_irq:1,
194 powered:1,
195 remote_wakeup:1,
196 rate:1,
197 is_reset:1,
198 softconnected:1,
199 vbus_active:1,
200 suspended:1,
201 stopped:1,
202 lpm:1; /* LPM capability */
203
204 /* pci state used to access those endpoints */
205 struct pci_dev *pdev;
206
207 /* Langwell otg transceiver */
208 struct langwell_otg *lotg;
209
210 /* control registers */
211 struct langwell_cap_regs __iomem *cap_regs;
212 struct langwell_op_regs __iomem *op_regs;
213
214 struct usb_ctrlrequest local_setup_buff;
215 struct langwell_dqh *ep_dqh;
216 size_t ep_dqh_size;
217 dma_addr_t ep_dqh_dma;
218
219 /* ep0 status request */
220 struct langwell_request *status_req;
221
222 /* dma pool */
223 struct dma_pool *dtd_pool;
224
225 /* make sure release() is done */
226 struct completion *done;
227};
228
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 8cc676ecbb23..1937d8c7b433 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -38,7 +38,6 @@
38#include <linux/usb.h> 38#include <linux/usb.h>
39#include <linux/usb/ch9.h> 39#include <linux/usb/ch9.h>
40#include <linux/usb/gadget.h> 40#include <linux/usb/gadget.h>
41#include <mach/pxa2xx-regs.h> /* FIXME: for PSSR */
42#include <mach/udc.h> 41#include <mach/udc.h>
43 42
44#include "pxa27x_udc.h" 43#include "pxa27x_udc.h"
@@ -474,6 +473,23 @@ static inline void udc_clear_mask_UDCCR(struct pxa_udc *udc, int mask)
474} 473}
475 474
476/** 475/**
476 * ep_write_UDCCSR - set bits in UDCCSR
477 * @udc: udc device
478 * @mask: bits to set in UDCCR
479 *
480 * Sets bits in UDCCSR (UDCCSR0 and UDCCSR*).
481 *
482 * A specific case is applied to ep0 : the ACM bit is always set to 1, for
483 * SET_INTERFACE and SET_CONFIGURATION.
484 */
485static inline void ep_write_UDCCSR(struct pxa_ep *ep, int mask)
486{
487 if (is_ep0(ep))
488 mask |= UDCCSR0_ACM;
489 udc_ep_writel(ep, UDCCSR, mask);
490}
491
492/**
477 * ep_count_bytes_remain - get how many bytes in udc endpoint 493 * ep_count_bytes_remain - get how many bytes in udc endpoint
478 * @ep: udc endpoint 494 * @ep: udc endpoint
479 * 495 *
@@ -861,7 +877,7 @@ static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req)
861 *buf++ = udc_ep_readl(ep, UDCDR); 877 *buf++ = udc_ep_readl(ep, UDCDR);
862 req->req.actual += count; 878 req->req.actual += count;
863 879
864 udc_ep_writel(ep, UDCCSR, UDCCSR_PC); 880 ep_write_UDCCSR(ep, UDCCSR_PC);
865 881
866 return count; 882 return count;
867} 883}
@@ -969,12 +985,12 @@ static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
969 if (udccsr & UDCCSR_PC) { 985 if (udccsr & UDCCSR_PC) {
970 ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n", 986 ep_vdbg(ep, "Clearing Transmit Complete, udccsr=%x\n",
971 udccsr); 987 udccsr);
972 udc_ep_writel(ep, UDCCSR, UDCCSR_PC); 988 ep_write_UDCCSR(ep, UDCCSR_PC);
973 } 989 }
974 if (udccsr & UDCCSR_TRN) { 990 if (udccsr & UDCCSR_TRN) {
975 ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n", 991 ep_vdbg(ep, "Clearing Underrun on, udccsr=%x\n",
976 udccsr); 992 udccsr);
977 udc_ep_writel(ep, UDCCSR, UDCCSR_TRN); 993 ep_write_UDCCSR(ep, UDCCSR_TRN);
978 } 994 }
979 995
980 count = write_packet(ep, req, max); 996 count = write_packet(ep, req, max);
@@ -996,7 +1012,7 @@ static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
996 } 1012 }
997 1013
998 if (is_short) 1014 if (is_short)
999 udc_ep_writel(ep, UDCCSR, UDCCSR_SP); 1015 ep_write_UDCCSR(ep, UDCCSR_SP);
1000 1016
1001 /* requests complete when all IN data is in the FIFO */ 1017 /* requests complete when all IN data is in the FIFO */
1002 if (is_last) { 1018 if (is_last) {
@@ -1029,7 +1045,7 @@ static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
1029 1045
1030 while (epout_has_pkt(ep)) { 1046 while (epout_has_pkt(ep)) {
1031 count = read_packet(ep, req); 1047 count = read_packet(ep, req);
1032 udc_ep_writel(ep, UDCCSR, UDCCSR0_OPC); 1048 ep_write_UDCCSR(ep, UDCCSR0_OPC);
1033 inc_ep_stats_bytes(ep, count, !USB_DIR_IN); 1049 inc_ep_stats_bytes(ep, count, !USB_DIR_IN);
1034 1050
1035 is_short = (count < ep->fifo_size); 1051 is_short = (count < ep->fifo_size);
@@ -1074,7 +1090,7 @@ static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req)
1074 1090
1075 /* Sends either a short packet or a 0 length packet */ 1091 /* Sends either a short packet or a 0 length packet */
1076 if (unlikely(is_short)) 1092 if (unlikely(is_short))
1077 udc_ep_writel(ep, UDCCSR, UDCCSR0_IPR); 1093 ep_write_UDCCSR(ep, UDCCSR0_IPR);
1078 1094
1079 ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n", 1095 ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n",
1080 count, is_short ? "/S" : "", is_last ? "/L" : "", 1096 count, is_short ? "/S" : "", is_last ? "/L" : "",
@@ -1277,7 +1293,7 @@ static int pxa_ep_set_halt(struct usb_ep *_ep, int value)
1277 1293
1278 /* FST, FEF bits are the same for control and non control endpoints */ 1294 /* FST, FEF bits are the same for control and non control endpoints */
1279 rc = 0; 1295 rc = 0;
1280 udc_ep_writel(ep, UDCCSR, UDCCSR_FST | UDCCSR_FEF); 1296 ep_write_UDCCSR(ep, UDCCSR_FST | UDCCSR_FEF);
1281 if (is_ep0(ep)) 1297 if (is_ep0(ep))
1282 set_ep0state(ep->dev, STALL); 1298 set_ep0state(ep->dev, STALL);
1283 1299
@@ -1343,7 +1359,7 @@ static void pxa_ep_fifo_flush(struct usb_ep *_ep)
1343 udc_ep_readl(ep, UDCDR); 1359 udc_ep_readl(ep, UDCDR);
1344 } else { 1360 } else {
1345 /* most IN status is the same, but ISO can't stall */ 1361 /* most IN status is the same, but ISO can't stall */
1346 udc_ep_writel(ep, UDCCSR, 1362 ep_write_UDCCSR(ep,
1347 UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN 1363 UDCCSR_PC | UDCCSR_FEF | UDCCSR_TRN
1348 | (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST)); 1364 | (EPXFERTYPE_is_ISO(ep) ? 0 : UDCCSR_SST));
1349 } 1365 }
@@ -1728,6 +1744,7 @@ static void udc_enable(struct pxa_udc *udc)
1728 memset(&udc->stats, 0, sizeof(udc->stats)); 1744 memset(&udc->stats, 0, sizeof(udc->stats));
1729 1745
1730 udc_set_mask_UDCCR(udc, UDCCR_UDE); 1746 udc_set_mask_UDCCR(udc, UDCCR_UDE);
1747 ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_ACM);
1731 udelay(2); 1748 udelay(2);
1732 if (udc_readl(udc, UDCCR) & UDCCR_EMCE) 1749 if (udc_readl(udc, UDCCR) & UDCCR_EMCE)
1733 dev_err(udc->dev, "Configuration errors, udc disabled\n"); 1750 dev_err(udc->dev, "Configuration errors, udc disabled\n");
@@ -1893,6 +1910,15 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
1893 1910
1894 nuke(ep, -EPROTO); 1911 nuke(ep, -EPROTO);
1895 1912
1913 /*
1914 * In the PXA320 manual, in the section about Back-to-Back setup
1915 * packets, it describes this situation. The solution is to set OPC to
1916 * get rid of the status packet, and then continue with the setup
1917 * packet. Generalize to pxa27x CPUs.
1918 */
1919 if (epout_has_pkt(ep) && (ep_count_bytes_remain(ep) == 0))
1920 ep_write_UDCCSR(ep, UDCCSR0_OPC);
1921
1896 /* read SETUP packet */ 1922 /* read SETUP packet */
1897 for (i = 0; i < 2; i++) { 1923 for (i = 0; i < 2; i++) {
1898 if (unlikely(ep_is_empty(ep))) 1924 if (unlikely(ep_is_empty(ep)))
@@ -1919,7 +1945,7 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
1919 set_ep0state(udc, OUT_DATA_STAGE); 1945 set_ep0state(udc, OUT_DATA_STAGE);
1920 1946
1921 /* Tell UDC to enter Data Stage */ 1947 /* Tell UDC to enter Data Stage */
1922 udc_ep_writel(ep, UDCCSR, UDCCSR0_SA | UDCCSR0_OPC); 1948 ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
1923 1949
1924 i = udc->driver->setup(&udc->gadget, &u.r); 1950 i = udc->driver->setup(&udc->gadget, &u.r);
1925 if (i < 0) 1951 if (i < 0)
@@ -1929,7 +1955,7 @@ out:
1929stall: 1955stall:
1930 ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n", 1956 ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
1931 udc_ep_readl(ep, UDCCSR), i); 1957 udc_ep_readl(ep, UDCCSR), i);
1932 udc_ep_writel(ep, UDCCSR, UDCCSR0_FST | UDCCSR0_FTF); 1958 ep_write_UDCCSR(ep, UDCCSR0_FST | UDCCSR0_FTF);
1933 set_ep0state(udc, STALL); 1959 set_ep0state(udc, STALL);
1934 goto out; 1960 goto out;
1935} 1961}
@@ -1966,6 +1992,8 @@ stall:
1966 * cleared by software. 1992 * cleared by software.
1967 * - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it 1993 * - clearing UDCCSR0_OPC always flushes ep0. If in setup stage, never do it
1968 * before reading ep0. 1994 * before reading ep0.
1995 * This is true only for PXA27x. This is not true anymore for PXA3xx family
1996 * (check Back-to-Back setup packet in developers guide).
1969 * - irq can be called on a "packet complete" event (opc_irq=1), while 1997 * - irq can be called on a "packet complete" event (opc_irq=1), while
1970 * UDCCSR0_OPC is not yet raised (delta can be as big as 100ms 1998 * UDCCSR0_OPC is not yet raised (delta can be as big as 100ms
1971 * from experimentation). 1999 * from experimentation).
@@ -1998,7 +2026,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
1998 if (udccsr0 & UDCCSR0_SST) { 2026 if (udccsr0 & UDCCSR0_SST) {
1999 ep_dbg(ep, "clearing stall status\n"); 2027 ep_dbg(ep, "clearing stall status\n");
2000 nuke(ep, -EPIPE); 2028 nuke(ep, -EPIPE);
2001 udc_ep_writel(ep, UDCCSR, UDCCSR0_SST); 2029 ep_write_UDCCSR(ep, UDCCSR0_SST);
2002 ep0_idle(udc); 2030 ep0_idle(udc);
2003 } 2031 }
2004 2032
@@ -2023,7 +2051,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
2023 break; 2051 break;
2024 case IN_DATA_STAGE: /* GET_DESCRIPTOR */ 2052 case IN_DATA_STAGE: /* GET_DESCRIPTOR */
2025 if (epout_has_pkt(ep)) 2053 if (epout_has_pkt(ep))
2026 udc_ep_writel(ep, UDCCSR, UDCCSR0_OPC); 2054 ep_write_UDCCSR(ep, UDCCSR0_OPC);
2027 if (req && !ep_is_full(ep)) 2055 if (req && !ep_is_full(ep))
2028 completed = write_ep0_fifo(ep, req); 2056 completed = write_ep0_fifo(ep, req);
2029 if (completed) 2057 if (completed)
@@ -2036,7 +2064,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
2036 ep0_end_out_req(ep, req); 2064 ep0_end_out_req(ep, req);
2037 break; 2065 break;
2038 case STALL: 2066 case STALL:
2039 udc_ep_writel(ep, UDCCSR, UDCCSR0_FST); 2067 ep_write_UDCCSR(ep, UDCCSR0_FST);
2040 break; 2068 break;
2041 case IN_STATUS_STAGE: 2069 case IN_STATUS_STAGE:
2042 /* 2070 /*
@@ -2131,6 +2159,7 @@ static void pxa27x_change_configuration(struct pxa_udc *udc, int config)
2131 2159
2132 set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF); 2160 set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
2133 udc->driver->setup(&udc->gadget, &req); 2161 udc->driver->setup(&udc->gadget, &req);
2162 ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
2134} 2163}
2135 2164
2136/** 2165/**
@@ -2159,6 +2188,7 @@ static void pxa27x_change_interface(struct pxa_udc *udc, int iface, int alt)
2159 2188
2160 set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF); 2189 set_ep0state(udc, WAIT_ACK_SET_CONF_INTERF);
2161 udc->driver->setup(&udc->gadget, &req); 2190 udc->driver->setup(&udc->gadget, &req);
2191 ep_write_UDCCSR(&udc->pxa_ep[0], UDCCSR0_AREN);
2162} 2192}
2163 2193
2164/* 2194/*
@@ -2280,7 +2310,7 @@ static void irq_udc_reset(struct pxa_udc *udc)
2280 memset(&udc->stats, 0, sizeof udc->stats); 2310 memset(&udc->stats, 0, sizeof udc->stats);
2281 2311
2282 nuke(ep, -EPROTO); 2312 nuke(ep, -EPROTO);
2283 udc_ep_writel(ep, UDCCSR, UDCCSR0_FTF | UDCCSR0_OPC); 2313 ep_write_UDCCSR(ep, UDCCSR0_FTF | UDCCSR0_OPC);
2284 ep0_idle(udc); 2314 ep0_idle(udc);
2285} 2315}
2286 2316
@@ -2479,6 +2509,12 @@ static void pxa_udc_shutdown(struct platform_device *_dev)
2479 udc_disable(udc); 2509 udc_disable(udc);
2480} 2510}
2481 2511
2512#ifdef CONFIG_CPU_PXA27x
2513extern void pxa27x_clear_otgph(void);
2514#else
2515#define pxa27x_clear_otgph() do {} while (0)
2516#endif
2517
2482#ifdef CONFIG_PM 2518#ifdef CONFIG_PM
2483/** 2519/**
2484 * pxa_udc_suspend - Suspend udc device 2520 * pxa_udc_suspend - Suspend udc device
@@ -2546,8 +2582,7 @@ static int pxa_udc_resume(struct platform_device *_dev)
2546 * Software must configure the USB OTG pad, UDC, and UHC 2582 * Software must configure the USB OTG pad, UDC, and UHC
2547 * to the state they were in before entering sleep mode. 2583 * to the state they were in before entering sleep mode.
2548 */ 2584 */
2549 if (cpu_is_pxa27x()) 2585 pxa27x_clear_otgph();
2550 PSSR |= PSSR_OTGPH;
2551 2586
2552 return 0; 2587 return 0;
2553} 2588}
@@ -2571,7 +2606,7 @@ static struct platform_driver udc_driver = {
2571 2606
2572static int __init udc_init(void) 2607static int __init udc_init(void)
2573{ 2608{
2574 if (!cpu_is_pxa27x()) 2609 if (!cpu_is_pxa27x() && !cpu_is_pxa3xx())
2575 return -ENODEV; 2610 return -ENODEV;
2576 2611
2577 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION); 2612 printk(KERN_INFO "%s: version %s\n", driver_name, DRIVER_VERSION);
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index db58125331da..e25225e26586 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -130,6 +130,8 @@
130#define UP2OCR_HXOE (1 << 17) /* Transceiver Output Enable */ 130#define UP2OCR_HXOE (1 << 17) /* Transceiver Output Enable */
131#define UP2OCR_SEOS (1 << 24) /* Single-Ended Output Select */ 131#define UP2OCR_SEOS (1 << 24) /* Single-Ended Output Select */
132 132
133#define UDCCSR0_ACM (1 << 9) /* Ack Control Mode */
134#define UDCCSR0_AREN (1 << 8) /* Ack Response Enable */
133#define UDCCSR0_SA (1 << 7) /* Setup Active */ 135#define UDCCSR0_SA (1 << 7) /* Setup Active */
134#define UDCCSR0_RNE (1 << 6) /* Receive FIFO Not Empty */ 136#define UDCCSR0_RNE (1 << 6) /* Receive FIFO Not Empty */
135#define UDCCSR0_FST (1 << 5) /* Force Stall */ 137#define UDCCSR0_FST (1 << 5) /* Force Stall */
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
new file mode 100644
index 000000000000..50c71aae2cc2
--- /dev/null
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -0,0 +1,3269 @@
1/* linux/drivers/usb/gadget/s3c-hsotg.c
2 *
3 * Copyright 2008 Openmoko, Inc.
4 * Copyright 2008 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * S3C USB2.0 High-speed / OtG driver
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/spinlock.h>
18#include <linux/interrupt.h>
19#include <linux/platform_device.h>
20#include <linux/dma-mapping.h>
21#include <linux/debugfs.h>
22#include <linux/seq_file.h>
23#include <linux/delay.h>
24#include <linux/io.h>
25
26#include <linux/usb/ch9.h>
27#include <linux/usb/gadget.h>
28
29#include <mach/map.h>
30
31#include <plat/regs-usb-hsotg-phy.h>
32#include <plat/regs-usb-hsotg.h>
33#include <plat/regs-sys.h>
34#include <plat/udc-hs.h>
35
36#define DMA_ADDR_INVALID (~((dma_addr_t)0))
37
38/* EP0_MPS_LIMIT
39 *
40 * Unfortunately there seems to be a limit of the amount of data that can
41 * be transfered by IN transactions on EP0. This is either 127 bytes or 3
42 * packets (which practially means 1 packet and 63 bytes of data) when the
43 * MPS is set to 64.
44 *
45 * This means if we are wanting to move >127 bytes of data, we need to
46 * split the transactions up, but just doing one packet at a time does
47 * not work (this may be an implicit DATA0 PID on first packet of the
48 * transaction) and doing 2 packets is outside the controller's limits.
49 *
50 * If we try to lower the MPS size for EP0, then no transfers work properly
51 * for EP0, and the system will fail basic enumeration. As no cause for this
52 * has currently been found, we cannot support any large IN transfers for
53 * EP0.
54 */
55#define EP0_MPS_LIMIT 64
56
57struct s3c_hsotg;
58struct s3c_hsotg_req;
59
60/**
61 * struct s3c_hsotg_ep - driver endpoint definition.
62 * @ep: The gadget layer representation of the endpoint.
63 * @name: The driver generated name for the endpoint.
64 * @queue: Queue of requests for this endpoint.
65 * @parent: Reference back to the parent device structure.
66 * @req: The current request that the endpoint is processing. This is
67 * used to indicate an request has been loaded onto the endpoint
68 * and has yet to be completed (maybe due to data move, or simply
69 * awaiting an ack from the core all the data has been completed).
70 * @debugfs: File entry for debugfs file for this endpoint.
71 * @lock: State lock to protect contents of endpoint.
72 * @dir_in: Set to true if this endpoint is of the IN direction, which
73 * means that it is sending data to the Host.
74 * @index: The index for the endpoint registers.
75 * @name: The name array passed to the USB core.
76 * @halted: Set if the endpoint has been halted.
77 * @periodic: Set if this is a periodic ep, such as Interrupt
78 * @sent_zlp: Set if we've sent a zero-length packet.
79 * @total_data: The total number of data bytes done.
80 * @fifo_size: The size of the FIFO (for periodic IN endpoints)
81 * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
82 * @last_load: The offset of data for the last start of request.
83 * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
84 *
85 * This is the driver's state for each registered enpoint, allowing it
86 * to keep track of transactions that need doing. Each endpoint has a
87 * lock to protect the state, to try and avoid using an overall lock
88 * for the host controller as much as possible.
89 *
90 * For periodic IN endpoints, we have fifo_size and fifo_load to try
91 * and keep track of the amount of data in the periodic FIFO for each
92 * of these as we don't have a status register that tells us how much
93 * is in each of them.
94 */
95struct s3c_hsotg_ep {
96 struct usb_ep ep;
97 struct list_head queue;
98 struct s3c_hsotg *parent;
99 struct s3c_hsotg_req *req;
100 struct dentry *debugfs;
101
102 spinlock_t lock;
103
104 unsigned long total_data;
105 unsigned int size_loaded;
106 unsigned int last_load;
107 unsigned int fifo_load;
108 unsigned short fifo_size;
109
110 unsigned char dir_in;
111 unsigned char index;
112
113 unsigned int halted:1;
114 unsigned int periodic:1;
115 unsigned int sent_zlp:1;
116
117 char name[10];
118};
119
120#define S3C_HSOTG_EPS (8+1) /* limit to 9 for the moment */
121
122/**
123 * struct s3c_hsotg - driver state.
124 * @dev: The parent device supplied to the probe function
125 * @driver: USB gadget driver
126 * @plat: The platform specific configuration data.
127 * @regs: The memory area mapped for accessing registers.
128 * @regs_res: The resource that was allocated when claiming register space.
129 * @irq: The IRQ number we are using
130 * @debug_root: root directrory for debugfs.
131 * @debug_file: main status file for debugfs.
132 * @debug_fifo: FIFO status file for debugfs.
133 * @ep0_reply: Request used for ep0 reply.
134 * @ep0_buff: Buffer for EP0 reply data, if needed.
135 * @ctrl_buff: Buffer for EP0 control requests.
136 * @ctrl_req: Request for EP0 control packets.
137 * @eps: The endpoints being supplied to the gadget framework
138 */
139struct s3c_hsotg {
140 struct device *dev;
141 struct usb_gadget_driver *driver;
142 struct s3c_hsotg_plat *plat;
143
144 void __iomem *regs;
145 struct resource *regs_res;
146 int irq;
147
148 struct dentry *debug_root;
149 struct dentry *debug_file;
150 struct dentry *debug_fifo;
151
152 struct usb_request *ep0_reply;
153 struct usb_request *ctrl_req;
154 u8 ep0_buff[8];
155 u8 ctrl_buff[8];
156
157 struct usb_gadget gadget;
158 struct s3c_hsotg_ep eps[];
159};
160
161/**
162 * struct s3c_hsotg_req - data transfer request
163 * @req: The USB gadget request
164 * @queue: The list of requests for the endpoint this is queued for.
165 * @in_progress: Has already had size/packets written to core
166 * @mapped: DMA buffer for this request has been mapped via dma_map_single().
167 */
168struct s3c_hsotg_req {
169 struct usb_request req;
170 struct list_head queue;
171 unsigned char in_progress;
172 unsigned char mapped;
173};
174
175/* conversion functions */
176static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
177{
178 return container_of(req, struct s3c_hsotg_req, req);
179}
180
181static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
182{
183 return container_of(ep, struct s3c_hsotg_ep, ep);
184}
185
186static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)
187{
188 return container_of(gadget, struct s3c_hsotg, gadget);
189}
190
191static inline void __orr32(void __iomem *ptr, u32 val)
192{
193 writel(readl(ptr) | val, ptr);
194}
195
196static inline void __bic32(void __iomem *ptr, u32 val)
197{
198 writel(readl(ptr) & ~val, ptr);
199}
200
201/* forward decleration of functions */
202static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);
203
204/**
205 * using_dma - return the DMA status of the driver.
206 * @hsotg: The driver state.
207 *
208 * Return true if we're using DMA.
209 *
210 * Currently, we have the DMA support code worked into everywhere
211 * that needs it, but the AMBA DMA implementation in the hardware can
212 * only DMA from 32bit aligned addresses. This means that gadgets such
213 * as the CDC Ethernet cannot work as they often pass packets which are
214 * not 32bit aligned.
215 *
216 * Unfortunately the choice to use DMA or not is global to the controller
217 * and seems to be only settable when the controller is being put through
218 * a core reset. This means we either need to fix the gadgets to take
219 * account of DMA alignment, or add bounce buffers (yuerk).
220 *
221 * Until this issue is sorted out, we always return 'false'.
222 */
223static inline bool using_dma(struct s3c_hsotg *hsotg)
224{
225 return false; /* support is not complete */
226}
227
228/**
229 * s3c_hsotg_en_gsint - enable one or more of the general interrupt
230 * @hsotg: The device state
231 * @ints: A bitmask of the interrupts to enable
232 */
233static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)
234{
235 u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);
236 u32 new_gsintmsk;
237
238 new_gsintmsk = gsintmsk | ints;
239
240 if (new_gsintmsk != gsintmsk) {
241 dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
242 writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);
243 }
244}
245
246/**
247 * s3c_hsotg_disable_gsint - disable one or more of the general interrupt
248 * @hsotg: The device state
249 * @ints: A bitmask of the interrupts to enable
250 */
251static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)
252{
253 u32 gsintmsk = readl(hsotg->regs + S3C_GINTMSK);
254 u32 new_gsintmsk;
255
256 new_gsintmsk = gsintmsk & ~ints;
257
258 if (new_gsintmsk != gsintmsk)
259 writel(new_gsintmsk, hsotg->regs + S3C_GINTMSK);
260}
261
262/**
263 * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq
264 * @hsotg: The device state
265 * @ep: The endpoint index
266 * @dir_in: True if direction is in.
267 * @en: The enable value, true to enable
268 *
269 * Set or clear the mask for an individual endpoint's interrupt
270 * request.
271 */
272static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,
273 unsigned int ep, unsigned int dir_in,
274 unsigned int en)
275{
276 unsigned long flags;
277 u32 bit = 1 << ep;
278 u32 daint;
279
280 if (!dir_in)
281 bit <<= 16;
282
283 local_irq_save(flags);
284 daint = readl(hsotg->regs + S3C_DAINTMSK);
285 if (en)
286 daint |= bit;
287 else
288 daint &= ~bit;
289 writel(daint, hsotg->regs + S3C_DAINTMSK);
290 local_irq_restore(flags);
291}
292
293/**
294 * s3c_hsotg_init_fifo - initialise non-periodic FIFOs
295 * @hsotg: The device instance.
296 */
297static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
298{
299 /* the ryu 2.6.24 release ahs
300 writel(0x1C0, hsotg->regs + S3C_GRXFSIZ);
301 writel(S3C_GNPTXFSIZ_NPTxFStAddr(0x200) |
302 S3C_GNPTXFSIZ_NPTxFDep(0x1C0),
303 hsotg->regs + S3C_GNPTXFSIZ);
304 */
305
306 /* set FIFO sizes to 2048/0x1C0 */
307
308 writel(2048, hsotg->regs + S3C_GRXFSIZ);
309 writel(S3C_GNPTXFSIZ_NPTxFStAddr(2048) |
310 S3C_GNPTXFSIZ_NPTxFDep(0x1C0),
311 hsotg->regs + S3C_GNPTXFSIZ);
312}
313
314/**
315 * @ep: USB endpoint to allocate request for.
316 * @flags: Allocation flags
317 *
318 * Allocate a new USB request structure appropriate for the specified endpoint
319 */
320struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, gfp_t flags)
321{
322 struct s3c_hsotg_req *req;
323
324 req = kzalloc(sizeof(struct s3c_hsotg_req), flags);
325 if (!req)
326 return NULL;
327
328 INIT_LIST_HEAD(&req->queue);
329
330 req->req.dma = DMA_ADDR_INVALID;
331 return &req->req;
332}
333
334/**
335 * is_ep_periodic - return true if the endpoint is in periodic mode.
336 * @hs_ep: The endpoint to query.
337 *
338 * Returns true if the endpoint is in periodic mode, meaning it is being
339 * used for an Interrupt or ISO transfer.
340 */
341static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
342{
343 return hs_ep->periodic;
344}
345
346/**
347 * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request
348 * @hsotg: The device state.
349 * @hs_ep: The endpoint for the request
350 * @hs_req: The request being processed.
351 *
352 * This is the reverse of s3c_hsotg_map_dma(), called for the completion
353 * of a request to ensure the buffer is ready for access by the caller.
354*/
355static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
356 struct s3c_hsotg_ep *hs_ep,
357 struct s3c_hsotg_req *hs_req)
358{
359 struct usb_request *req = &hs_req->req;
360 enum dma_data_direction dir;
361
362 dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
363
364 /* ignore this if we're not moving any data */
365 if (hs_req->req.length == 0)
366 return;
367
368 if (hs_req->mapped) {
369 /* we mapped this, so unmap and remove the dma */
370
371 dma_unmap_single(hsotg->dev, req->dma, req->length, dir);
372
373 req->dma = DMA_ADDR_INVALID;
374 hs_req->mapped = 0;
375 } else {
376 dma_sync_single(hsotg->dev, req->dma, req->length, dir);
377 }
378}
379
380/**
381 * s3c_hsotg_write_fifo - write packet Data to the TxFIFO
382 * @hsotg: The controller state.
383 * @hs_ep: The endpoint we're going to write for.
384 * @hs_req: The request to write data for.
385 *
386 * This is called when the TxFIFO has some space in it to hold a new
387 * transmission and we have something to give it. The actual setup of
388 * the data size is done elsewhere, so all we have to do is to actually
389 * write the data.
390 *
391 * The return value is zero if there is more space (or nothing was done)
392 * otherwise -ENOSPC is returned if the FIFO space was used up.
393 *
394 * This routine is only needed for PIO
395*/
396static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
397 struct s3c_hsotg_ep *hs_ep,
398 struct s3c_hsotg_req *hs_req)
399{
400 bool periodic = is_ep_periodic(hs_ep);
401 u32 gnptxsts = readl(hsotg->regs + S3C_GNPTXSTS);
402 int buf_pos = hs_req->req.actual;
403 int to_write = hs_ep->size_loaded;
404 void *data;
405 int can_write;
406 int pkt_round;
407
408 to_write -= (buf_pos - hs_ep->last_load);
409
410 /* if there's nothing to write, get out early */
411 if (to_write == 0)
412 return 0;
413
414 if (periodic) {
415 u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
416 int size_left;
417 int size_done;
418
419 /* work out how much data was loaded so we can calculate
420 * how much data is left in the fifo. */
421
422 size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
423
424 dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
425 __func__, size_left,
426 hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
427
428 /* how much of the data has moved */
429 size_done = hs_ep->size_loaded - size_left;
430
431 /* how much data is left in the fifo */
432 can_write = hs_ep->fifo_load - size_done;
433 dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
434 __func__, can_write);
435
436 can_write = hs_ep->fifo_size - can_write;
437 dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
438 __func__, can_write);
439
440 if (can_write <= 0) {
441 s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
442 return -ENOSPC;
443 }
444 } else {
445 if (S3C_GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {
446 dev_dbg(hsotg->dev,
447 "%s: no queue slots available (0x%08x)\n",
448 __func__, gnptxsts);
449
450 s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
451 return -ENOSPC;
452 }
453
454 can_write = S3C_GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);
455 }
456
457 dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",
458 __func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket);
459
460 /* limit to 512 bytes of data, it seems at least on the non-periodic
461 * FIFO, requests of >512 cause the endpoint to get stuck with a
462 * fragment of the end of the transfer in it.
463 */
464 if (can_write > 512)
465 can_write = 512;
466
467 /* see if we can write data */
468
469 if (to_write > can_write) {
470 to_write = can_write;
471 pkt_round = to_write % hs_ep->ep.maxpacket;
472
473 /* Not sure, but we probably shouldn't be writing partial
474 * packets into the FIFO, so round the write down to an
475 * exact number of packets.
476 *
477 * Note, we do not currently check to see if we can ever
478 * write a full packet or not to the FIFO.
479 */
480
481 if (pkt_round)
482 to_write -= pkt_round;
483
484 /* enable correct FIFO interrupt to alert us when there
485 * is more room left. */
486
487 s3c_hsotg_en_gsint(hsotg,
488 periodic ? S3C_GINTSTS_PTxFEmp :
489 S3C_GINTSTS_NPTxFEmp);
490 }
491
492 dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
493 to_write, hs_req->req.length, can_write, buf_pos);
494
495 if (to_write <= 0)
496 return -ENOSPC;
497
498 hs_req->req.actual = buf_pos + to_write;
499 hs_ep->total_data += to_write;
500
501 if (periodic)
502 hs_ep->fifo_load += to_write;
503
504 to_write = DIV_ROUND_UP(to_write, 4);
505 data = hs_req->req.buf + buf_pos;
506
507 writesl(hsotg->regs + S3C_EPFIFO(hs_ep->index), data, to_write);
508
509 return (to_write >= can_write) ? -ENOSPC : 0;
510}
511
512/**
513 * get_ep_limit - get the maximum data legnth for this endpoint
514 * @hs_ep: The endpoint
515 *
516 * Return the maximum data that can be queued in one go on a given endpoint
517 * so that transfers that are too long can be split.
518 */
519static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
520{
521 int index = hs_ep->index;
522 unsigned maxsize;
523 unsigned maxpkt;
524
525 if (index != 0) {
526 maxsize = S3C_DxEPTSIZ_XferSize_LIMIT + 1;
527 maxpkt = S3C_DxEPTSIZ_PktCnt_LIMIT + 1;
528 } else {
529 if (hs_ep->dir_in) {
530 /* maxsize = S3C_DIEPTSIZ0_XferSize_LIMIT + 1; */
531 maxsize = 64+64+1;
532 maxpkt = S3C_DIEPTSIZ0_PktCnt_LIMIT + 1;
533 } else {
534 maxsize = 0x3f;
535 maxpkt = 2;
536 }
537 }
538
539 /* we made the constant loading easier above by using +1 */
540 maxpkt--;
541 maxsize--;
542
543 /* constrain by packet count if maxpkts*pktsize is greater
544 * than the length register size. */
545
546 if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
547 maxsize = maxpkt * hs_ep->ep.maxpacket;
548
549 return maxsize;
550}
551
552/**
553 * s3c_hsotg_start_req - start a USB request from an endpoint's queue
554 * @hsotg: The controller state.
555 * @hs_ep: The endpoint to process a request for
556 * @hs_req: The request to start.
557 * @continuing: True if we are doing more for the current request.
558 *
559 * Start the given request running by setting the endpoint registers
560 * appropriately, and writing any data to the FIFOs.
561 */
562static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
563 struct s3c_hsotg_ep *hs_ep,
564 struct s3c_hsotg_req *hs_req,
565 bool continuing)
566{
567 struct usb_request *ureq = &hs_req->req;
568 int index = hs_ep->index;
569 int dir_in = hs_ep->dir_in;
570 u32 epctrl_reg;
571 u32 epsize_reg;
572 u32 epsize;
573 u32 ctrl;
574 unsigned length;
575 unsigned packets;
576 unsigned maxreq;
577
578 if (index != 0) {
579 if (hs_ep->req && !continuing) {
580 dev_err(hsotg->dev, "%s: active request\n", __func__);
581 WARN_ON(1);
582 return;
583 } else if (hs_ep->req != hs_req && continuing) {
584 dev_err(hsotg->dev,
585 "%s: continue different req\n", __func__);
586 WARN_ON(1);
587 return;
588 }
589 }
590
591 epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
592 epsize_reg = dir_in ? S3C_DIEPTSIZ(index) : S3C_DOEPTSIZ(index);
593
594 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
595 __func__, readl(hsotg->regs + epctrl_reg), index,
596 hs_ep->dir_in ? "in" : "out");
597
598 length = ureq->length - ureq->actual;
599
600 if (0)
601 dev_dbg(hsotg->dev,
602 "REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
603 ureq->buf, length, ureq->dma,
604 ureq->no_interrupt, ureq->zero, ureq->short_not_ok);
605
606 maxreq = get_ep_limit(hs_ep);
607 if (length > maxreq) {
608 int round = maxreq % hs_ep->ep.maxpacket;
609
610 dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
611 __func__, length, maxreq, round);
612
613 /* round down to multiple of packets */
614 if (round)
615 maxreq -= round;
616
617 length = maxreq;
618 }
619
620 if (length)
621 packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
622 else
623 packets = 1; /* send one packet if length is zero. */
624
625 if (dir_in && index != 0)
626 epsize = S3C_DxEPTSIZ_MC(1);
627 else
628 epsize = 0;
629
630 if (index != 0 && ureq->zero) {
631 /* test for the packets being exactly right for the
632 * transfer */
633
634 if (length == (packets * hs_ep->ep.maxpacket))
635 packets++;
636 }
637
638 epsize |= S3C_DxEPTSIZ_PktCnt(packets);
639 epsize |= S3C_DxEPTSIZ_XferSize(length);
640
641 dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
642 __func__, packets, length, ureq->length, epsize, epsize_reg);
643
644 /* store the request as the current one we're doing */
645 hs_ep->req = hs_req;
646
647 /* write size / packets */
648 writel(epsize, hsotg->regs + epsize_reg);
649
650 ctrl = readl(hsotg->regs + epctrl_reg);
651
652 if (ctrl & S3C_DxEPCTL_Stall) {
653 dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
654
655 /* not sure what we can do here, if it is EP0 then we should
656 * get this cleared once the endpoint has transmitted the
657 * STALL packet, otherwise it needs to be cleared by the
658 * host.
659 */
660 }
661
662 if (using_dma(hsotg)) {
663 unsigned int dma_reg;
664
665 /* write DMA address to control register, buffer already
666 * synced by s3c_hsotg_ep_queue(). */
667
668 dma_reg = dir_in ? S3C_DIEPDMA(index) : S3C_DOEPDMA(index);
669 writel(ureq->dma, hsotg->regs + dma_reg);
670
671 dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
672 __func__, ureq->dma, dma_reg);
673 }
674
675 ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */
676 ctrl |= S3C_DxEPCTL_USBActEp;
677 ctrl |= S3C_DxEPCTL_CNAK; /* clear NAK set by core */
678
679 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
680 writel(ctrl, hsotg->regs + epctrl_reg);
681
682 /* set these, it seems that DMA support increments past the end
683 * of the packet buffer so we need to calculate the length from
684 * this information. */
685 hs_ep->size_loaded = length;
686 hs_ep->last_load = ureq->actual;
687
688 if (dir_in && !using_dma(hsotg)) {
689 /* set these anyway, we may need them for non-periodic in */
690 hs_ep->fifo_load = 0;
691
692 s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
693 }
694
695 /* clear the INTknTXFEmpMsk when we start request, more as a aide
696 * to debugging to see what is going on. */
697 if (dir_in)
698 writel(S3C_DIEPMSK_INTknTXFEmpMsk,
699 hsotg->regs + S3C_DIEPINT(index));
700
701 /* Note, trying to clear the NAK here causes problems with transmit
702 * on the S3C6400 ending up with the TXFIFO becomming full. */
703
704 /* check ep is enabled */
705 if (!(readl(hsotg->regs + epctrl_reg) & S3C_DxEPCTL_EPEna))
706 dev_warn(hsotg->dev,
707 "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n",
708 index, readl(hsotg->regs + epctrl_reg));
709
710 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",
711 __func__, readl(hsotg->regs + epctrl_reg));
712}
713
714/**
715 * s3c_hsotg_map_dma - map the DMA memory being used for the request
716 * @hsotg: The device state.
717 * @hs_ep: The endpoint the request is on.
718 * @req: The request being processed.
719 *
720 * We've been asked to queue a request, so ensure that the memory buffer
721 * is correctly setup for DMA. If we've been passed an extant DMA address
722 * then ensure the buffer has been synced to memory. If our buffer has no
723 * DMA memory, then we map the memory and mark our request to allow us to
724 * cleanup on completion.
725*/
726static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
727 struct s3c_hsotg_ep *hs_ep,
728 struct usb_request *req)
729{
730 enum dma_data_direction dir;
731 struct s3c_hsotg_req *hs_req = our_req(req);
732
733 dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
734
735 /* if the length is zero, ignore the DMA data */
736 if (hs_req->req.length == 0)
737 return 0;
738
739 if (req->dma == DMA_ADDR_INVALID) {
740 dma_addr_t dma;
741
742 dma = dma_map_single(hsotg->dev, req->buf, req->length, dir);
743
744 if (unlikely(dma_mapping_error(hsotg->dev, dma)))
745 goto dma_error;
746
747 if (dma & 3) {
748 dev_err(hsotg->dev, "%s: unaligned dma buffer\n",
749 __func__);
750
751 dma_unmap_single(hsotg->dev, dma, req->length, dir);
752 return -EINVAL;
753 }
754
755 hs_req->mapped = 1;
756 req->dma = dma;
757 } else {
758 dma_sync_single(hsotg->dev, req->dma, req->length, dir);
759 hs_req->mapped = 0;
760 }
761
762 return 0;
763
764dma_error:
765 dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
766 __func__, req->buf, req->length);
767
768 return -EIO;
769}
770
771static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
772 gfp_t gfp_flags)
773{
774 struct s3c_hsotg_req *hs_req = our_req(req);
775 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
776 struct s3c_hsotg *hs = hs_ep->parent;
777 unsigned long irqflags;
778 bool first;
779
780 dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
781 ep->name, req, req->length, req->buf, req->no_interrupt,
782 req->zero, req->short_not_ok);
783
784 /* initialise status of the request */
785 INIT_LIST_HEAD(&hs_req->queue);
786 req->actual = 0;
787 req->status = -EINPROGRESS;
788
789 /* if we're using DMA, sync the buffers as necessary */
790 if (using_dma(hs)) {
791 int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
792 if (ret)
793 return ret;
794 }
795
796 spin_lock_irqsave(&hs_ep->lock, irqflags);
797
798 first = list_empty(&hs_ep->queue);
799 list_add_tail(&hs_req->queue, &hs_ep->queue);
800
801 if (first)
802 s3c_hsotg_start_req(hs, hs_ep, hs_req, false);
803
804 spin_unlock_irqrestore(&hs_ep->lock, irqflags);
805
806 return 0;
807}
808
809static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
810 struct usb_request *req)
811{
812 struct s3c_hsotg_req *hs_req = our_req(req);
813
814 kfree(hs_req);
815}
816
817/**
818 * s3c_hsotg_complete_oursetup - setup completion callback
819 * @ep: The endpoint the request was on.
820 * @req: The request completed.
821 *
822 * Called on completion of any requests the driver itself
823 * submitted that need cleaning up.
824 */
825static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
826 struct usb_request *req)
827{
828 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
829 struct s3c_hsotg *hsotg = hs_ep->parent;
830
831 dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
832
833 s3c_hsotg_ep_free_request(ep, req);
834}
835
836/**
837 * ep_from_windex - convert control wIndex value to endpoint
838 * @hsotg: The driver state.
839 * @windex: The control request wIndex field (in host order).
840 *
841 * Convert the given wIndex into a pointer to an driver endpoint
842 * structure, or return NULL if it is not a valid endpoint.
843*/
844static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,
845 u32 windex)
846{
847 struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
848 int dir = (windex & USB_DIR_IN) ? 1 : 0;
849 int idx = windex & 0x7F;
850
851 if (windex >= 0x100)
852 return NULL;
853
854 if (idx > S3C_HSOTG_EPS)
855 return NULL;
856
857 if (idx && ep->dir_in != dir)
858 return NULL;
859
860 return ep;
861}
862
863/**
864 * s3c_hsotg_send_reply - send reply to control request
865 * @hsotg: The device state
866 * @ep: Endpoint 0
867 * @buff: Buffer for request
868 * @length: Length of reply.
869 *
870 * Create a request and queue it on the given endpoint. This is useful as
871 * an internal method of sending replies to certain control requests, etc.
872 */
873static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,
874 struct s3c_hsotg_ep *ep,
875 void *buff,
876 int length)
877{
878 struct usb_request *req;
879 int ret;
880
881 dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
882
883 req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
884 hsotg->ep0_reply = req;
885 if (!req) {
886 dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
887 return -ENOMEM;
888 }
889
890 req->buf = hsotg->ep0_buff;
891 req->length = length;
892 req->zero = 1; /* always do zero-length final transfer */
893 req->complete = s3c_hsotg_complete_oursetup;
894
895 if (length)
896 memcpy(req->buf, buff, length);
897 else
898 ep->sent_zlp = 1;
899
900 ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
901 if (ret) {
902 dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
903 return ret;
904 }
905
906 return 0;
907}
908
909/**
910 * s3c_hsotg_process_req_status - process request GET_STATUS
911 * @hsotg: The device state
912 * @ctrl: USB control request
913 */
914static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg,
915 struct usb_ctrlrequest *ctrl)
916{
917 struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
918 struct s3c_hsotg_ep *ep;
919 __le16 reply;
920 int ret;
921
922 dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
923
924 if (!ep0->dir_in) {
925 dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
926 return -EINVAL;
927 }
928
929 switch (ctrl->bRequestType & USB_RECIP_MASK) {
930 case USB_RECIP_DEVICE:
931 reply = cpu_to_le16(0); /* bit 0 => self powered,
932 * bit 1 => remote wakeup */
933 break;
934
935 case USB_RECIP_INTERFACE:
936 /* currently, the data result should be zero */
937 reply = cpu_to_le16(0);
938 break;
939
940 case USB_RECIP_ENDPOINT:
941 ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
942 if (!ep)
943 return -ENOENT;
944
945 reply = cpu_to_le16(ep->halted ? 1 : 0);
946 break;
947
948 default:
949 return 0;
950 }
951
952 if (le16_to_cpu(ctrl->wLength) != 2)
953 return -EINVAL;
954
955 ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);
956 if (ret) {
957 dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
958 return ret;
959 }
960
961 return 1;
962}
963
964static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);
965
/**
 * s3c_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
 * @hsotg: The device state
 * @ctrl: USB control request
 *
 * Handle a standard SET_FEATURE or CLEAR_FEATURE request. Only endpoint
 * recipients with the ENDPOINT_HALT selector are supported; anything else
 * returns -ENOENT.
 *
 * Returns 1 when handled, or a negative errno on failure.
 */
static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
					 struct usb_ctrlrequest *ctrl)
{
	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
	struct s3c_hsotg_ep *ep;

	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
		__func__, set ? "SET" : "CLEAR");

	if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
		if (!ep) {
			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
				__func__, le16_to_cpu(ctrl->wIndex));
			return -ENOENT;
		}

		switch (le16_to_cpu(ctrl->wValue)) {
		case USB_ENDPOINT_HALT:
			/* set or clear the endpoint's halt (stall) state */
			s3c_hsotg_ep_sethalt(&ep->ep, set);
			break;

		default:
			return -ENOENT;
		}
	} else
		return -ENOENT; /* currently only deal with endpoint */

	return 1;
}
1001
1002/**
1003 * s3c_hsotg_process_control - process a control request
1004 * @hsotg: The device state
1005 * @ctrl: The control request received
1006 *
1007 * The controller has received the SETUP phase of a control request, and
1008 * needs to work out what to do next (and whether to pass it on to the
1009 * gadget driver).
1010 */
1011static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
1012 struct usb_ctrlrequest *ctrl)
1013{
1014 struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
1015 int ret = 0;
1016 u32 dcfg;
1017
1018 ep0->sent_zlp = 0;
1019
1020 dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
1021 ctrl->bRequest, ctrl->bRequestType,
1022 ctrl->wValue, ctrl->wLength);
1023
1024 /* record the direction of the request, for later use when enquing
1025 * packets onto EP0. */
1026
1027 ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;
1028 dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);
1029
1030 /* if we've no data with this request, then the last part of the
1031 * transaction is going to implicitly be IN. */
1032 if (ctrl->wLength == 0)
1033 ep0->dir_in = 1;
1034
1035 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1036 switch (ctrl->bRequest) {
1037 case USB_REQ_SET_ADDRESS:
1038 dcfg = readl(hsotg->regs + S3C_DCFG);
1039 dcfg &= ~S3C_DCFG_DevAddr_MASK;
1040 dcfg |= ctrl->wValue << S3C_DCFG_DevAddr_SHIFT;
1041 writel(dcfg, hsotg->regs + S3C_DCFG);
1042
1043 dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
1044
1045 ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
1046 return;
1047
1048 case USB_REQ_GET_STATUS:
1049 ret = s3c_hsotg_process_req_status(hsotg, ctrl);
1050 break;
1051
1052 case USB_REQ_CLEAR_FEATURE:
1053 case USB_REQ_SET_FEATURE:
1054 ret = s3c_hsotg_process_req_feature(hsotg, ctrl);
1055 break;
1056 }
1057 }
1058
1059 /* as a fallback, try delivering it to the driver to deal with */
1060
1061 if (ret == 0 && hsotg->driver) {
1062 ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
1063 if (ret < 0)
1064 dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
1065 }
1066
1067 if (ret > 0) {
1068 if (!ep0->dir_in) {
1069 /* need to generate zlp in reply or take data */
1070 /* todo - deal with any data we might be sent? */
1071 ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
1072 }
1073 }
1074
1075 /* the request is either unhandlable, or is not formatted correctly
1076 * so respond with a STALL for the status stage to indicate failure.
1077 */
1078
1079 if (ret < 0) {
1080 u32 reg;
1081 u32 ctrl;
1082
1083 dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
1084 reg = (ep0->dir_in) ? S3C_DIEPCTL0 : S3C_DOEPCTL0;
1085
1086 /* S3C_DxEPCTL_Stall will be cleared by EP once it has
1087 * taken effect, so no need to clear later. */
1088
1089 ctrl = readl(hsotg->regs + reg);
1090 ctrl |= S3C_DxEPCTL_Stall;
1091 ctrl |= S3C_DxEPCTL_CNAK;
1092 writel(ctrl, hsotg->regs + reg);
1093
1094 dev_dbg(hsotg->dev,
1095 "writen DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
1096 ctrl, reg, readl(hsotg->regs + reg));
1097
1098 /* don't belive we need to anything more to get the EP
1099 * to reply with a STALL packet */
1100 }
1101}
1102
1103static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
1104
1105/**
1106 * s3c_hsotg_complete_setup - completion of a setup transfer
1107 * @ep: The endpoint the request was on.
1108 * @req: The request completed.
1109 *
1110 * Called on completion of any requests the driver itself submitted for
1111 * EP0 setup packets
1112 */
1113static void s3c_hsotg_complete_setup(struct usb_ep *ep,
1114 struct usb_request *req)
1115{
1116 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
1117 struct s3c_hsotg *hsotg = hs_ep->parent;
1118
1119 if (req->status < 0) {
1120 dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
1121 return;
1122 }
1123
1124 if (req->actual == 0)
1125 s3c_hsotg_enqueue_setup(hsotg);
1126 else
1127 s3c_hsotg_process_control(hsotg, req->buf);
1128}
1129
/**
 * s3c_hsotg_enqueue_setup - start a request for EP0 packets
 * @hsotg: The device state.
 *
 * Enqueue a request on EP0 if necessary to receive any SETUP packets
 * from the host.
 */
static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
{
	struct usb_request *req = hsotg->ctrl_req;
	struct s3c_hsotg_req *hs_req = our_req(req);
	int ret;

	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);

	/* a SETUP packet is always 8 bytes; receive it into the dedicated
	 * control buffer and hand completions to our setup handler */
	req->zero = 0;
	req->length = 8;
	req->buf = hsotg->ctrl_buff;
	req->complete = s3c_hsotg_complete_setup;

	/* avoid double-queueing the shared control request */
	if (!list_empty(&hs_req->queue)) {
		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
		return;
	}

	/* SETUP packets arrive on the OUT side of endpoint 0 */
	hsotg->eps[0].dir_in = 0;

	ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
	if (ret < 0) {
		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
		/* Don't think there's much we can do other than watch the
		 * driver fail. */
	}
}
1164
1165/**
1166 * get_ep_head - return the first request on the endpoint
1167 * @hs_ep: The controller endpoint to get
1168 *
1169 * Get the first request on the endpoint.
1170*/
1171static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
1172{
1173 if (list_empty(&hs_ep->queue))
1174 return NULL;
1175
1176 return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);
1177}
1178
/**
 * s3c_hsotg_complete_request - complete a request given to us
 * @hsotg: The device state.
 * @hs_ep: The endpoint the request was on.
 * @hs_req: The request to complete.
 * @result: The result code (0 => Ok, otherwise errno)
 *
 * The given request has finished, so call the necessary completion
 * if it has one and then look to see if we can start a new request
 * on the endpoint.
 *
 * Note, expects the ep to already be locked as appropriate.
 */
static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
				       struct s3c_hsotg_ep *hs_ep,
				       struct s3c_hsotg_req *hs_req,
				       int result)
{
	bool restart;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
		return;
	}

	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);

	/* only replace the status if we've not already set an error
	 * from a previous transaction */

	if (hs_req->req.status == -EINPROGRESS)
		hs_req->req.status = result;

	/* detach the request from the endpoint before any callback runs */
	hs_ep->req = NULL;
	list_del_init(&hs_req->queue);

	if (using_dma(hsotg))
		s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);

	/* call the complete request with the locks off, just in case the
	 * request tries to queue more work for this endpoint. */

	if (hs_req->req.complete) {
		spin_unlock(&hs_ep->lock);
		hs_req->req.complete(&hs_ep->ep, &hs_req->req);
		spin_lock(&hs_ep->lock);
	}

	/* Look to see if there is anything else to do. Note, the completion
	 * of the previous request may have caused a new request to be started
	 * so be careful when doing this. */

	if (!hs_ep->req && result >= 0) {
		restart = !list_empty(&hs_ep->queue);
		if (restart) {
			hs_req = get_ep_head(hs_ep);
			s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
		}
	}
}
1240
1241/**
1242 * s3c_hsotg_complete_request_lock - complete a request given to us (locked)
1243 * @hsotg: The device state.
1244 * @hs_ep: The endpoint the request was on.
1245 * @hs_req: The request to complete.
1246 * @result: The result code (0 => Ok, otherwise errno)
1247 *
1248 * See s3c_hsotg_complete_request(), but called with the endpoint's
1249 * lock held.
1250*/
1251static void s3c_hsotg_complete_request_lock(struct s3c_hsotg *hsotg,
1252 struct s3c_hsotg_ep *hs_ep,
1253 struct s3c_hsotg_req *hs_req,
1254 int result)
1255{
1256 unsigned long flags;
1257
1258 spin_lock_irqsave(&hs_ep->lock, flags);
1259 s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
1260 spin_unlock_irqrestore(&hs_ep->lock, flags);
1261}
1262
/**
 * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
 * @hsotg: The device state.
 * @ep_idx: The endpoint index for the data
 * @size: The size of data in the fifo, in bytes
 *
 * The FIFO status shows there is data to read from the FIFO for a given
 * endpoint, so sort out whether we need to read the data into a request
 * that has been made for that endpoint.
 */
static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	void __iomem *fifo = hsotg->regs + S3C_EPFIFO(ep_idx);
	int to_read;
	int max_req;
	int read_ptr;

	if (!hs_req) {
		u32 epctl = readl(hsotg->regs + S3C_DOEPCTL(ep_idx));
		int ptr;

		dev_warn(hsotg->dev,
			 "%s: FIFO %d bytes on ep%d but no req (DxEPCTl=0x%08x)\n",
			 __func__, size, ep_idx, epctl);

		/* dump the data from the FIFO, we've nothing we can do */
		for (ptr = 0; ptr < size; ptr += 4)
			(void)readl(fifo);

		return;
	}

	spin_lock(&hs_ep->lock);

	to_read = size;
	read_ptr = hs_req->req.actual;
	max_req = hs_req->req.length - read_ptr;

	if (to_read > max_req) {
		/* more data appeared than we were willing
		 * to deal with in this request.
		 */

		/* currently we don't deal with this */
		WARN_ON_ONCE(1);
	}

	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
		__func__, to_read, max_req, read_ptr, hs_req->req.length);

	hs_ep->total_data += to_read;
	hs_req->req.actual += to_read;
	/* the FIFO is read in 32bit words, so round the byte count up */
	to_read = DIV_ROUND_UP(to_read, 4);

	/* note, we might over-write the buffer end by 3 bytes depending on
	 * alignment of the data. */
	readsl(fifo, hs_req->req.buf + read_ptr, to_read);

	spin_unlock(&hs_ep->lock);
}
1325
/**
 * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
 * @hsotg: The device instance
 * @req: The request currently on this endpoint
 *
 * Generate a zero-length IN packet request for terminating a SETUP
 * transaction.
 *
 * Note, since we don't write any data to the TxFIFO, then it is
 * currently believed that we do not need to wait for any space in
 * the TxFIFO.
 */
static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
			       struct s3c_hsotg_req *req)
{
	u32 ctrl;

	if (!req) {
		dev_warn(hsotg->dev, "%s: no request?\n", __func__);
		return;
	}

	/* a zero-length request needs no packet on the wire; just mark the
	 * ZLP as sent and re-arm EP0 for the next SETUP */
	if (req->req.length == 0) {
		hsotg->eps[0].sent_zlp = 1;
		s3c_hsotg_enqueue_setup(hsotg);
		return;
	}

	hsotg->eps[0].dir_in = 1;
	hsotg->eps[0].sent_zlp = 1;

	dev_dbg(hsotg->dev, "sending zero-length packet\n");

	/* issue a zero-sized packet to terminate this */
	writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
	       S3C_DxEPTSIZ_XferSize(0), hsotg->regs + S3C_DIEPTSIZ(0));

	ctrl = readl(hsotg->regs + S3C_DIEPCTL0);
	ctrl |= S3C_DxEPCTL_CNAK;  /* clear NAK set by core */
	ctrl |= S3C_DxEPCTL_EPEna; /* ensure ep enabled */
	ctrl |= S3C_DxEPCTL_USBActEp;
	writel(ctrl, hsotg->regs + S3C_DIEPCTL0);
}
1369
1370/**
1371 * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
1372 * @hsotg: The device instance
1373 * @epnum: The endpoint received from
1374 * @was_setup: Set if processing a SetupDone event.
1375 *
1376 * The RXFIFO has delivered an OutDone event, which means that the data
1377 * transfer for an OUT endpoint has been completed, either by a short
1378 * packet or by the finish of a transfer.
1379*/
1380static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
1381 int epnum, bool was_setup)
1382{
1383 struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
1384 struct s3c_hsotg_req *hs_req = hs_ep->req;
1385 struct usb_request *req = &hs_req->req;
1386 int result = 0;
1387
1388 if (!hs_req) {
1389 dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
1390 return;
1391 }
1392
1393 if (using_dma(hsotg)) {
1394 u32 epsize = readl(hsotg->regs + S3C_DOEPTSIZ(epnum));
1395 unsigned size_done;
1396 unsigned size_left;
1397
1398 /* Calculate the size of the transfer by checking how much
1399 * is left in the endpoint size register and then working it
1400 * out from the amount we loaded for the transfer.
1401 *
1402 * We need to do this as DMA pointers are always 32bit aligned
1403 * so may overshoot/undershoot the transfer.
1404 */
1405
1406 size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);
1407
1408 size_done = hs_ep->size_loaded - size_left;
1409 size_done += hs_ep->last_load;
1410
1411 req->actual = size_done;
1412 }
1413
1414 if (req->actual < req->length && req->short_not_ok) {
1415 dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
1416 __func__, req->actual, req->length);
1417
1418 /* todo - what should we return here? there's no one else
1419 * even bothering to check the status. */
1420 }
1421
1422 if (epnum == 0) {
1423 if (!was_setup && req->complete != s3c_hsotg_complete_setup)
1424 s3c_hsotg_send_zlp(hsotg, hs_req);
1425 }
1426
1427 s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, result);
1428}
1429
1430/**
1431 * s3c_hsotg_read_frameno - read current frame number
1432 * @hsotg: The device instance
1433 *
1434 * Return the current frame number
1435*/
1436static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
1437{
1438 u32 dsts;
1439
1440 dsts = readl(hsotg->regs + S3C_DSTS);
1441 dsts &= S3C_DSTS_SOFFN_MASK;
1442 dsts >>= S3C_DSTS_SOFFN_SHIFT;
1443
1444 return dsts;
1445}
1446
1447/**
1448 * s3c_hsotg_handle_rx - RX FIFO has data
1449 * @hsotg: The device instance
1450 *
1451 * The IRQ handler has detected that the RX FIFO has some data in it
1452 * that requires processing, so find out what is in there and do the
1453 * appropriate read.
1454 *
1455 * The RXFIFO is a true FIFO, the packets comming out are still in packet
1456 * chunks, so if you have x packets received on an endpoint you'll get x
1457 * FIFO events delivered, each with a packet's worth of data in it.
1458 *
1459 * When using DMA, we should not be processing events from the RXFIFO
1460 * as the actual data should be sent to the memory directly and we turn
1461 * on the completion interrupts to get notifications of transfer completion.
1462 */
1463void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
1464{
1465 u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);
1466 u32 epnum, status, size;
1467
1468 WARN_ON(using_dma(hsotg));
1469
1470 epnum = grxstsr & S3C_GRXSTS_EPNum_MASK;
1471 status = grxstsr & S3C_GRXSTS_PktSts_MASK;
1472
1473 size = grxstsr & S3C_GRXSTS_ByteCnt_MASK;
1474 size >>= S3C_GRXSTS_ByteCnt_SHIFT;
1475
1476 if (1)
1477 dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
1478 __func__, grxstsr, size, epnum);
1479
1480#define __status(x) ((x) >> S3C_GRXSTS_PktSts_SHIFT)
1481
1482 switch (status >> S3C_GRXSTS_PktSts_SHIFT) {
1483 case __status(S3C_GRXSTS_PktSts_GlobalOutNAK):
1484 dev_dbg(hsotg->dev, "GlobalOutNAK\n");
1485 break;
1486
1487 case __status(S3C_GRXSTS_PktSts_OutDone):
1488 dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
1489 s3c_hsotg_read_frameno(hsotg));
1490
1491 if (!using_dma(hsotg))
1492 s3c_hsotg_handle_outdone(hsotg, epnum, false);
1493 break;
1494
1495 case __status(S3C_GRXSTS_PktSts_SetupDone):
1496 dev_dbg(hsotg->dev,
1497 "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1498 s3c_hsotg_read_frameno(hsotg),
1499 readl(hsotg->regs + S3C_DOEPCTL(0)));
1500
1501 s3c_hsotg_handle_outdone(hsotg, epnum, true);
1502 break;
1503
1504 case __status(S3C_GRXSTS_PktSts_OutRX):
1505 s3c_hsotg_rx_data(hsotg, epnum, size);
1506 break;
1507
1508 case __status(S3C_GRXSTS_PktSts_SetupRX):
1509 dev_dbg(hsotg->dev,
1510 "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1511 s3c_hsotg_read_frameno(hsotg),
1512 readl(hsotg->regs + S3C_DOEPCTL(0)));
1513
1514 s3c_hsotg_rx_data(hsotg, epnum, size);
1515 break;
1516
1517 default:
1518 dev_warn(hsotg->dev, "%s: unknown status %08x\n",
1519 __func__, grxstsr);
1520
1521 s3c_hsotg_dump(hsotg);
1522 break;
1523 }
1524}
1525
1526/**
1527 * s3c_hsotg_ep0_mps - turn max packet size into register setting
1528 * @mps: The maximum packet size in bytes.
1529*/
1530static u32 s3c_hsotg_ep0_mps(unsigned int mps)
1531{
1532 switch (mps) {
1533 case 64:
1534 return S3C_D0EPCTL_MPS_64;
1535 case 32:
1536 return S3C_D0EPCTL_MPS_32;
1537 case 16:
1538 return S3C_D0EPCTL_MPS_16;
1539 case 8:
1540 return S3C_D0EPCTL_MPS_8;
1541 }
1542
1543 /* bad max packet size, warn and return invalid result */
1544 WARN_ON(1);
1545 return (u32)-1;
1546}
1547
/**
 * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
 * @hsotg: The driver state.
 * @ep: The index number of the endpoint
 * @mps: The maximum packet size in bytes
 *
 * Configure the maximum packet size for the given endpoint, updating
 * the hardware control registers to reflect this.
 */
static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
				       unsigned int ep, unsigned int mps)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
	void __iomem *regs = hsotg->regs;
	u32 mpsval;
	u32 reg;

	if (ep == 0) {
		/* EP0 is a special case: its MPS field is an encoded value,
		 * not the raw byte count */
		mpsval = s3c_hsotg_ep0_mps(mps);
		if (mpsval > 3)
			goto bad_mps;
	} else {
		if (mps >= S3C_DxEPCTL_MPS_LIMIT+1)
			goto bad_mps;

		mpsval = mps;
	}

	hs_ep->ep.maxpacket = mps;

	/* update both the in and out endpoint control registers, even
	 * if one of the directions may not be in use. */

	reg = readl(regs + S3C_DIEPCTL(ep));
	reg &= ~S3C_DxEPCTL_MPS_MASK;
	reg |= mpsval;
	writel(reg, regs + S3C_DIEPCTL(ep));

	reg = readl(regs + S3C_DOEPCTL(ep));
	reg &= ~S3C_DxEPCTL_MPS_MASK;
	reg |= mpsval;
	writel(reg, regs + S3C_DOEPCTL(ep));

	return;

bad_mps:
	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
}
1597
1598
1599/**
1600 * s3c_hsotg_trytx - check to see if anything needs transmitting
1601 * @hsotg: The driver state
1602 * @hs_ep: The driver endpoint to check.
1603 *
1604 * Check to see if there is a request that has data to send, and if so
1605 * make an attempt to write data into the FIFO.
1606 */
1607static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
1608 struct s3c_hsotg_ep *hs_ep)
1609{
1610 struct s3c_hsotg_req *hs_req = hs_ep->req;
1611
1612 if (!hs_ep->dir_in || !hs_req)
1613 return 0;
1614
1615 if (hs_req->req.actual < hs_req->req.length) {
1616 dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
1617 hs_ep->index);
1618 return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1619 }
1620
1621 return 0;
1622}
1623
/**
 * s3c_hsotg_complete_in - complete IN transfer
 * @hsotg: The device state.
 * @hs_ep: The endpoint that has just completed.
 *
 * An IN transfer has been completed, update the transfer's state and then
 * call the relevant completion routines.
 */
static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
				  struct s3c_hsotg_ep *hs_ep)
{
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	u32 epsize = readl(hsotg->regs + S3C_DIEPTSIZ(hs_ep->index));
	int size_left, size_done;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "XferCompl but no req\n");
		return;
	}

	/* Calculate the size of the transfer by checking how much is left
	 * in the endpoint size register and then working it out from
	 * the amount we loaded for the transfer.
	 *
	 * We do this even for DMA, as the transfer may have incremented
	 * past the end of the buffer (DMA transfers are always 32bit
	 * aligned).
	 */

	size_left = S3C_DxEPTSIZ_XferSize_GET(epsize);

	size_done = hs_ep->size_loaded - size_left;
	size_done += hs_ep->last_load;

	if (hs_req->req.actual != size_done)
		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
			__func__, hs_req->req.actual, size_done);

	hs_req->req.actual = size_done;

	/* if we did all of the transfer, and there is more data left
	 * around, then try restarting the rest of the request */

	if (!size_left && hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
	} else
		s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);
}
1673
/**
 * s3c_hsotg_epint - handle an in/out endpoint interrupt
 * @hsotg: The driver state
 * @idx: The index for the endpoint (0..15)
 * @dir_in: Set if this is an IN endpoint
 *
 * Process and clear any interrupt pending for an individual endpoint.
 * Handled bits are accumulated in 'clear' and written back to the
 * endpoint interrupt register in one go at the end.
 */
static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
			    int dir_in)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
	u32 epint_reg = dir_in ? S3C_DIEPINT(idx) : S3C_DOEPINT(idx);
	u32 epctl_reg = dir_in ? S3C_DIEPCTL(idx) : S3C_DOEPCTL(idx);
	u32 epsiz_reg = dir_in ? S3C_DIEPTSIZ(idx) : S3C_DOEPTSIZ(idx);
	u32 ints;
	u32 clear = 0;

	ints = readl(hsotg->regs + epint_reg);

	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
		__func__, idx, dir_in ? "in" : "out", ints);

	if (ints & S3C_DxEPINT_XferCompl) {
		dev_dbg(hsotg->dev,
			"%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
			__func__, readl(hsotg->regs + epctl_reg),
			readl(hsotg->regs + epsiz_reg));

		/* we get OutDone from the FIFO, so we only need to look
		 * at completing IN requests here */
		if (dir_in) {
			s3c_hsotg_complete_in(hsotg, hs_ep);

			/* re-arm EP0 for the next SETUP after a completed
			 * control IN phase */
			if (idx == 0)
				s3c_hsotg_enqueue_setup(hsotg);
		} else if (using_dma(hsotg)) {
			/* We're using DMA, we need to fire an OutDone here
			 * as we ignore the RXFIFO. */

			s3c_hsotg_handle_outdone(hsotg, idx, false);
		}

		clear |= S3C_DxEPINT_XferCompl;
	}

	if (ints & S3C_DxEPINT_EPDisbld) {
		dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
		clear |= S3C_DxEPINT_EPDisbld;
	}

	if (ints & S3C_DxEPINT_AHBErr) {
		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
		clear |= S3C_DxEPINT_AHBErr;
	}

	if (ints & S3C_DxEPINT_Setup) {  /* Setup or Timeout */
		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);

		if (using_dma(hsotg) && idx == 0) {
			/* this is the notification we've received a
			 * setup packet. In non-DMA mode we'd get this
			 * from the RXFIFO, instead we need to process
			 * the setup here. */

			if (dir_in)
				WARN_ON_ONCE(1);
			else
				s3c_hsotg_handle_outdone(hsotg, 0, true);
		}

		clear |= S3C_DxEPINT_Setup;
	}

	if (ints & S3C_DxEPINT_Back2BackSetup) {
		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
		clear |= S3C_DxEPINT_Back2BackSetup;
	}

	if (dir_in) {
		/* not sure if this is important, but we'll clear it anyway
		 */
		if (ints & S3C_DIEPMSK_INTknTXFEmpMsk) {
			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
				__func__, idx);
			clear |= S3C_DIEPMSK_INTknTXFEmpMsk;
		}

		/* this probably means something bad is happening */
		if (ints & S3C_DIEPMSK_INTknEPMisMsk) {
			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
				 __func__, idx);
			clear |= S3C_DIEPMSK_INTknEPMisMsk;
		}
	}

	/* acknowledge all the bits we handled in one write */
	writel(clear, hsotg->regs + epint_reg);
}
1772
/**
 * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
 * @hsotg: The device state.
 *
 * Handle updating the device settings after the enumeration phase has
 * been completed.
 */
static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
{
	u32 dsts = readl(hsotg->regs + S3C_DSTS);
	/* ep_mps is only consumed when ep0_mps is set below, so the LS
	 * case (which sets neither) is safe */
	int ep0_mps = 0, ep_mps;

	/* This should signal the finish of the enumeration phase
	 * of the USB handshaking, so we should now know what rate
	 * we connected at. */

	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);

	/* note, since we're limited by the size of transfer on EP0, and
	 * it seems IN transfers must be a even number of packets we do
	 * not advertise a 64byte MPS on EP0. */

	/* catch both EnumSpd_FS and EnumSpd_FS48 */
	switch (dsts & S3C_DSTS_EnumSpd_MASK) {
	case S3C_DSTS_EnumSpd_FS:
	case S3C_DSTS_EnumSpd_FS48:
		hsotg->gadget.speed = USB_SPEED_FULL;
		dev_info(hsotg->dev, "new device is full-speed\n");

		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 64;
		break;

	case S3C_DSTS_EnumSpd_HS:
		dev_info(hsotg->dev, "new device is high-speed\n");
		hsotg->gadget.speed = USB_SPEED_HIGH;

		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 512;
		break;

	case S3C_DSTS_EnumSpd_LS:
		hsotg->gadget.speed = USB_SPEED_LOW;
		dev_info(hsotg->dev, "new device is low-speed\n");

		/* note, we don't actually support LS in this driver at the
		 * moment, and the documentation seems to imply that it isn't
		 * supported by the PHYs on some of the devices.
		 */
		break;
	}

	/* we should now know the maximum packet size for an
	 * endpoint, so set the endpoints to a default value. */

	if (ep0_mps) {
		int i;
		s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
		for (i = 1; i < S3C_HSOTG_EPS; i++)
			s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
	}

	/* ensure after enumeration our EP0 is active */

	s3c_hsotg_enqueue_setup(hsotg);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + S3C_DIEPCTL0),
		readl(hsotg->regs + S3C_DOEPCTL0));
}
1843
/**
 * kill_all_requests - remove all requests from the endpoint's queue
 * @hsotg: The device state.
 * @ep: The endpoint the requests may be on.
 * @result: The result code to use.
 * @force: Force removal of any current requests
 *
 * Go through the requests on the given endpoint and mark them
 * completed with the given result code.
 */
static void kill_all_requests(struct s3c_hsotg *hsotg,
			      struct s3c_hsotg_ep *ep,
			      int result, bool force)
{
	struct s3c_hsotg_req *req, *treq;
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);

	/* safe iteration: completion removes entries from the list */
	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
		/* currently, we can't do much about an already
		 * running request on an in endpoint */

		if (ep->req == req && ep->dir_in && !force)
			continue;

		s3c_hsotg_complete_request(hsotg, ep, req,
					   result);
	}

	spin_unlock_irqrestore(&ep->lock, flags);
}
1876
/* call_gadget - invoke a gadget driver callback, if the bus is connected
 * and the driver provides the entry point. Wrapped in do { } while (0)
 * so the macro is a single statement and safe inside unbraced if/else. */
#define call_gadget(_hs, _entry) \
	do { \
		if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \
		    (_hs)->driver && (_hs)->driver->_entry) \
			(_hs)->driver->_entry(&(_hs)->gadget); \
	} while (0)
1881
1882/**
1883 * s3c_hsotg_disconnect_irq - disconnect irq service
1884 * @hsotg: The device state.
1885 *
1886 * A disconnect IRQ has been received, meaning that the host has
1887 * lost contact with the bus. Remove all current transactions
1888 * and signal the gadget driver that this has happened.
1889*/
1890static void s3c_hsotg_disconnect_irq(struct s3c_hsotg *hsotg)
1891{
1892 unsigned ep;
1893
1894 for (ep = 0; ep < S3C_HSOTG_EPS; ep++)
1895 kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);
1896
1897 call_gadget(hsotg, disconnect);
1898}
1899
1900/**
1901 * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
1902 * @hsotg: The device state:
1903 * @periodic: True if this is a periodic FIFO interrupt
1904 */
1905static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
1906{
1907 struct s3c_hsotg_ep *ep;
1908 int epno, ret;
1909
1910 /* look through for any more data to transmit */
1911
1912 for (epno = 0; epno < S3C_HSOTG_EPS; epno++) {
1913 ep = &hsotg->eps[epno];
1914
1915 if (!ep->dir_in)
1916 continue;
1917
1918 if ((periodic && !ep->periodic) ||
1919 (!periodic && ep->periodic))
1920 continue;
1921
1922 ret = s3c_hsotg_trytx(hsotg, ep);
1923 if (ret < 0)
1924 break;
1925 }
1926}
1927
1928static struct s3c_hsotg *our_hsotg;
1929
1930/* IRQ flags which will trigger a retry around the IRQ loop */
1931#define IRQ_RETRY_MASK (S3C_GINTSTS_NPTxFEmp | \
1932 S3C_GINTSTS_PTxFEmp | \
1933 S3C_GINTSTS_RxFLvl)
1934
1935/**
1936 * s3c_hsotg_irq - handle device interrupt
1937 * @irq: The IRQ number triggered
1938 * @pw: The pw value when registered the handler.
1939 */
1940static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
1941{
1942 struct s3c_hsotg *hsotg = pw;
1943 int retry_count = 8;
1944 u32 gintsts;
1945 u32 gintmsk;
1946
1947irq_retry:
1948 gintsts = readl(hsotg->regs + S3C_GINTSTS);
1949 gintmsk = readl(hsotg->regs + S3C_GINTMSK);
1950
1951 dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
1952 __func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
1953
1954 gintsts &= gintmsk;
1955
1956 if (gintsts & S3C_GINTSTS_OTGInt) {
1957 u32 otgint = readl(hsotg->regs + S3C_GOTGINT);
1958
1959 dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);
1960
1961 writel(otgint, hsotg->regs + S3C_GOTGINT);
1962 writel(S3C_GINTSTS_OTGInt, hsotg->regs + S3C_GINTSTS);
1963 }
1964
1965 if (gintsts & S3C_GINTSTS_DisconnInt) {
1966 dev_dbg(hsotg->dev, "%s: DisconnInt\n", __func__);
1967 writel(S3C_GINTSTS_DisconnInt, hsotg->regs + S3C_GINTSTS);
1968
1969 s3c_hsotg_disconnect_irq(hsotg);
1970 }
1971
1972 if (gintsts & S3C_GINTSTS_SessReqInt) {
1973 dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
1974 writel(S3C_GINTSTS_SessReqInt, hsotg->regs + S3C_GINTSTS);
1975 }
1976
1977 if (gintsts & S3C_GINTSTS_EnumDone) {
1978 s3c_hsotg_irq_enumdone(hsotg);
1979 writel(S3C_GINTSTS_EnumDone, hsotg->regs + S3C_GINTSTS);
1980 }
1981
1982 if (gintsts & S3C_GINTSTS_ConIDStsChng) {
1983 dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
1984 readl(hsotg->regs + S3C_DSTS),
1985 readl(hsotg->regs + S3C_GOTGCTL));
1986
1987 writel(S3C_GINTSTS_ConIDStsChng, hsotg->regs + S3C_GINTSTS);
1988 }
1989
1990 if (gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt)) {
1991 u32 daint = readl(hsotg->regs + S3C_DAINT);
1992 u32 daint_out = daint >> S3C_DAINT_OutEP_SHIFT;
1993 u32 daint_in = daint & ~(daint_out << S3C_DAINT_OutEP_SHIFT);
1994 int ep;
1995
1996 dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
1997
1998 for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
1999 if (daint_out & 1)
2000 s3c_hsotg_epint(hsotg, ep, 0);
2001 }
2002
2003 for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
2004 if (daint_in & 1)
2005 s3c_hsotg_epint(hsotg, ep, 1);
2006 }
2007
2008 writel(daint, hsotg->regs + S3C_DAINT);
2009 writel(gintsts & (S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt),
2010 hsotg->regs + S3C_GINTSTS);
2011 }
2012
2013 if (gintsts & S3C_GINTSTS_USBRst) {
2014 dev_info(hsotg->dev, "%s: USBRst\n", __func__);
2015 dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
2016 readl(hsotg->regs + S3C_GNPTXSTS));
2017
2018 kill_all_requests(hsotg, &hsotg->eps[0], -ECONNRESET, true);
2019
2020 /* it seems after a reset we can end up with a situation
2021 * where the TXFIFO still has data in it... try flushing
2022 * it to remove anything that may still be in it.
2023 */
2024
2025 if (1) {
2026 writel(S3C_GRSTCTL_TxFNum(0) | S3C_GRSTCTL_TxFFlsh,
2027 hsotg->regs + S3C_GRSTCTL);
2028
2029 dev_info(hsotg->dev, "GNPTXSTS=%08x\n",
2030 readl(hsotg->regs + S3C_GNPTXSTS));
2031 }
2032
2033 s3c_hsotg_enqueue_setup(hsotg);
2034
2035 writel(S3C_GINTSTS_USBRst, hsotg->regs + S3C_GINTSTS);
2036 }
2037
2038 /* check both FIFOs */
2039
2040 if (gintsts & S3C_GINTSTS_NPTxFEmp) {
2041 dev_dbg(hsotg->dev, "NPTxFEmp\n");
2042
2043 /* Disable the interrupt to stop it happening again
2044 * unless one of these endpoint routines decides that
2045 * it needs re-enabling */
2046
2047 s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_NPTxFEmp);
2048 s3c_hsotg_irq_fifoempty(hsotg, false);
2049
2050 writel(S3C_GINTSTS_NPTxFEmp, hsotg->regs + S3C_GINTSTS);
2051 }
2052
2053 if (gintsts & S3C_GINTSTS_PTxFEmp) {
2054 dev_dbg(hsotg->dev, "PTxFEmp\n");
2055
2056 /* See note in S3C_GINTSTS_NPTxFEmp */
2057
2058 s3c_hsotg_disable_gsint(hsotg, S3C_GINTSTS_PTxFEmp);
2059 s3c_hsotg_irq_fifoempty(hsotg, true);
2060
2061 writel(S3C_GINTSTS_PTxFEmp, hsotg->regs + S3C_GINTSTS);
2062 }
2063
2064 if (gintsts & S3C_GINTSTS_RxFLvl) {
2065 /* note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
2066 * we need to retry s3c_hsotg_handle_rx if this is still
2067 * set. */
2068
2069 s3c_hsotg_handle_rx(hsotg);
2070 writel(S3C_GINTSTS_RxFLvl, hsotg->regs + S3C_GINTSTS);
2071 }
2072
2073 if (gintsts & S3C_GINTSTS_ModeMis) {
2074 dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
2075 writel(S3C_GINTSTS_ModeMis, hsotg->regs + S3C_GINTSTS);
2076 }
2077
2078 if (gintsts & S3C_GINTSTS_USBSusp) {
2079 dev_info(hsotg->dev, "S3C_GINTSTS_USBSusp\n");
2080 writel(S3C_GINTSTS_USBSusp, hsotg->regs + S3C_GINTSTS);
2081
2082 call_gadget(hsotg, suspend);
2083 }
2084
2085 if (gintsts & S3C_GINTSTS_WkUpInt) {
2086 dev_info(hsotg->dev, "S3C_GINTSTS_WkUpIn\n");
2087 writel(S3C_GINTSTS_WkUpInt, hsotg->regs + S3C_GINTSTS);
2088
2089 call_gadget(hsotg, resume);
2090 }
2091
2092 if (gintsts & S3C_GINTSTS_ErlySusp) {
2093 dev_dbg(hsotg->dev, "S3C_GINTSTS_ErlySusp\n");
2094 writel(S3C_GINTSTS_ErlySusp, hsotg->regs + S3C_GINTSTS);
2095 }
2096
2097 /* these next two seem to crop-up occasionally causing the core
2098 * to shutdown the USB transfer, so try clearing them and logging
2099 * the occurence. */
2100
2101 if (gintsts & S3C_GINTSTS_GOUTNakEff) {
2102 dev_info(hsotg->dev, "GOUTNakEff triggered\n");
2103
2104 s3c_hsotg_dump(hsotg);
2105
2106 writel(S3C_DCTL_CGOUTNak, hsotg->regs + S3C_DCTL);
2107 writel(S3C_GINTSTS_GOUTNakEff, hsotg->regs + S3C_GINTSTS);
2108 }
2109
2110 if (gintsts & S3C_GINTSTS_GINNakEff) {
2111 dev_info(hsotg->dev, "GINNakEff triggered\n");
2112
2113 s3c_hsotg_dump(hsotg);
2114
2115 writel(S3C_DCTL_CGNPInNAK, hsotg->regs + S3C_DCTL);
2116 writel(S3C_GINTSTS_GINNakEff, hsotg->regs + S3C_GINTSTS);
2117 }
2118
2119 /* if we've had fifo events, we should try and go around the
2120 * loop again to see if there's any point in returning yet. */
2121
2122 if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
2123 goto irq_retry;
2124
2125 return IRQ_HANDLED;
2126}
2127
2128/**
2129 * s3c_hsotg_ep_enable - enable the given endpoint
2130 * @ep: The USB endpint to configure
2131 * @desc: The USB endpoint descriptor to configure with.
2132 *
2133 * This is called from the USB gadget code's usb_ep_enable().
2134*/
2135static int s3c_hsotg_ep_enable(struct usb_ep *ep,
2136 const struct usb_endpoint_descriptor *desc)
2137{
2138 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2139 struct s3c_hsotg *hsotg = hs_ep->parent;
2140 unsigned long flags;
2141 int index = hs_ep->index;
2142 u32 epctrl_reg;
2143 u32 epctrl;
2144 u32 mps;
2145 int dir_in;
2146
2147 dev_dbg(hsotg->dev,
2148 "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
2149 __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
2150 desc->wMaxPacketSize, desc->bInterval);
2151
2152 /* not to be called for EP0 */
2153 WARN_ON(index == 0);
2154
2155 dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
2156 if (dir_in != hs_ep->dir_in) {
2157 dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
2158 return -EINVAL;
2159 }
2160
2161 mps = le16_to_cpu(desc->wMaxPacketSize);
2162
2163 /* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */
2164
2165 epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);
2166 epctrl = readl(hsotg->regs + epctrl_reg);
2167
2168 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
2169 __func__, epctrl, epctrl_reg);
2170
2171 spin_lock_irqsave(&hs_ep->lock, flags);
2172
2173 epctrl &= ~(S3C_DxEPCTL_EPType_MASK | S3C_DxEPCTL_MPS_MASK);
2174 epctrl |= S3C_DxEPCTL_MPS(mps);
2175
2176 /* mark the endpoint as active, otherwise the core may ignore
2177 * transactions entirely for this endpoint */
2178 epctrl |= S3C_DxEPCTL_USBActEp;
2179
2180 /* set the NAK status on the endpoint, otherwise we might try and
2181 * do something with data that we've yet got a request to process
2182 * since the RXFIFO will take data for an endpoint even if the
2183 * size register hasn't been set.
2184 */
2185
2186 epctrl |= S3C_DxEPCTL_SNAK;
2187
2188 /* update the endpoint state */
2189 hs_ep->ep.maxpacket = mps;
2190
2191 /* default, set to non-periodic */
2192 hs_ep->periodic = 0;
2193
2194 switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
2195 case USB_ENDPOINT_XFER_ISOC:
2196 dev_err(hsotg->dev, "no current ISOC support\n");
2197 return -EINVAL;
2198
2199 case USB_ENDPOINT_XFER_BULK:
2200 epctrl |= S3C_DxEPCTL_EPType_Bulk;
2201 break;
2202
2203 case USB_ENDPOINT_XFER_INT:
2204 if (dir_in) {
2205 /* Allocate our TxFNum by simply using the index
2206 * of the endpoint for the moment. We could do
2207 * something better if the host indicates how
2208 * many FIFOs we are expecting to use. */
2209
2210 hs_ep->periodic = 1;
2211 epctrl |= S3C_DxEPCTL_TxFNum(index);
2212 }
2213
2214 epctrl |= S3C_DxEPCTL_EPType_Intterupt;
2215 break;
2216
2217 case USB_ENDPOINT_XFER_CONTROL:
2218 epctrl |= S3C_DxEPCTL_EPType_Control;
2219 break;
2220 }
2221
2222 /* for non control endpoints, set PID to D0 */
2223 if (index)
2224 epctrl |= S3C_DxEPCTL_SetD0PID;
2225
2226 dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
2227 __func__, epctrl);
2228
2229 writel(epctrl, hsotg->regs + epctrl_reg);
2230 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
2231 __func__, readl(hsotg->regs + epctrl_reg));
2232
2233 /* enable the endpoint interrupt */
2234 s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
2235
2236 spin_unlock_irqrestore(&hs_ep->lock, flags);
2237 return 0;
2238}
2239
/**
 * s3c_hsotg_ep_disable - disable the given endpoint
 * @ep: The endpoint to disable (must not be EP0).
 *
 * Shuts down all queued requests with -ESHUTDOWN, then disables and
 * deactivates the endpoint in hardware (clears EPEna/USBActEp, sets SNAK)
 * and masks its interrupt. Counterpart of s3c_hsotg_ep_enable().
 */
static int s3c_hsotg_ep_disable(struct usb_ep *ep)
{
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hsotg = hs_ep->parent;
	int dir_in = hs_ep->dir_in;
	int index = hs_ep->index;
	unsigned long flags;
	u32 epctrl_reg;
	u32 ctrl;

	dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);

	/* EP0 must remain enabled for control traffic */
	if (ep == &hsotg->eps[0].ep) {
		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
		return -EINVAL;
	}

	epctrl_reg = dir_in ? S3C_DIEPCTL(index) : S3C_DOEPCTL(index);

	/* terminate all requests with shutdown */
	/* NOTE(review): this runs before hs_ep->lock is taken - confirm
	 * kill_all_requests() takes the lock itself or that no new request
	 * can be queued concurrently here */
	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);

	spin_lock_irqsave(&hs_ep->lock, flags);

	/* disable the endpoint, deactivate it and leave it NAKing */
	ctrl = readl(hsotg->regs + epctrl_reg);
	ctrl &= ~S3C_DxEPCTL_EPEna;
	ctrl &= ~S3C_DxEPCTL_USBActEp;
	ctrl |= S3C_DxEPCTL_SNAK;

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	writel(ctrl, hsotg->regs + epctrl_reg);

	/* disable endpoint interrupts */
	s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);

	spin_unlock_irqrestore(&hs_ep->lock, flags);
	return 0;
}
2278
2279/**
2280 * on_list - check request is on the given endpoint
2281 * @ep: The endpoint to check.
2282 * @test: The request to test if it is on the endpoint.
2283*/
2284static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
2285{
2286 struct s3c_hsotg_req *req, *treq;
2287
2288 list_for_each_entry_safe(req, treq, &ep->queue, queue) {
2289 if (req == test)
2290 return true;
2291 }
2292
2293 return false;
2294}
2295
/**
 * s3c_hsotg_ep_dequeue - remove a queued (but not active) request
 * @ep: The endpoint the request was queued on.
 * @req: The request to remove.
 *
 * Completes @req with -ECONNRESET if it is still on the endpoint's queue.
 * Returns -EINPROGRESS for the request currently being transferred, and
 * -EINVAL if the request is not queued on this endpoint at all.
 */
static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct s3c_hsotg_req *hs_req = our_req(req);
	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
	struct s3c_hsotg *hs = hs_ep->parent;
	unsigned long flags;

	dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);

	/* NOTE(review): hs_ep->req is inspected before the lock is taken -
	 * confirm it cannot change under us between here and the locked
	 * on_list() check below */
	if (hs_req == hs_ep->req) {
		dev_dbg(hs->dev, "%s: already in progress\n", __func__);
		return -EINPROGRESS;
	}

	spin_lock_irqsave(&hs_ep->lock, flags);

	if (!on_list(hs_ep, hs_req)) {
		spin_unlock_irqrestore(&hs_ep->lock, flags);
		return -EINVAL;
	}

	/* complete the request back to the gadget driver as cancelled */
	s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
	spin_unlock_irqrestore(&hs_ep->lock, flags);

	return 0;
}
2322
2323static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
2324{
2325 struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2326 struct s3c_hsotg *hs = hs_ep->parent;
2327 int index = hs_ep->index;
2328 unsigned long irqflags;
2329 u32 epreg;
2330 u32 epctl;
2331
2332 dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
2333
2334 spin_lock_irqsave(&hs_ep->lock, irqflags);
2335
2336 /* write both IN and OUT control registers */
2337
2338 epreg = S3C_DIEPCTL(index);
2339 epctl = readl(hs->regs + epreg);
2340
2341 if (value)
2342 epctl |= S3C_DxEPCTL_Stall;
2343 else
2344 epctl &= ~S3C_DxEPCTL_Stall;
2345
2346 writel(epctl, hs->regs + epreg);
2347
2348 epreg = S3C_DOEPCTL(index);
2349 epctl = readl(hs->regs + epreg);
2350
2351 if (value)
2352 epctl |= S3C_DxEPCTL_Stall;
2353 else
2354 epctl &= ~S3C_DxEPCTL_Stall;
2355
2356 writel(epctl, hs->regs + epreg);
2357
2358 spin_unlock_irqrestore(&hs_ep->lock, irqflags);
2359
2360 return 0;
2361}
2362
/* endpoint operations handed to the gadget core for every endpoint */
static struct usb_ep_ops s3c_hsotg_ep_ops = {
	.enable = s3c_hsotg_ep_enable,
	.disable = s3c_hsotg_ep_disable,
	.alloc_request = s3c_hsotg_ep_alloc_request,
	.free_request = s3c_hsotg_ep_free_request,
	.queue = s3c_hsotg_ep_queue,
	.dequeue = s3c_hsotg_ep_dequeue,
	.set_halt = s3c_hsotg_ep_sethalt,
	/* note, don't believe we have any call for the fifo routines */
};
2373
2374/**
2375 * s3c_hsotg_corereset - issue softreset to the core
2376 * @hsotg: The device state
2377 *
2378 * Issue a soft reset to the core, and await the core finishing it.
2379*/
2380static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
2381{
2382 int timeout;
2383 u32 grstctl;
2384
2385 dev_dbg(hsotg->dev, "resetting core\n");
2386
2387 /* issue soft reset */
2388 writel(S3C_GRSTCTL_CSftRst, hsotg->regs + S3C_GRSTCTL);
2389
2390 timeout = 1000;
2391 do {
2392 grstctl = readl(hsotg->regs + S3C_GRSTCTL);
2393 } while (!(grstctl & S3C_GRSTCTL_CSftRst) && timeout-- > 0);
2394
2395 if (!grstctl & S3C_GRSTCTL_CSftRst) {
2396 dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
2397 return -EINVAL;
2398 }
2399
2400 timeout = 1000;
2401
2402 while (1) {
2403 u32 grstctl = readl(hsotg->regs + S3C_GRSTCTL);
2404
2405 if (timeout-- < 0) {
2406 dev_info(hsotg->dev,
2407 "%s: reset failed, GRSTCTL=%08x\n",
2408 __func__, grstctl);
2409 return -ETIMEDOUT;
2410 }
2411
2412 if (grstctl & S3C_GRSTCTL_CSftRst)
2413 continue;
2414
2415 if (!(grstctl & S3C_GRSTCTL_AHBIdle))
2416 continue;
2417
2418 break; /* reset done */
2419 }
2420
2421 dev_dbg(hsotg->dev, "reset successful\n");
2422 return 0;
2423}
2424
2425int usb_gadget_register_driver(struct usb_gadget_driver *driver)
2426{
2427 struct s3c_hsotg *hsotg = our_hsotg;
2428 int ret;
2429
2430 if (!hsotg) {
2431 printk(KERN_ERR "%s: called with no device\n", __func__);
2432 return -ENODEV;
2433 }
2434
2435 if (!driver) {
2436 dev_err(hsotg->dev, "%s: no driver\n", __func__);
2437 return -EINVAL;
2438 }
2439
2440 if (driver->speed != USB_SPEED_HIGH &&
2441 driver->speed != USB_SPEED_FULL) {
2442 dev_err(hsotg->dev, "%s: bad speed\n", __func__);
2443 }
2444
2445 if (!driver->bind || !driver->setup) {
2446 dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
2447 return -EINVAL;
2448 }
2449
2450 WARN_ON(hsotg->driver);
2451
2452 driver->driver.bus = NULL;
2453 hsotg->driver = driver;
2454 hsotg->gadget.dev.driver = &driver->driver;
2455 hsotg->gadget.dev.dma_mask = hsotg->dev->dma_mask;
2456 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
2457
2458 ret = device_add(&hsotg->gadget.dev);
2459 if (ret) {
2460 dev_err(hsotg->dev, "failed to register gadget device\n");
2461 goto err;
2462 }
2463
2464 ret = driver->bind(&hsotg->gadget);
2465 if (ret) {
2466 dev_err(hsotg->dev, "failed bind %s\n", driver->driver.name);
2467
2468 hsotg->gadget.dev.driver = NULL;
2469 hsotg->driver = NULL;
2470 goto err;
2471 }
2472
2473 /* we must now enable ep0 ready for host detection and then
2474 * set configuration. */
2475
2476 s3c_hsotg_corereset(hsotg);
2477
2478 /* set the PLL on, remove the HNP/SRP and set the PHY */
2479 writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) |
2480 (0x5 << 10), hsotg->regs + S3C_GUSBCFG);
2481
2482 /* looks like soft-reset changes state of FIFOs */
2483 s3c_hsotg_init_fifo(hsotg);
2484
2485 __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
2486
2487 writel(1 << 18 | S3C_DCFG_DevSpd_HS, hsotg->regs + S3C_DCFG);
2488
2489 writel(S3C_GINTSTS_DisconnInt | S3C_GINTSTS_SessReqInt |
2490 S3C_GINTSTS_ConIDStsChng | S3C_GINTSTS_USBRst |
2491 S3C_GINTSTS_EnumDone | S3C_GINTSTS_OTGInt |
2492 S3C_GINTSTS_USBSusp | S3C_GINTSTS_WkUpInt |
2493 S3C_GINTSTS_GOUTNakEff | S3C_GINTSTS_GINNakEff |
2494 S3C_GINTSTS_ErlySusp,
2495 hsotg->regs + S3C_GINTMSK);
2496
2497 if (using_dma(hsotg))
2498 writel(S3C_GAHBCFG_GlblIntrEn | S3C_GAHBCFG_DMAEn |
2499 S3C_GAHBCFG_HBstLen_Incr4,
2500 hsotg->regs + S3C_GAHBCFG);
2501 else
2502 writel(S3C_GAHBCFG_GlblIntrEn, hsotg->regs + S3C_GAHBCFG);
2503
2504 /* Enabling INTknTXFEmpMsk here seems to be a big mistake, we end
2505 * up being flooded with interrupts if the host is polling the
2506 * endpoint to try and read data. */
2507
2508 writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
2509 S3C_DIEPMSK_INTknEPMisMsk |
2510 S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,
2511 hsotg->regs + S3C_DIEPMSK);
2512
2513 /* don't need XferCompl, we get that from RXFIFO in slave mode. In
2514 * DMA mode we may need this. */
2515 writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
2516 S3C_DOEPMSK_EPDisbldMsk |
2517 using_dma(hsotg) ? (S3C_DIEPMSK_XferComplMsk |
2518 S3C_DIEPMSK_TimeOUTMsk) : 0,
2519 hsotg->regs + S3C_DOEPMSK);
2520
2521 writel(0, hsotg->regs + S3C_DAINTMSK);
2522
2523 dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2524 readl(hsotg->regs + S3C_DIEPCTL0),
2525 readl(hsotg->regs + S3C_DOEPCTL0));
2526
2527 /* enable in and out endpoint interrupts */
2528 s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt);
2529
2530 /* Enable the RXFIFO when in slave mode, as this is how we collect
2531 * the data. In DMA mode, we get events from the FIFO but also
2532 * things we cannot process, so do not use it. */
2533 if (!using_dma(hsotg))
2534 s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_RxFLvl);
2535
2536 /* Enable interrupts for EP0 in and out */
2537 s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
2538 s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);
2539
2540 __orr32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
2541 udelay(10); /* see openiboot */
2542 __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
2543
2544 dev_info(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL));
2545
2546 /* S3C_DxEPCTL_USBActEp says RO in manual, but seems to be set by
2547 writing to the EPCTL register.. */
2548
2549 /* set to read 1 8byte packet */
2550 writel(S3C_DxEPTSIZ_MC(1) | S3C_DxEPTSIZ_PktCnt(1) |
2551 S3C_DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0);
2552
2553 writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
2554 S3C_DxEPCTL_CNAK | S3C_DxEPCTL_EPEna |
2555 S3C_DxEPCTL_USBActEp,
2556 hsotg->regs + S3C_DOEPCTL0);
2557
2558 /* enable, but don't activate EP0in */
2559 writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
2560 S3C_DxEPCTL_USBActEp, hsotg->regs + S3C_DIEPCTL0);
2561
2562 s3c_hsotg_enqueue_setup(hsotg);
2563
2564 dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2565 readl(hsotg->regs + S3C_DIEPCTL0),
2566 readl(hsotg->regs + S3C_DOEPCTL0));
2567
2568 /* clear global NAKs */
2569 writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK,
2570 hsotg->regs + S3C_DCTL);
2571
2572 /* remove the soft-disconnect and let's go */
2573 __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_SftDiscon);
2574
2575 /* report to the user, and return */
2576
2577 dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
2578 return 0;
2579
2580err:
2581 hsotg->driver = NULL;
2582 hsotg->gadget.dev.driver = NULL;
2583 return ret;
2584}
2585
2586int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2587{
2588 struct s3c_hsotg *hsotg = our_hsotg;
2589 int ep;
2590
2591 if (!hsotg)
2592 return -ENODEV;
2593
2594 if (!driver || driver != hsotg->driver || !driver->unbind)
2595 return -EINVAL;
2596
2597 /* all endpoints should be shutdown */
2598 for (ep = 0; ep < S3C_HSOTG_EPS; ep++)
2599 s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
2600
2601 call_gadget(hsotg, disconnect);
2602
2603 driver->unbind(&hsotg->gadget);
2604 hsotg->driver = NULL;
2605 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
2606
2607 device_del(&hsotg->gadget.dev);
2608
2609 dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
2610 driver->driver.name);
2611
2612 return 0;
2613}
2614EXPORT_SYMBOL(usb_gadget_unregister_driver);
2615
/* return the current USB frame number for the gadget core */
static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
{
	return s3c_hsotg_read_frameno(to_hsotg(gadget));
}

/* only get_frame is implemented for this controller */
static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
	.get_frame = s3c_hsotg_gadget_getframe,
};
2624
2625/**
2626 * s3c_hsotg_initep - initialise a single endpoint
2627 * @hsotg: The device state.
2628 * @hs_ep: The endpoint to be initialised.
2629 * @epnum: The endpoint number
2630 *
2631 * Initialise the given endpoint (as part of the probe and device state
2632 * creation) to give to the gadget driver. Setup the endpoint name, any
2633 * direction information and other state that may be required.
2634 */
2635static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,
2636 struct s3c_hsotg_ep *hs_ep,
2637 int epnum)
2638{
2639 u32 ptxfifo;
2640 char *dir;
2641
2642 if (epnum == 0)
2643 dir = "";
2644 else if ((epnum % 2) == 0) {
2645 dir = "out";
2646 } else {
2647 dir = "in";
2648 hs_ep->dir_in = 1;
2649 }
2650
2651 hs_ep->index = epnum;
2652
2653 snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
2654
2655 INIT_LIST_HEAD(&hs_ep->queue);
2656 INIT_LIST_HEAD(&hs_ep->ep.ep_list);
2657
2658 spin_lock_init(&hs_ep->lock);
2659
2660 /* add to the list of endpoints known by the gadget driver */
2661 if (epnum)
2662 list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
2663
2664 hs_ep->parent = hsotg;
2665 hs_ep->ep.name = hs_ep->name;
2666 hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT;
2667 hs_ep->ep.ops = &s3c_hsotg_ep_ops;
2668
2669 /* Read the FIFO size for the Periodic TX FIFO, even if we're
2670 * an OUT endpoint, we may as well do this if in future the
2671 * code is changed to make each endpoint's direction changeable.
2672 */
2673
2674 ptxfifo = readl(hsotg->regs + S3C_DPTXFSIZn(epnum));
2675 hs_ep->fifo_size = S3C_DPTXFSIZn_DPTxFSize_GET(ptxfifo);
2676
2677 /* if we're using dma, we need to set the next-endpoint pointer
2678 * to be something valid.
2679 */
2680
2681 if (using_dma(hsotg)) {
2682 u32 next = S3C_DxEPCTL_NextEp((epnum + 1) % 15);
2683 writel(next, hsotg->regs + S3C_DIEPCTL(epnum));
2684 writel(next, hsotg->regs + S3C_DOEPCTL(epnum));
2685 }
2686}
2687
2688/**
2689 * s3c_hsotg_otgreset - reset the OtG phy block
2690 * @hsotg: The host state.
2691 *
2692 * Power up the phy, set the basic configuration and start the PHY.
2693 */
2694static void s3c_hsotg_otgreset(struct s3c_hsotg *hsotg)
2695{
2696 u32 osc;
2697
2698 writel(0, S3C_PHYPWR);
2699 mdelay(1);
2700
2701 osc = hsotg->plat->is_osc ? S3C_PHYCLK_EXT_OSC : 0;
2702
2703 writel(osc | 0x10, S3C_PHYCLK);
2704
2705 /* issue a full set of resets to the otg and core */
2706
2707 writel(S3C_RSTCON_PHY, S3C_RSTCON);
2708 udelay(20); /* at-least 10uS */
2709 writel(0, S3C_RSTCON);
2710}
2711
2712
/**
 * s3c_hsotg_init - basic one-off initialisation of the core
 * @hsotg: The device state.
 *
 * Unmasks the basic endpoint interrupt sources, clears the per-endpoint
 * interrupt mask, sets up the FIFOs and programs the PHY/AHB config.
 */
static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
{
	/* unmask subset of endpoint interrupts */

	writel(S3C_DIEPMSK_TimeOUTMsk | S3C_DIEPMSK_AHBErrMsk |
	       S3C_DIEPMSK_EPDisbldMsk | S3C_DIEPMSK_XferComplMsk,
	       hsotg->regs + S3C_DIEPMSK);

	writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
	       S3C_DOEPMSK_EPDisbldMsk | S3C_DOEPMSK_XferComplMsk,
	       hsotg->regs + S3C_DOEPMSK);

	/* mask all per-endpoint interrupts until endpoints are enabled */
	writel(0, hsotg->regs + S3C_DAINTMSK);

	/* deliberately disabled code path, kept for reference */
	if (0) {
		/* post global nak until we're ready */
		writel(S3C_DCTL_SGNPInNAK | S3C_DCTL_SGOUTNak,
		       hsotg->regs + S3C_DCTL);
	}

	/* setup fifos */

	dev_info(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		 readl(hsotg->regs + S3C_GRXFSIZ),
		 readl(hsotg->regs + S3C_GNPTXFSIZ));

	s3c_hsotg_init_fifo(hsotg);

	/* set the PLL on, remove the HNP/SRP and set the PHY */
	writel(S3C_GUSBCFG_PHYIf16 | S3C_GUSBCFG_TOutCal(7) | (0x5 << 10),
	       hsotg->regs + S3C_GUSBCFG);

	writel(using_dma(hsotg) ? S3C_GAHBCFG_DMAEn : 0x0,
	       hsotg->regs + S3C_GAHBCFG);
}
2748
2749static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
2750{
2751 struct device *dev = hsotg->dev;
2752 void __iomem *regs = hsotg->regs;
2753 u32 val;
2754 int idx;
2755
2756 dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
2757 readl(regs + S3C_DCFG), readl(regs + S3C_DCTL),
2758 readl(regs + S3C_DIEPMSK));
2759
2760 dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
2761 readl(regs + S3C_GAHBCFG), readl(regs + 0x44));
2762
2763 dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
2764 readl(regs + S3C_GRXFSIZ), readl(regs + S3C_GNPTXFSIZ));
2765
2766 /* show periodic fifo settings */
2767
2768 for (idx = 1; idx <= 15; idx++) {
2769 val = readl(regs + S3C_DPTXFSIZn(idx));
2770 dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
2771 val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
2772 val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
2773 }
2774
2775 for (idx = 0; idx < 15; idx++) {
2776 dev_info(dev,
2777 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
2778 readl(regs + S3C_DIEPCTL(idx)),
2779 readl(regs + S3C_DIEPTSIZ(idx)),
2780 readl(regs + S3C_DIEPDMA(idx)));
2781
2782 val = readl(regs + S3C_DOEPCTL(idx));
2783 dev_info(dev,
2784 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
2785 idx, readl(regs + S3C_DOEPCTL(idx)),
2786 readl(regs + S3C_DOEPTSIZ(idx)),
2787 readl(regs + S3C_DOEPDMA(idx)));
2788
2789 }
2790
2791 dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
2792 readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE));
2793}
2794
2795
2796/**
2797 * state_show - debugfs: show overall driver and device state.
2798 * @seq: The seq file to write to.
2799 * @v: Unused parameter.
2800 *
2801 * This debugfs entry shows the overall state of the hardware and
2802 * some general information about each of the endpoints available
2803 * to the system.
2804 */
2805static int state_show(struct seq_file *seq, void *v)
2806{
2807 struct s3c_hsotg *hsotg = seq->private;
2808 void __iomem *regs = hsotg->regs;
2809 int idx;
2810
2811 seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
2812 readl(regs + S3C_DCFG),
2813 readl(regs + S3C_DCTL),
2814 readl(regs + S3C_DSTS));
2815
2816 seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
2817 readl(regs + S3C_DIEPMSK), readl(regs + S3C_DOEPMSK));
2818
2819 seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
2820 readl(regs + S3C_GINTMSK),
2821 readl(regs + S3C_GINTSTS));
2822
2823 seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
2824 readl(regs + S3C_DAINTMSK),
2825 readl(regs + S3C_DAINT));
2826
2827 seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
2828 readl(regs + S3C_GNPTXSTS),
2829 readl(regs + S3C_GRXSTSR));
2830
2831 seq_printf(seq, "\nEndpoint status:\n");
2832
2833 for (idx = 0; idx < 15; idx++) {
2834 u32 in, out;
2835
2836 in = readl(regs + S3C_DIEPCTL(idx));
2837 out = readl(regs + S3C_DOEPCTL(idx));
2838
2839 seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
2840 idx, in, out);
2841
2842 in = readl(regs + S3C_DIEPTSIZ(idx));
2843 out = readl(regs + S3C_DOEPTSIZ(idx));
2844
2845 seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
2846 in, out);
2847
2848 seq_printf(seq, "\n");
2849 }
2850
2851 return 0;
2852}
2853
/* debugfs open: bind state_show to the device instance (inode private) */
static int state_open(struct inode *inode, struct file *file)
{
	return single_open(file, state_show, inode->i_private);
}

/* read-only seq_file operations for the "state" debugfs entry */
static const struct file_operations state_fops = {
	.owner = THIS_MODULE,
	.open = state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2866
2867/**
2868 * fifo_show - debugfs: show the fifo information
2869 * @seq: The seq_file to write data to.
2870 * @v: Unused parameter.
2871 *
2872 * Show the FIFO information for the overall fifo and all the
2873 * periodic transmission FIFOs.
2874*/
2875static int fifo_show(struct seq_file *seq, void *v)
2876{
2877 struct s3c_hsotg *hsotg = seq->private;
2878 void __iomem *regs = hsotg->regs;
2879 u32 val;
2880 int idx;
2881
2882 seq_printf(seq, "Non-periodic FIFOs:\n");
2883 seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + S3C_GRXFSIZ));
2884
2885 val = readl(regs + S3C_GNPTXFSIZ);
2886 seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
2887 val >> S3C_GNPTXFSIZ_NPTxFDep_SHIFT,
2888 val & S3C_GNPTXFSIZ_NPTxFStAddr_MASK);
2889
2890 seq_printf(seq, "\nPeriodic TXFIFOs:\n");
2891
2892 for (idx = 1; idx <= 15; idx++) {
2893 val = readl(regs + S3C_DPTXFSIZn(idx));
2894
2895 seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
2896 val >> S3C_DPTXFSIZn_DPTxFSize_SHIFT,
2897 val & S3C_DPTXFSIZn_DPTxFStAddr_MASK);
2898 }
2899
2900 return 0;
2901}
2902
/* debugfs open: bind fifo_show to the device instance (inode private) */
static int fifo_open(struct inode *inode, struct file *file)
{
	return single_open(file, fifo_show, inode->i_private);
}

/* read-only seq_file operations for the "fifo" debugfs entry */
static const struct file_operations fifo_fops = {
	.owner = THIS_MODULE,
	.open = fifo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2915
2916
/* map the dir_in flag onto a human-readable direction name */
static const char *decode_direction(int is_in)
{
	if (is_in)
		return "in";

	return "out";
}
2921
2922/**
2923 * ep_show - debugfs: show the state of an endpoint.
2924 * @seq: The seq_file to write data to.
2925 * @v: Unused parameter.
2926 *
2927 * This debugfs entry shows the state of the given endpoint (one is
2928 * registered for each available).
2929*/
2930static int ep_show(struct seq_file *seq, void *v)
2931{
2932 struct s3c_hsotg_ep *ep = seq->private;
2933 struct s3c_hsotg *hsotg = ep->parent;
2934 struct s3c_hsotg_req *req;
2935 void __iomem *regs = hsotg->regs;
2936 int index = ep->index;
2937 int show_limit = 15;
2938 unsigned long flags;
2939
2940 seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
2941 ep->index, ep->ep.name, decode_direction(ep->dir_in));
2942
2943 /* first show the register state */
2944
2945 seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
2946 readl(regs + S3C_DIEPCTL(index)),
2947 readl(regs + S3C_DOEPCTL(index)));
2948
2949 seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
2950 readl(regs + S3C_DIEPDMA(index)),
2951 readl(regs + S3C_DOEPDMA(index)));
2952
2953 seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
2954 readl(regs + S3C_DIEPINT(index)),
2955 readl(regs + S3C_DOEPINT(index)));
2956
2957 seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
2958 readl(regs + S3C_DIEPTSIZ(index)),
2959 readl(regs + S3C_DOEPTSIZ(index)));
2960
2961 seq_printf(seq, "\n");
2962 seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
2963 seq_printf(seq, "total_data=%ld\n", ep->total_data);
2964
2965 seq_printf(seq, "request list (%p,%p):\n",
2966 ep->queue.next, ep->queue.prev);
2967
2968 spin_lock_irqsave(&ep->lock, flags);
2969
2970 list_for_each_entry(req, &ep->queue, queue) {
2971 if (--show_limit < 0) {
2972 seq_printf(seq, "not showing more requests...\n");
2973 break;
2974 }
2975
2976 seq_printf(seq, "%c req %p: %d bytes @%p, ",
2977 req == ep->req ? '*' : ' ',
2978 req, req->req.length, req->req.buf);
2979 seq_printf(seq, "%d done, res %d\n",
2980 req->req.actual, req->req.status);
2981 }
2982
2983 spin_unlock_irqrestore(&ep->lock, flags);
2984
2985 return 0;
2986}
2987
/* debugfs open: bind ep_show to the endpoint (inode private) */
static int ep_open(struct inode *inode, struct file *file)
{
	return single_open(file, ep_show, inode->i_private);
}

/* read-only seq_file operations for the per-endpoint debugfs entries */
static const struct file_operations ep_fops = {
	.owner = THIS_MODULE,
	.open = ep_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
3000
3001/**
3002 * s3c_hsotg_create_debug - create debugfs directory and files
3003 * @hsotg: The driver state
3004 *
3005 * Create the debugfs files to allow the user to get information
3006 * about the state of the system. The directory name is created
3007 * with the same name as the device itself, in case we end up
3008 * with multiple blocks in future systems.
3009*/
3010static void __devinit s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
3011{
3012 struct dentry *root;
3013 unsigned epidx;
3014
3015 root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
3016 hsotg->debug_root = root;
3017 if (IS_ERR(root)) {
3018 dev_err(hsotg->dev, "cannot create debug root\n");
3019 return;
3020 }
3021
3022 /* create general state file */
3023
3024 hsotg->debug_file = debugfs_create_file("state", 0444, root,
3025 hsotg, &state_fops);
3026
3027 if (IS_ERR(hsotg->debug_file))
3028 dev_err(hsotg->dev, "%s: failed to create state\n", __func__);
3029
3030 hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
3031 hsotg, &fifo_fops);
3032
3033 if (IS_ERR(hsotg->debug_fifo))
3034 dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);
3035
3036 /* create one file for each endpoint */
3037
3038 for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
3039 struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
3040
3041 ep->debugfs = debugfs_create_file(ep->name, 0444,
3042 root, ep, &ep_fops);
3043
3044 if (IS_ERR(ep->debugfs))
3045 dev_err(hsotg->dev, "failed to create %s debug file\n",
3046 ep->name);
3047 }
3048}
3049
3050/**
3051 * s3c_hsotg_delete_debug - cleanup debugfs entries
3052 * @hsotg: The driver state
3053 *
3054 * Cleanup (remove) the debugfs files for use on module exit.
3055*/
3056static void __devexit s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
3057{
3058 unsigned epidx;
3059
3060 for (epidx = 0; epidx < S3C_HSOTG_EPS; epidx++) {
3061 struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
3062 debugfs_remove(ep->debugfs);
3063 }
3064
3065 debugfs_remove(hsotg->debug_file);
3066 debugfs_remove(hsotg->debug_fifo);
3067 debugfs_remove(hsotg->debug_root);
3068}
3069
3070/**
3071 * s3c_hsotg_gate - set the hardware gate for the block
3072 * @pdev: The device we bound to
3073 * @on: On or off.
3074 *
3075 * Set the hardware gate setting into the block. If we end up on
3076 * something other than an S3C64XX, then we might need to change this
3077 * to using a platform data callback, or some other mechanism.
3078 */
3079static void s3c_hsotg_gate(struct platform_device *pdev, bool on)
3080{
3081 unsigned long flags;
3082 u32 others;
3083
3084 local_irq_save(flags);
3085
3086 others = __raw_readl(S3C64XX_OTHERS);
3087 if (on)
3088 others |= S3C64XX_OTHERS_USBMASK;
3089 else
3090 others &= ~S3C64XX_OTHERS_USBMASK;
3091 __raw_writel(others, S3C64XX_OTHERS);
3092
3093 local_irq_restore(flags);
3094}
3095
/* fallback platform data, used when the board supplies none */
struct s3c_hsotg_plat s3c_hsotg_default_pdata;
3097
3098static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
3099{
3100 struct s3c_hsotg_plat *plat = pdev->dev.platform_data;
3101 struct device *dev = &pdev->dev;
3102 struct s3c_hsotg *hsotg;
3103 struct resource *res;
3104 int epnum;
3105 int ret;
3106
3107 if (!plat)
3108 plat = &s3c_hsotg_default_pdata;
3109
3110 hsotg = kzalloc(sizeof(struct s3c_hsotg) +
3111 sizeof(struct s3c_hsotg_ep) * S3C_HSOTG_EPS,
3112 GFP_KERNEL);
3113 if (!hsotg) {
3114 dev_err(dev, "cannot get memory\n");
3115 return -ENOMEM;
3116 }
3117
3118 hsotg->dev = dev;
3119 hsotg->plat = plat;
3120
3121 platform_set_drvdata(pdev, hsotg);
3122
3123 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3124 if (!res) {
3125 dev_err(dev, "cannot find register resource 0\n");
3126 ret = -EINVAL;
3127 goto err_mem;
3128 }
3129
3130 hsotg->regs_res = request_mem_region(res->start, resource_size(res),
3131 dev_name(dev));
3132 if (!hsotg->regs_res) {
3133 dev_err(dev, "cannot reserve registers\n");
3134 ret = -ENOENT;
3135 goto err_mem;
3136 }
3137
3138 hsotg->regs = ioremap(res->start, resource_size(res));
3139 if (!hsotg->regs) {
3140 dev_err(dev, "cannot map registers\n");
3141 ret = -ENXIO;
3142 goto err_regs_res;
3143 }
3144
3145 ret = platform_get_irq(pdev, 0);
3146 if (ret < 0) {
3147 dev_err(dev, "cannot find IRQ\n");
3148 goto err_regs;
3149 }
3150
3151 hsotg->irq = ret;
3152
3153 ret = request_irq(ret, s3c_hsotg_irq, 0, dev_name(dev), hsotg);
3154 if (ret < 0) {
3155 dev_err(dev, "cannot claim IRQ\n");
3156 goto err_regs;
3157 }
3158
3159 dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
3160
3161 device_initialize(&hsotg->gadget.dev);
3162
3163 dev_set_name(&hsotg->gadget.dev, "gadget");
3164
3165 hsotg->gadget.is_dualspeed = 1;
3166 hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
3167 hsotg->gadget.name = dev_name(dev);
3168
3169 hsotg->gadget.dev.parent = dev;
3170 hsotg->gadget.dev.dma_mask = dev->dma_mask;
3171
3172 /* setup endpoint information */
3173
3174 INIT_LIST_HEAD(&hsotg->gadget.ep_list);
3175 hsotg->gadget.ep0 = &hsotg->eps[0].ep;
3176
3177 /* allocate EP0 request */
3178
3179 hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
3180 GFP_KERNEL);
3181 if (!hsotg->ctrl_req) {
3182 dev_err(dev, "failed to allocate ctrl req\n");
3183 goto err_regs;
3184 }
3185
3186 /* reset the system */
3187
3188 s3c_hsotg_gate(pdev, true);
3189
3190 s3c_hsotg_otgreset(hsotg);
3191 s3c_hsotg_corereset(hsotg);
3192 s3c_hsotg_init(hsotg);
3193
3194 /* initialise the endpoints now the core has been initialised */
3195 for (epnum = 0; epnum < S3C_HSOTG_EPS; epnum++)
3196 s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
3197
3198 s3c_hsotg_create_debug(hsotg);
3199
3200 s3c_hsotg_dump(hsotg);
3201
3202 our_hsotg = hsotg;
3203 return 0;
3204
3205err_regs:
3206 iounmap(hsotg->regs);
3207
3208err_regs_res:
3209 release_resource(hsotg->regs_res);
3210 kfree(hsotg->regs_res);
3211
3212err_mem:
3213 kfree(hsotg);
3214 return ret;
3215}
3216
/**
 * s3c_hsotg_remove - unbind the driver from the platform device
 * @pdev: The platform device we are bound to
 *
 * Tear down everything set up by s3c_hsotg_probe(): debugfs files,
 * gadget driver binding, interrupt, register mapping and memory
 * region, then power-gate the block and free the state. Always
 * returns zero.
 */
static int __devexit s3c_hsotg_remove(struct platform_device *pdev)
{
	struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);

	s3c_hsotg_delete_debug(hsotg);

	/* NOTE(review): hsotg->driver may still be NULL if no gadget
	 * driver was ever bound - confirm the unregister call copes */
	usb_gadget_unregister_driver(hsotg->driver);

	/* stop interrupts before tearing the register mapping down */
	free_irq(hsotg->irq, hsotg);
	iounmap(hsotg->regs);

	release_resource(hsotg->regs_res);
	kfree(hsotg->regs_res);

	/* hardware no longer in use: close the gate again */
	s3c_hsotg_gate(pdev, false);

	kfree(hsotg);
	return 0;
}
3236
/* PM support is not implemented yet: publish NULL suspend/resume
 * handlers so the platform_driver slots below can be filled in. The
 * "#if 1" is a placeholder for a future CONFIG_PM conditional. */
#if 1
#define s3c_hsotg_suspend NULL
#define s3c_hsotg_resume NULL
#endif
3241
/* platform driver glue; matched against devices named "s3c-hsotg" */
static struct platform_driver s3c_hsotg_driver = {
	.driver		= {
		.name	= "s3c-hsotg",
		.owner	= THIS_MODULE,
	},
	.probe		= s3c_hsotg_probe,
	.remove		= __devexit_p(s3c_hsotg_remove),
	.suspend	= s3c_hsotg_suspend,
	.resume		= s3c_hsotg_resume,
};
3252
/* module entry point: register the driver with the platform bus */
static int __init s3c_hsotg_modinit(void)
{
	return platform_driver_register(&s3c_hsotg_driver);
}
3257
/* module exit point: unregister the driver again */
static void __exit s3c_hsotg_modexit(void)
{
	platform_driver_unregister(&s3c_hsotg_driver);
}
3262
module_init(s3c_hsotg_modinit);
module_exit(s3c_hsotg_modexit);

/* module metadata */
MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:s3c-hsotg");
diff --git a/drivers/usb/gadget/u_audio.c b/drivers/usb/gadget/u_audio.c
new file mode 100644
index 000000000000..0f3d22fc030e
--- /dev/null
+++ b/drivers/usb/gadget/u_audio.c
@@ -0,0 +1,319 @@
1/*
2 * u_audio.c -- ALSA audio utilities for Gadget stack
3 *
4 * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
5 * Copyright (C) 2008 Analog Devices, Inc
6 *
7 * Enter bugs at http://blackfin.uclinux.org/
8 *
9 * Licensed under the GPL-2 or later.
10 */
11
12#include <linux/kernel.h>
13#include <linux/utsname.h>
14#include <linux/device.h>
15#include <linux/delay.h>
16#include <linux/ctype.h>
17#include <linux/random.h>
18#include <linux/syscalls.h>
19
20#include "u_audio.h"
21
/*
 * This component encapsulates the ALSA devices for USB audio gadget
 */

/* default ALSA device nodes, overridable via module parameters */
#define FILE_PCM_PLAYBACK "/dev/snd/pcmC0D0p"
#define FILE_PCM_CAPTURE "/dev/snd/pcmC0D0c"
#define FILE_CONTROL "/dev/snd/controlC0"

/* all three paths are read-only module parameters (S_IRUGO) */
static char *fn_play = FILE_PCM_PLAYBACK;
module_param(fn_play, charp, S_IRUGO);
MODULE_PARM_DESC(fn_play, "Playback PCM device file name");

static char *fn_cap = FILE_PCM_CAPTURE;
module_param(fn_cap, charp, S_IRUGO);
MODULE_PARM_DESC(fn_cap, "Capture PCM device file name");

static char *fn_cntl = FILE_CONTROL;
module_param(fn_cntl, charp, S_IRUGO);
MODULE_PARM_DESC(fn_cntl, "Control device file name");
41
42/*-------------------------------------------------------------------------*/
43
44/**
45 * Some ALSA internal helper functions
46 */
47static int snd_interval_refine_set(struct snd_interval *i, unsigned int val)
48{
49 struct snd_interval t;
50 t.empty = 0;
51 t.min = t.max = val;
52 t.openmin = t.openmax = 0;
53 t.integer = 1;
54 return snd_interval_refine(i, &t);
55}
56
/*
 * Constrain a single hw_params field to one value, mirroring the ALSA
 * core's internal parameter-refinement helpers.
 *
 * @params: parameter block to refine
 * @var:    which parameter (access/format/channels/rate/...)
 * @val:    the value to pin the parameter to
 * @dir:    rounding direction: <0 round down, 0 exact, >0 round up
 *
 * Returns the underlying refine result: nonzero when @params was
 * modified, -EINVAL when the constraint cannot be satisfied.
 */
static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params,
				 snd_pcm_hw_param_t var, unsigned int val,
				 int dir)
{
	int changed;
	if (hw_is_mask(var)) {
		/* mask-type parameter (e.g. access, format) */
		struct snd_mask *m = hw_param_mask(params, var);
		if (val == 0 && dir < 0) {
			/* rounding below zero is impossible: empty the mask */
			changed = -EINVAL;
			snd_mask_none(m);
		} else {
			/* apply the rounding direction before refining */
			if (dir > 0)
				val++;
			else if (dir < 0)
				val--;
			changed = snd_mask_refine_set(
					hw_param_mask(params, var), val);
		}
	} else if (hw_is_interval(var)) {
		/* interval-type parameter (e.g. channels, rate) */
		struct snd_interval *i = hw_param_interval(params, var);
		if (val == 0 && dir < 0) {
			changed = -EINVAL;
			snd_interval_none(i);
		} else if (dir == 0)
			changed = snd_interval_refine_set(i, val);
		else {
			/* rounded request: open interval of width one on
			 * the requested side of @val */
			struct snd_interval t;
			t.openmin = 1;
			t.openmax = 1;
			t.empty = 0;
			t.integer = 0;
			if (dir < 0) {
				t.min = val - 1;
				t.max = val;
			} else {
				t.min = val;
				t.max = val+1;
			}
			changed = snd_interval_refine(i, &t);
		}
	} else
		return -EINVAL;
	if (changed) {
		/* record which parameter changed for the caller's benefit;
		 * note this also runs for the -EINVAL cases above */
		params->cmask |= 1 << var;
		params->rmask |= 1 << var;
	}
	return changed;
}
105/*-------------------------------------------------------------------------*/
106
/**
 * Set default hardware params
 *
 * Configures the playback substream for interleaved 16-bit LE stereo
 * at 48 kHz via the in-kernel PCM ioctl interface, then reads back
 * and stores the parameters the hardware actually accepted.
 *
 * Returns zero on success, -ENOMEM on allocation failure, or the
 * negative error from the PREPARE ioctl.
 */
static int playback_default_hw_params(struct gaudio_snd_dev *snd)
{
	struct snd_pcm_substream *substream = snd->substream;
	struct snd_pcm_hw_params *params;
	snd_pcm_sframes_t result;

	/*
	 * SNDRV_PCM_ACCESS_RW_INTERLEAVED,
	 * SNDRV_PCM_FORMAT_S16_LE
	 * CHANNELS: 2
	 * RATE: 48000
	 */
	snd->access = SNDRV_PCM_ACCESS_RW_INTERLEAVED;
	snd->format = SNDRV_PCM_FORMAT_S16_LE;
	snd->channels = 2;
	snd->rate = 48000;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	/* request our preferred configuration */
	_snd_pcm_hw_params_any(params);
	_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_ACCESS,
			snd->access, 0);
	_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_FORMAT,
			snd->format, 0);
	_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_CHANNELS,
			snd->channels, 0);
	_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_RATE,
			snd->rate, 0);

	/* NOTE(review): the DROP and HW_PARAMS ioctl results are ignored
	 * here - confirm a HW_PARAMS failure is acceptable to skip */
	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
	snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, params);

	result = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
	if (result < 0) {
		ERROR(snd->card,
			"Preparing sound card failed: %d\n", (int)result);
		kfree(params);
		return result;
	}

	/* Store the hardware parameters */
	snd->access = params_access(params);
	snd->format = params_format(params);
	snd->channels = params_channels(params);
	snd->rate = params_rate(params);

	kfree(params);

	INFO(snd->card,
		"Hardware params: access %x, format %x, channels %d, rate %d\n",
		snd->access, snd->format, snd->channels, snd->rate);

	return 0;
}
166
/**
 * Playback audio buffer data by ALSA PCM device
 *
 * @card:  the gadget audio state
 * @buf:   kernel buffer holding interleaved sample data
 * @count: number of bytes in @buf
 *
 * Returns 0 once the whole buffer has been written, or the negative
 * error from the PREPARE ioctl.
 * NOTE(review): the return type is size_t, so a negative PREPARE error
 * becomes a huge unsigned value at the caller - verify callers treat
 * the result accordingly.
 */
static size_t u_audio_playback(struct gaudio *card, void *buf, size_t count)
{
	struct gaudio_snd_dev *snd = &card->playback;
	struct snd_pcm_substream *substream = snd->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	mm_segment_t old_fs;
	ssize_t result;
	snd_pcm_sframes_t frames;

try_again:
	/* recover the stream first if it underran or was suspended */
	if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
		runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
		result = snd_pcm_kernel_ioctl(substream,
				SNDRV_PCM_IOCTL_PREPARE, NULL);
		if (result < 0) {
			ERROR(card, "Preparing sound card failed: %d\n",
					(int)result);
			return result;
		}
	}

	frames = bytes_to_frames(runtime, count);
	/* snd_pcm_lib_write() expects a user-space pointer: widen the
	 * address limit so it accepts our kernel buffer */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	result = snd_pcm_lib_write(snd->substream, buf, frames);
	if (result != frames) {
		ERROR(card, "Playback error: %d\n", (int)result);
		set_fs(old_fs);
		/* NOTE(review): a persistent write failure that does not
		 * move the stream into XRUN/SUSPENDED loops here forever */
		goto try_again;
	}
	set_fs(old_fs);

	return 0;
}
204
/* channel count negotiated for the playback stream */
static int u_audio_get_playback_channels(struct gaudio *card)
{
	return card->playback.channels;
}
209
/* sample rate negotiated for the playback stream */
static int u_audio_get_playback_rate(struct gaudio *card)
{
	return card->playback.rate;
}
214
215/**
216 * Open ALSA PCM and control device files
217 * Initial the PCM or control device
218 */
219static int gaudio_open_snd_dev(struct gaudio *card)
220{
221 struct snd_pcm_file *pcm_file;
222 struct gaudio_snd_dev *snd;
223
224 if (!card)
225 return -ENODEV;
226
227 /* Open control device */
228 snd = &card->control;
229 snd->filp = filp_open(fn_cntl, O_RDWR, 0);
230 if (IS_ERR(snd->filp)) {
231 int ret = PTR_ERR(snd->filp);
232 ERROR(card, "unable to open sound control device file: %s\n",
233 fn_cntl);
234 snd->filp = NULL;
235 return ret;
236 }
237 snd->card = card;
238
239 /* Open PCM playback device and setup substream */
240 snd = &card->playback;
241 snd->filp = filp_open(fn_play, O_WRONLY, 0);
242 if (IS_ERR(snd->filp)) {
243 ERROR(card, "No such PCM playback device: %s\n", fn_play);
244 snd->filp = NULL;
245 }
246 pcm_file = snd->filp->private_data;
247 snd->substream = pcm_file->substream;
248 snd->card = card;
249 playback_default_hw_params(snd);
250
251 /* Open PCM capture device and setup substream */
252 snd = &card->capture;
253 snd->filp = filp_open(fn_cap, O_RDONLY, 0);
254 if (IS_ERR(snd->filp)) {
255 ERROR(card, "No such PCM capture device: %s\n", fn_cap);
256 snd->filp = NULL;
257 }
258 pcm_file = snd->filp->private_data;
259 snd->substream = pcm_file->substream;
260 snd->card = card;
261
262 return 0;
263}
264
265/**
266 * Close ALSA PCM and control device files
267 */
268static int gaudio_close_snd_dev(struct gaudio *gau)
269{
270 struct gaudio_snd_dev *snd;
271
272 /* Close control device */
273 snd = &gau->control;
274 if (!IS_ERR(snd->filp))
275 filp_close(snd->filp, current->files);
276
277 /* Close PCM playback device and setup substream */
278 snd = &gau->playback;
279 if (!IS_ERR(snd->filp))
280 filp_close(snd->filp, current->files);
281
282 /* Close PCM capture device and setup substream */
283 snd = &gau->capture;
284 if (!IS_ERR(snd->filp))
285 filp_close(snd->filp, current->files);
286
287 return 0;
288}
289
290/**
291 * gaudio_setup - setup ALSA interface and preparing for USB transfer
292 *
293 * This sets up PCM, mixer or MIDI ALSA devices fore USB gadget using.
294 *
295 * Returns negative errno, or zero on success
296 */
297int __init gaudio_setup(struct gaudio *card)
298{
299 int ret;
300
301 ret = gaudio_open_snd_dev(card);
302 if (ret)
303 ERROR(card, "we need at least one control device\n");
304
305 return ret;
306
307}
308
/**
 * gaudio_cleanup - remove ALSA device interface
 *
 * This is called to free all resources allocated by @gaudio_setup().
 * A NULL @card is silently ignored.
 */
void gaudio_cleanup(struct gaudio *card)
{
	if (!card)
		return;

	gaudio_close_snd_dev(card);
}
319
diff --git a/drivers/usb/gadget/u_audio.h b/drivers/usb/gadget/u_audio.h
new file mode 100644
index 000000000000..cc8d159c648a
--- /dev/null
+++ b/drivers/usb/gadget/u_audio.h
@@ -0,0 +1,56 @@
1/*
2 * u_audio.h -- interface to USB gadget "ALSA AUDIO" utilities
3 *
4 * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
5 * Copyright (C) 2008 Analog Devices, Inc
6 *
7 * Enter bugs at http://blackfin.uclinux.org/
8 *
9 * Licensed under the GPL-2 or later.
10 */
11
12#ifndef __U_AUDIO_H
13#define __U_AUDIO_H
14
15#include <linux/device.h>
16#include <linux/err.h>
17#include <linux/usb/audio.h>
18#include <linux/usb/composite.h>
19
20#include <sound/core.h>
21#include <sound/pcm.h>
22#include <sound/pcm_params.h>
23
24#include "gadget_chips.h"
25
26/*
27 * This represents the USB side of an audio card device, managed by a USB
28 * function which provides control and stream interfaces.
29 */
30
/* state for one opened ALSA device node (control, playback or capture) */
struct gaudio_snd_dev {
	struct gaudio			*card;		/* owning gadget audio state */
	struct file			*filp;		/* open /dev/snd/* device file */
	struct snd_pcm_substream	*substream;	/* PCM substream (PCM nodes only) */
	int				access;		/* SNDRV_PCM_ACCESS_* in use */
	int				format;		/* SNDRV_PCM_FORMAT_* in use */
	int				channels;	/* channel count */
	int				rate;		/* sample rate in Hz */
};
40
/* USB audio function state: the USB side plus the three ALSA devices */
struct gaudio {
	struct usb_function		func;	/* composite-framework function */
	struct usb_gadget		*gadget;

	/* ALSA sound device interfaces */
	struct gaudio_snd_dev		control;
	struct gaudio_snd_dev		playback;
	struct gaudio_snd_dev		capture;

	/* TODO */
};
52
53int gaudio_setup(struct gaudio *card);
54void gaudio_cleanup(struct gaudio *card);
55
56#endif /* __U_AUDIO_H */
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 0a4d99ab40d8..fc6e709f45b1 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -371,6 +371,7 @@ __acquires(&port->port_lock)
371 371
372 req->length = len; 372 req->length = len;
373 list_del(&req->list); 373 list_del(&req->list);
374 req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
374 375
375 pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n", 376 pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
376 port->port_num, len, *((u8 *)req->buf), 377 port->port_num, len, *((u8 *)req->buf),
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 845479f7c707..1576a0520adf 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -17,6 +17,26 @@ config USB_C67X00_HCD
17 To compile this driver as a module, choose M here: the 17 To compile this driver as a module, choose M here: the
18 module will be called c67x00. 18 module will be called c67x00.
19 19
20config USB_XHCI_HCD
21 tristate "xHCI HCD (USB 3.0) support (EXPERIMENTAL)"
22 depends on USB && PCI && EXPERIMENTAL
23 ---help---
24 The eXtensible Host Controller Interface (xHCI) is standard for USB 3.0
25 "SuperSpeed" host controller hardware.
26
27 To compile this driver as a module, choose M here: the
28 module will be called xhci-hcd.
29
30config USB_XHCI_HCD_DEBUGGING
31 bool "Debugging for the xHCI host controller"
32 depends on USB_XHCI_HCD
33 ---help---
34 Say 'Y' to turn on debugging for the xHCI host controller driver.
35 This will spew debugging output, even in interrupt context.
36 This should only be used for debugging xHCI driver bugs.
37
38 If unsure, say N.
39
20config USB_EHCI_HCD 40config USB_EHCI_HCD
21 tristate "EHCI HCD (USB 2.0) support" 41 tristate "EHCI HCD (USB 2.0) support"
22 depends on USB && USB_ARCH_HAS_EHCI 42 depends on USB && USB_ARCH_HAS_EHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f163571e33d8..289d748bb414 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -12,6 +12,7 @@ fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \
12ifeq ($(CONFIG_FHCI_DEBUG),y) 12ifeq ($(CONFIG_FHCI_DEBUG),y)
13fhci-objs += fhci-dbg.o 13fhci-objs += fhci-dbg.o
14endif 14endif
15xhci-objs := xhci-hcd.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o
15 16
16obj-$(CONFIG_USB_WHCI_HCD) += whci/ 17obj-$(CONFIG_USB_WHCI_HCD) += whci/
17 18
@@ -23,6 +24,7 @@ obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
23obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o 24obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
24obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o 25obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
25obj-$(CONFIG_USB_FHCI_HCD) += fhci.o 26obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
27obj-$(CONFIG_USB_XHCI_HCD) += xhci.o
26obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o 28obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
27obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o 29obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
28obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o 30obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index bf69f4739107..c3a778bd359c 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -97,6 +97,7 @@ static const struct hc_driver ehci_au1xxx_hc_driver = {
97 .urb_enqueue = ehci_urb_enqueue, 97 .urb_enqueue = ehci_urb_enqueue,
98 .urb_dequeue = ehci_urb_dequeue, 98 .urb_dequeue = ehci_urb_dequeue,
99 .endpoint_disable = ehci_endpoint_disable, 99 .endpoint_disable = ehci_endpoint_disable,
100 .endpoint_reset = ehci_endpoint_reset,
100 101
101 /* 102 /*
102 * scheduling support 103 * scheduling support
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 01c3da34f678..bf86809c5120 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -309,6 +309,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
309 .urb_enqueue = ehci_urb_enqueue, 309 .urb_enqueue = ehci_urb_enqueue,
310 .urb_dequeue = ehci_urb_dequeue, 310 .urb_dequeue = ehci_urb_dequeue,
311 .endpoint_disable = ehci_endpoint_disable, 311 .endpoint_disable = ehci_endpoint_disable,
312 .endpoint_reset = ehci_endpoint_reset,
312 313
313 /* 314 /*
314 * scheduling support 315 * scheduling support
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index c637207a1c80..2b72473544d3 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1024,6 +1024,51 @@ done:
1024 return; 1024 return;
1025} 1025}
1026 1026
1027static void
1028ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
1029{
1030 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
1031 struct ehci_qh *qh;
1032 int eptype = usb_endpoint_type(&ep->desc);
1033
1034 if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
1035 return;
1036
1037 rescan:
1038 spin_lock_irq(&ehci->lock);
1039 qh = ep->hcpriv;
1040
1041 /* For Bulk and Interrupt endpoints we maintain the toggle state
1042 * in the hardware; the toggle bits in udev aren't used at all.
1043 * When an endpoint is reset by usb_clear_halt() we must reset
1044 * the toggle bit in the QH.
1045 */
1046 if (qh) {
1047 if (!list_empty(&qh->qtd_list)) {
1048 WARN_ONCE(1, "clear_halt for a busy endpoint\n");
1049 } else if (qh->qh_state == QH_STATE_IDLE) {
1050 qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
1051 } else {
1052 /* It's not safe to write into the overlay area
1053 * while the QH is active. Unlink it first and
1054 * wait for the unlink to complete.
1055 */
1056 if (qh->qh_state == QH_STATE_LINKED) {
1057 if (eptype == USB_ENDPOINT_XFER_BULK) {
1058 unlink_async(ehci, qh);
1059 } else {
1060 intr_deschedule(ehci, qh);
1061 (void) qh_schedule(ehci, qh);
1062 }
1063 }
1064 spin_unlock_irq(&ehci->lock);
1065 schedule_timeout_uninterruptible(1);
1066 goto rescan;
1067 }
1068 }
1069 spin_unlock_irq(&ehci->lock);
1070}
1071
1027static int ehci_get_frame (struct usb_hcd *hcd) 1072static int ehci_get_frame (struct usb_hcd *hcd)
1028{ 1073{
1029 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 1074 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
@@ -1097,7 +1142,7 @@ static int __init ehci_hcd_init(void)
1097 sizeof(struct ehci_itd), sizeof(struct ehci_sitd)); 1142 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
1098 1143
1099#ifdef DEBUG 1144#ifdef DEBUG
1100 ehci_debug_root = debugfs_create_dir("ehci", NULL); 1145 ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
1101 if (!ehci_debug_root) { 1146 if (!ehci_debug_root) {
1102 retval = -ENOENT; 1147 retval = -ENOENT;
1103 goto err_debug; 1148 goto err_debug;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 97a53a48a3d8..f46ad27c9a90 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -391,7 +391,7 @@ static inline void create_companion_file(struct ehci_hcd *ehci)
391 391
392 /* with integrated TT there is no companion! */ 392 /* with integrated TT there is no companion! */
393 if (!ehci_is_TDI(ehci)) 393 if (!ehci_is_TDI(ehci))
394 i = device_create_file(ehci_to_hcd(ehci)->self.dev, 394 i = device_create_file(ehci_to_hcd(ehci)->self.controller,
395 &dev_attr_companion); 395 &dev_attr_companion);
396} 396}
397 397
@@ -399,7 +399,7 @@ static inline void remove_companion_file(struct ehci_hcd *ehci)
399{ 399{
400 /* with integrated TT there is no companion! */ 400 /* with integrated TT there is no companion! */
401 if (!ehci_is_TDI(ehci)) 401 if (!ehci_is_TDI(ehci))
402 device_remove_file(ehci_to_hcd(ehci)->self.dev, 402 device_remove_file(ehci_to_hcd(ehci)->self.controller,
403 &dev_attr_companion); 403 &dev_attr_companion);
404} 404}
405 405
diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
index 9c32063a0c2f..a44bb4a94954 100644
--- a/drivers/usb/host/ehci-ixp4xx.c
+++ b/drivers/usb/host/ehci-ixp4xx.c
@@ -51,6 +51,7 @@ static const struct hc_driver ixp4xx_ehci_hc_driver = {
51 .urb_enqueue = ehci_urb_enqueue, 51 .urb_enqueue = ehci_urb_enqueue,
52 .urb_dequeue = ehci_urb_dequeue, 52 .urb_dequeue = ehci_urb_dequeue,
53 .endpoint_disable = ehci_endpoint_disable, 53 .endpoint_disable = ehci_endpoint_disable,
54 .endpoint_reset = ehci_endpoint_reset,
54 .get_frame_number = ehci_get_frame, 55 .get_frame_number = ehci_get_frame,
55 .hub_status_data = ehci_hub_status_data, 56 .hub_status_data = ehci_hub_status_data,
56 .hub_control = ehci_hub_control, 57 .hub_control = ehci_hub_control,
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 9d487908012e..770dd9aba62a 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -149,6 +149,7 @@ static const struct hc_driver ehci_orion_hc_driver = {
149 .urb_enqueue = ehci_urb_enqueue, 149 .urb_enqueue = ehci_urb_enqueue,
150 .urb_dequeue = ehci_urb_dequeue, 150 .urb_dequeue = ehci_urb_dequeue,
151 .endpoint_disable = ehci_endpoint_disable, 151 .endpoint_disable = ehci_endpoint_disable,
152 .endpoint_reset = ehci_endpoint_reset,
152 153
153 /* 154 /*
154 * scheduling support 155 * scheduling support
@@ -187,7 +188,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
187 } 188 }
188} 189}
189 190
190static int __init ehci_orion_drv_probe(struct platform_device *pdev) 191static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
191{ 192{
192 struct orion_ehci_data *pd = pdev->dev.platform_data; 193 struct orion_ehci_data *pd = pdev->dev.platform_data;
193 struct resource *res; 194 struct resource *res;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 5aa8bce90e1f..f3683e1da161 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -268,7 +268,7 @@ done:
268 * Also they depend on separate root hub suspend/resume. 268 * Also they depend on separate root hub suspend/resume.
269 */ 269 */
270 270
271static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message) 271static int ehci_pci_suspend(struct usb_hcd *hcd)
272{ 272{
273 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 273 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
274 unsigned long flags; 274 unsigned long flags;
@@ -293,12 +293,6 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
293 ehci_writel(ehci, 0, &ehci->regs->intr_enable); 293 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
294 (void)ehci_readl(ehci, &ehci->regs->intr_enable); 294 (void)ehci_readl(ehci, &ehci->regs->intr_enable);
295 295
296 /* make sure snapshot being resumed re-enumerates everything */
297 if (message.event == PM_EVENT_PRETHAW) {
298 ehci_halt(ehci);
299 ehci_reset(ehci);
300 }
301
302 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 296 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
303 bail: 297 bail:
304 spin_unlock_irqrestore (&ehci->lock, flags); 298 spin_unlock_irqrestore (&ehci->lock, flags);
@@ -309,7 +303,7 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
309 return rc; 303 return rc;
310} 304}
311 305
312static int ehci_pci_resume(struct usb_hcd *hcd) 306static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
313{ 307{
314 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 308 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
315 struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 309 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
@@ -322,10 +316,12 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
322 /* Mark hardware accessible again as we are out of D3 state by now */ 316 /* Mark hardware accessible again as we are out of D3 state by now */
323 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 317 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
324 318
325 /* If CF is still set, we maintained PCI Vaux power. 319 /* If CF is still set and we aren't resuming from hibernation
320 * then we maintained PCI Vaux power.
326 * Just undo the effect of ehci_pci_suspend(). 321 * Just undo the effect of ehci_pci_suspend().
327 */ 322 */
328 if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) { 323 if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF &&
324 !hibernated) {
329 int mask = INTR_MASK; 325 int mask = INTR_MASK;
330 326
331 if (!hcd->self.root_hub->do_remote_wakeup) 327 if (!hcd->self.root_hub->do_remote_wakeup)
@@ -335,7 +331,6 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
335 return 0; 331 return 0;
336 } 332 }
337 333
338 ehci_dbg(ehci, "lost power, restarting\n");
339 usb_root_hub_lost_power(hcd->self.root_hub); 334 usb_root_hub_lost_power(hcd->self.root_hub);
340 335
341 /* Else reset, to cope with power loss or flush-to-storage 336 /* Else reset, to cope with power loss or flush-to-storage
@@ -393,6 +388,7 @@ static const struct hc_driver ehci_pci_hc_driver = {
393 .urb_enqueue = ehci_urb_enqueue, 388 .urb_enqueue = ehci_urb_enqueue,
394 .urb_dequeue = ehci_urb_dequeue, 389 .urb_dequeue = ehci_urb_dequeue,
395 .endpoint_disable = ehci_endpoint_disable, 390 .endpoint_disable = ehci_endpoint_disable,
391 .endpoint_reset = ehci_endpoint_reset,
396 392
397 /* 393 /*
398 * scheduling support 394 * scheduling support
@@ -429,10 +425,11 @@ static struct pci_driver ehci_pci_driver = {
429 425
430 .probe = usb_hcd_pci_probe, 426 .probe = usb_hcd_pci_probe,
431 .remove = usb_hcd_pci_remove, 427 .remove = usb_hcd_pci_remove,
428 .shutdown = usb_hcd_pci_shutdown,
432 429
433#ifdef CONFIG_PM 430#ifdef CONFIG_PM_SLEEP
434 .suspend = usb_hcd_pci_suspend, 431 .driver = {
435 .resume = usb_hcd_pci_resume, 432 .pm = &usb_hcd_pci_pm_ops
433 },
436#endif 434#endif
437 .shutdown = usb_hcd_pci_shutdown,
438}; 435};
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index ef732b704f53..fbd272288fc2 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -61,6 +61,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
61 .urb_enqueue = ehci_urb_enqueue, 61 .urb_enqueue = ehci_urb_enqueue,
62 .urb_dequeue = ehci_urb_dequeue, 62 .urb_dequeue = ehci_urb_dequeue,
63 .endpoint_disable = ehci_endpoint_disable, 63 .endpoint_disable = ehci_endpoint_disable,
64 .endpoint_reset = ehci_endpoint_reset,
64 65
65 /* 66 /*
66 * scheduling support 67 * scheduling support
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 3e8844e56ca9..93f7035d00a1 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -65,6 +65,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
65 .urb_enqueue = ehci_urb_enqueue, 65 .urb_enqueue = ehci_urb_enqueue,
66 .urb_dequeue = ehci_urb_dequeue, 66 .urb_dequeue = ehci_urb_dequeue,
67 .endpoint_disable = ehci_endpoint_disable, 67 .endpoint_disable = ehci_endpoint_disable,
68 .endpoint_reset = ehci_endpoint_reset,
68 .get_frame_number = ehci_get_frame, 69 .get_frame_number = ehci_get_frame,
69 .hub_status_data = ehci_hub_status_data, 70 .hub_status_data = ehci_hub_status_data,
70 .hub_control = ehci_hub_control, 71 .hub_control = ehci_hub_control,
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 1976b1b3778c..3192f683f807 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -93,22 +93,6 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
93 qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); 93 qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
94 qh->hw_alt_next = EHCI_LIST_END(ehci); 94 qh->hw_alt_next = EHCI_LIST_END(ehci);
95 95
96 /* Except for control endpoints, we make hardware maintain data
97 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
98 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
99 * ever clear it.
100 */
101 if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
102 unsigned is_out, epnum;
103
104 is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
105 epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
106 if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
107 qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
108 usb_settoggle (qh->dev, epnum, is_out, 1);
109 }
110 }
111
112 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ 96 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
113 wmb (); 97 wmb ();
114 qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); 98 qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
@@ -850,7 +834,6 @@ done:
850 qh->qh_state = QH_STATE_IDLE; 834 qh->qh_state = QH_STATE_IDLE;
851 qh->hw_info1 = cpu_to_hc32(ehci, info1); 835 qh->hw_info1 = cpu_to_hc32(ehci, info1);
852 qh->hw_info2 = cpu_to_hc32(ehci, info2); 836 qh->hw_info2 = cpu_to_hc32(ehci, info2);
853 usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
854 qh_refresh (ehci, qh); 837 qh_refresh (ehci, qh);
855 return qh; 838 return qh;
856} 839}
@@ -881,7 +864,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
881 } 864 }
882 } 865 }
883 866
884 /* clear halt and/or toggle; and maybe recover from silicon quirk */ 867 /* clear halt and maybe recover from silicon quirk */
885 if (qh->qh_state == QH_STATE_IDLE) 868 if (qh->qh_state == QH_STATE_IDLE)
886 qh_refresh (ehci, qh); 869 qh_refresh (ehci, qh);
887 870
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 556d0ec0c1f8..9d1babc7ff65 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -760,8 +760,10 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
760 if (status) { 760 if (status) {
761 /* "normal" case, uframing flexible except with splits */ 761 /* "normal" case, uframing flexible except with splits */
762 if (qh->period) { 762 if (qh->period) {
763 frame = qh->period - 1; 763 int i;
764 do { 764
765 for (i = qh->period; status && i > 0; --i) {
766 frame = ++ehci->random_frame % qh->period;
765 for (uframe = 0; uframe < 8; uframe++) { 767 for (uframe = 0; uframe < 8; uframe++) {
766 status = check_intr_schedule (ehci, 768 status = check_intr_schedule (ehci,
767 frame, uframe, qh, 769 frame, uframe, qh,
@@ -769,7 +771,7 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
769 if (status == 0) 771 if (status == 0)
770 break; 772 break;
771 } 773 }
772 } while (status && frame--); 774 }
773 775
774 /* qh->period == 0 means every uframe */ 776 /* qh->period == 0 means every uframe */
775 } else { 777 } else {
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 6cff195e1a36..90ad3395bb21 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -116,6 +116,7 @@ struct ehci_hcd { /* one per controller */
116 struct timer_list watchdog; 116 struct timer_list watchdog;
117 unsigned long actions; 117 unsigned long actions;
118 unsigned stamp; 118 unsigned stamp;
119 unsigned random_frame;
119 unsigned long next_statechange; 120 unsigned long next_statechange;
120 u32 command; 121 u32 command;
121 122
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
index ea8a4255c5da..e799f86dab11 100644
--- a/drivers/usb/host/fhci-dbg.c
+++ b/drivers/usb/host/fhci-dbg.c
@@ -108,7 +108,7 @@ void fhci_dfs_create(struct fhci_hcd *fhci)
108{ 108{
109 struct device *dev = fhci_to_hcd(fhci)->self.controller; 109 struct device *dev = fhci_to_hcd(fhci)->self.controller;
110 110
111 fhci->dfs_root = debugfs_create_dir(dev_name(dev), NULL); 111 fhci->dfs_root = debugfs_create_dir(dev_name(dev), usb_debug_root);
112 if (!fhci->dfs_root) { 112 if (!fhci->dfs_root) {
113 WARN_ON(1); 113 WARN_ON(1);
114 return; 114 return;
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index cbf30e515f29..88b03214622b 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -172,25 +172,6 @@ error_cluster_id_get:
172 172
173} 173}
174 174
175static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg)
176{
177 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
178 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
179 dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__,
180 usb_hcd, hwahc, *(unsigned long *) &msg);
181 return -ENOSYS;
182}
183
184static int hwahc_op_resume(struct usb_hcd *usb_hcd)
185{
186 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
187 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
188
189 dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
190 usb_hcd, hwahc);
191 return -ENOSYS;
192}
193
194/* 175/*
195 * No need to abort pipes, as when this is called, all the children 176 * No need to abort pipes, as when this is called, all the children
196 * has been disconnected and that has done it [through 177 * has been disconnected and that has done it [through
@@ -598,8 +579,6 @@ static struct hc_driver hwahc_hc_driver = {
598 .flags = HCD_USB2, /* FIXME */ 579 .flags = HCD_USB2, /* FIXME */
599 .reset = hwahc_op_reset, 580 .reset = hwahc_op_reset,
600 .start = hwahc_op_start, 581 .start = hwahc_op_start,
601 .pci_suspend = hwahc_op_suspend,
602 .pci_resume = hwahc_op_resume,
603 .stop = hwahc_op_stop, 582 .stop = hwahc_op_stop,
604 .get_frame_number = hwahc_op_get_frame_number, 583 .get_frame_number = hwahc_op_get_frame_number,
605 .urb_enqueue = hwahc_op_urb_enqueue, 584 .urb_enqueue = hwahc_op_urb_enqueue,
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index d3269656aa4d..811f5dfdc582 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -431,7 +431,7 @@ static struct dentry *ohci_debug_root;
431 431
432struct debug_buffer { 432struct debug_buffer {
433 ssize_t (*fill_func)(struct debug_buffer *); /* fill method */ 433 ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
434 struct device *dev; 434 struct ohci_hcd *ohci;
435 struct mutex mutex; /* protect filling of buffer */ 435 struct mutex mutex; /* protect filling of buffer */
436 size_t count; /* number of characters filled into buffer */ 436 size_t count; /* number of characters filled into buffer */
437 char *page; 437 char *page;
@@ -505,15 +505,11 @@ show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed)
505 505
506static ssize_t fill_async_buffer(struct debug_buffer *buf) 506static ssize_t fill_async_buffer(struct debug_buffer *buf)
507{ 507{
508 struct usb_bus *bus;
509 struct usb_hcd *hcd;
510 struct ohci_hcd *ohci; 508 struct ohci_hcd *ohci;
511 size_t temp; 509 size_t temp;
512 unsigned long flags; 510 unsigned long flags;
513 511
514 bus = dev_get_drvdata(buf->dev); 512 ohci = buf->ohci;
515 hcd = bus_to_hcd(bus);
516 ohci = hcd_to_ohci(hcd);
517 513
518 /* display control and bulk lists together, for simplicity */ 514 /* display control and bulk lists together, for simplicity */
519 spin_lock_irqsave (&ohci->lock, flags); 515 spin_lock_irqsave (&ohci->lock, flags);
@@ -529,8 +525,6 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
529 525
530static ssize_t fill_periodic_buffer(struct debug_buffer *buf) 526static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
531{ 527{
532 struct usb_bus *bus;
533 struct usb_hcd *hcd;
534 struct ohci_hcd *ohci; 528 struct ohci_hcd *ohci;
535 struct ed **seen, *ed; 529 struct ed **seen, *ed;
536 unsigned long flags; 530 unsigned long flags;
@@ -542,9 +536,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
542 return 0; 536 return 0;
543 seen_count = 0; 537 seen_count = 0;
544 538
545 bus = (struct usb_bus *)dev_get_drvdata(buf->dev); 539 ohci = buf->ohci;
546 hcd = bus_to_hcd(bus);
547 ohci = hcd_to_ohci(hcd);
548 next = buf->page; 540 next = buf->page;
549 size = PAGE_SIZE; 541 size = PAGE_SIZE;
550 542
@@ -626,7 +618,6 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
626 618
627static ssize_t fill_registers_buffer(struct debug_buffer *buf) 619static ssize_t fill_registers_buffer(struct debug_buffer *buf)
628{ 620{
629 struct usb_bus *bus;
630 struct usb_hcd *hcd; 621 struct usb_hcd *hcd;
631 struct ohci_hcd *ohci; 622 struct ohci_hcd *ohci;
632 struct ohci_regs __iomem *regs; 623 struct ohci_regs __iomem *regs;
@@ -635,9 +626,8 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
635 char *next; 626 char *next;
636 u32 rdata; 627 u32 rdata;
637 628
638 bus = (struct usb_bus *)dev_get_drvdata(buf->dev); 629 ohci = buf->ohci;
639 hcd = bus_to_hcd(bus); 630 hcd = ohci_to_hcd(ohci);
640 ohci = hcd_to_ohci(hcd);
641 regs = ohci->regs; 631 regs = ohci->regs;
642 next = buf->page; 632 next = buf->page;
643 size = PAGE_SIZE; 633 size = PAGE_SIZE;
@@ -710,7 +700,7 @@ done:
710 return PAGE_SIZE - size; 700 return PAGE_SIZE - size;
711} 701}
712 702
713static struct debug_buffer *alloc_buffer(struct device *dev, 703static struct debug_buffer *alloc_buffer(struct ohci_hcd *ohci,
714 ssize_t (*fill_func)(struct debug_buffer *)) 704 ssize_t (*fill_func)(struct debug_buffer *))
715{ 705{
716 struct debug_buffer *buf; 706 struct debug_buffer *buf;
@@ -718,7 +708,7 @@ static struct debug_buffer *alloc_buffer(struct device *dev,
718 buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL); 708 buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
719 709
720 if (buf) { 710 if (buf) {
721 buf->dev = dev; 711 buf->ohci = ohci;
722 buf->fill_func = fill_func; 712 buf->fill_func = fill_func;
723 mutex_init(&buf->mutex); 713 mutex_init(&buf->mutex);
724 } 714 }
@@ -810,26 +800,25 @@ static int debug_registers_open(struct inode *inode, struct file *file)
810static inline void create_debug_files (struct ohci_hcd *ohci) 800static inline void create_debug_files (struct ohci_hcd *ohci)
811{ 801{
812 struct usb_bus *bus = &ohci_to_hcd(ohci)->self; 802 struct usb_bus *bus = &ohci_to_hcd(ohci)->self;
813 struct device *dev = bus->dev;
814 803
815 ohci->debug_dir = debugfs_create_dir(bus->bus_name, ohci_debug_root); 804 ohci->debug_dir = debugfs_create_dir(bus->bus_name, ohci_debug_root);
816 if (!ohci->debug_dir) 805 if (!ohci->debug_dir)
817 goto dir_error; 806 goto dir_error;
818 807
819 ohci->debug_async = debugfs_create_file("async", S_IRUGO, 808 ohci->debug_async = debugfs_create_file("async", S_IRUGO,
820 ohci->debug_dir, dev, 809 ohci->debug_dir, ohci,
821 &debug_async_fops); 810 &debug_async_fops);
822 if (!ohci->debug_async) 811 if (!ohci->debug_async)
823 goto async_error; 812 goto async_error;
824 813
825 ohci->debug_periodic = debugfs_create_file("periodic", S_IRUGO, 814 ohci->debug_periodic = debugfs_create_file("periodic", S_IRUGO,
826 ohci->debug_dir, dev, 815 ohci->debug_dir, ohci,
827 &debug_periodic_fops); 816 &debug_periodic_fops);
828 if (!ohci->debug_periodic) 817 if (!ohci->debug_periodic)
829 goto periodic_error; 818 goto periodic_error;
830 819
831 ohci->debug_registers = debugfs_create_file("registers", S_IRUGO, 820 ohci->debug_registers = debugfs_create_file("registers", S_IRUGO,
832 ohci->debug_dir, dev, 821 ohci->debug_dir, ohci,
833 &debug_registers_fops); 822 &debug_registers_fops);
834 if (!ohci->debug_registers) 823 if (!ohci->debug_registers)
835 goto registers_error; 824 goto registers_error;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 25db704f3a2a..58151687d351 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -571,7 +571,7 @@ static int ohci_init (struct ohci_hcd *ohci)
571 */ 571 */
572static int ohci_run (struct ohci_hcd *ohci) 572static int ohci_run (struct ohci_hcd *ohci)
573{ 573{
574 u32 mask, temp; 574 u32 mask, val;
575 int first = ohci->fminterval == 0; 575 int first = ohci->fminterval == 0;
576 struct usb_hcd *hcd = ohci_to_hcd(ohci); 576 struct usb_hcd *hcd = ohci_to_hcd(ohci);
577 577
@@ -580,8 +580,8 @@ static int ohci_run (struct ohci_hcd *ohci)
580 /* boot firmware should have set this up (5.1.1.3.1) */ 580 /* boot firmware should have set this up (5.1.1.3.1) */
581 if (first) { 581 if (first) {
582 582
583 temp = ohci_readl (ohci, &ohci->regs->fminterval); 583 val = ohci_readl (ohci, &ohci->regs->fminterval);
584 ohci->fminterval = temp & 0x3fff; 584 ohci->fminterval = val & 0x3fff;
585 if (ohci->fminterval != FI) 585 if (ohci->fminterval != FI)
586 ohci_dbg (ohci, "fminterval delta %d\n", 586 ohci_dbg (ohci, "fminterval delta %d\n",
587 ohci->fminterval - FI); 587 ohci->fminterval - FI);
@@ -600,25 +600,25 @@ static int ohci_run (struct ohci_hcd *ohci)
600 600
601 switch (ohci->hc_control & OHCI_CTRL_HCFS) { 601 switch (ohci->hc_control & OHCI_CTRL_HCFS) {
602 case OHCI_USB_OPER: 602 case OHCI_USB_OPER:
603 temp = 0; 603 val = 0;
604 break; 604 break;
605 case OHCI_USB_SUSPEND: 605 case OHCI_USB_SUSPEND:
606 case OHCI_USB_RESUME: 606 case OHCI_USB_RESUME:
607 ohci->hc_control &= OHCI_CTRL_RWC; 607 ohci->hc_control &= OHCI_CTRL_RWC;
608 ohci->hc_control |= OHCI_USB_RESUME; 608 ohci->hc_control |= OHCI_USB_RESUME;
609 temp = 10 /* msec wait */; 609 val = 10 /* msec wait */;
610 break; 610 break;
611 // case OHCI_USB_RESET: 611 // case OHCI_USB_RESET:
612 default: 612 default:
613 ohci->hc_control &= OHCI_CTRL_RWC; 613 ohci->hc_control &= OHCI_CTRL_RWC;
614 ohci->hc_control |= OHCI_USB_RESET; 614 ohci->hc_control |= OHCI_USB_RESET;
615 temp = 50 /* msec wait */; 615 val = 50 /* msec wait */;
616 break; 616 break;
617 } 617 }
618 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control); 618 ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
619 // flush the writes 619 // flush the writes
620 (void) ohci_readl (ohci, &ohci->regs->control); 620 (void) ohci_readl (ohci, &ohci->regs->control);
621 msleep(temp); 621 msleep(val);
622 622
623 memset (ohci->hcca, 0, sizeof (struct ohci_hcca)); 623 memset (ohci->hcca, 0, sizeof (struct ohci_hcca));
624 624
@@ -628,9 +628,9 @@ static int ohci_run (struct ohci_hcd *ohci)
628retry: 628retry:
629 /* HC Reset requires max 10 us delay */ 629 /* HC Reset requires max 10 us delay */
630 ohci_writel (ohci, OHCI_HCR, &ohci->regs->cmdstatus); 630 ohci_writel (ohci, OHCI_HCR, &ohci->regs->cmdstatus);
631 temp = 30; /* ... allow extra time */ 631 val = 30; /* ... allow extra time */
632 while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) { 632 while ((ohci_readl (ohci, &ohci->regs->cmdstatus) & OHCI_HCR) != 0) {
633 if (--temp == 0) { 633 if (--val == 0) {
634 spin_unlock_irq (&ohci->lock); 634 spin_unlock_irq (&ohci->lock);
635 ohci_err (ohci, "USB HC reset timed out!\n"); 635 ohci_err (ohci, "USB HC reset timed out!\n");
636 return -1; 636 return -1;
@@ -699,23 +699,23 @@ retry:
699 ohci_writel (ohci, mask, &ohci->regs->intrenable); 699 ohci_writel (ohci, mask, &ohci->regs->intrenable);
700 700
701 /* handle root hub init quirks ... */ 701 /* handle root hub init quirks ... */
702 temp = roothub_a (ohci); 702 val = roothub_a (ohci);
703 temp &= ~(RH_A_PSM | RH_A_OCPM); 703 val &= ~(RH_A_PSM | RH_A_OCPM);
704 if (ohci->flags & OHCI_QUIRK_SUPERIO) { 704 if (ohci->flags & OHCI_QUIRK_SUPERIO) {
705 /* NSC 87560 and maybe others */ 705 /* NSC 87560 and maybe others */
706 temp |= RH_A_NOCP; 706 val |= RH_A_NOCP;
707 temp &= ~(RH_A_POTPGT | RH_A_NPS); 707 val &= ~(RH_A_POTPGT | RH_A_NPS);
708 ohci_writel (ohci, temp, &ohci->regs->roothub.a); 708 ohci_writel (ohci, val, &ohci->regs->roothub.a);
709 } else if ((ohci->flags & OHCI_QUIRK_AMD756) || 709 } else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
710 (ohci->flags & OHCI_QUIRK_HUB_POWER)) { 710 (ohci->flags & OHCI_QUIRK_HUB_POWER)) {
711 /* hub power always on; required for AMD-756 and some 711 /* hub power always on; required for AMD-756 and some
712 * Mac platforms. ganged overcurrent reporting, if any. 712 * Mac platforms. ganged overcurrent reporting, if any.
713 */ 713 */
714 temp |= RH_A_NPS; 714 val |= RH_A_NPS;
715 ohci_writel (ohci, temp, &ohci->regs->roothub.a); 715 ohci_writel (ohci, val, &ohci->regs->roothub.a);
716 } 716 }
717 ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status); 717 ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status);
718 ohci_writel (ohci, (temp & RH_A_NPS) ? 0 : RH_B_PPCM, 718 ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM,
719 &ohci->regs->roothub.b); 719 &ohci->regs->roothub.b);
720 // flush those writes 720 // flush those writes
721 (void) ohci_readl (ohci, &ohci->regs->control); 721 (void) ohci_readl (ohci, &ohci->regs->control);
@@ -724,7 +724,7 @@ retry:
724 spin_unlock_irq (&ohci->lock); 724 spin_unlock_irq (&ohci->lock);
725 725
726 // POTPGT delay is bits 24-31, in 2 ms units. 726 // POTPGT delay is bits 24-31, in 2 ms units.
727 mdelay ((temp >> 23) & 0x1fe); 727 mdelay ((val >> 23) & 0x1fe);
728 hcd->state = HC_STATE_RUNNING; 728 hcd->state = HC_STATE_RUNNING;
729 729
730 if (quirk_zfmicro(ohci)) { 730 if (quirk_zfmicro(ohci)) {
@@ -1105,7 +1105,7 @@ static int __init ohci_hcd_mod_init(void)
1105 set_bit(USB_OHCI_LOADED, &usb_hcds_loaded); 1105 set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
1106 1106
1107#ifdef DEBUG 1107#ifdef DEBUG
1108 ohci_debug_root = debugfs_create_dir("ohci", NULL); 1108 ohci_debug_root = debugfs_create_dir("ohci", usb_debug_root);
1109 if (!ohci_debug_root) { 1109 if (!ohci_debug_root) {
1110 retval = -ENOENT; 1110 retval = -ENOENT;
1111 goto error_debug; 1111 goto error_debug;
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index f9961b4c0da3..d2ba04dd785e 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -372,7 +372,7 @@ static int __devinit ohci_pci_start (struct usb_hcd *hcd)
372 372
373#ifdef CONFIG_PM 373#ifdef CONFIG_PM
374 374
375static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message) 375static int ohci_pci_suspend(struct usb_hcd *hcd)
376{ 376{
377 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 377 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
378 unsigned long flags; 378 unsigned long flags;
@@ -394,10 +394,6 @@ static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
394 ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); 394 ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
395 (void)ohci_readl(ohci, &ohci->regs->intrdisable); 395 (void)ohci_readl(ohci, &ohci->regs->intrdisable);
396 396
397 /* make sure snapshot being resumed re-enumerates everything */
398 if (message.event == PM_EVENT_PRETHAW)
399 ohci_usb_reset(ohci);
400
401 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 397 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
402 bail: 398 bail:
403 spin_unlock_irqrestore (&ohci->lock, flags); 399 spin_unlock_irqrestore (&ohci->lock, flags);
@@ -406,9 +402,14 @@ static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
406} 402}
407 403
408 404
409static int ohci_pci_resume (struct usb_hcd *hcd) 405static int ohci_pci_resume(struct usb_hcd *hcd, bool hibernated)
410{ 406{
411 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 407 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
408
409 /* Make sure resume from hibernation re-enumerates everything */
410 if (hibernated)
411 ohci_usb_reset(hcd_to_ohci(hcd));
412
412 ohci_finish_controller_resume(hcd); 413 ohci_finish_controller_resume(hcd);
413 return 0; 414 return 0;
414} 415}
@@ -484,12 +485,11 @@ static struct pci_driver ohci_pci_driver = {
484 485
485 .probe = usb_hcd_pci_probe, 486 .probe = usb_hcd_pci_probe,
486 .remove = usb_hcd_pci_remove, 487 .remove = usb_hcd_pci_remove,
488 .shutdown = usb_hcd_pci_shutdown,
487 489
488#ifdef CONFIG_PM 490#ifdef CONFIG_PM_SLEEP
489 .suspend = usb_hcd_pci_suspend, 491 .driver = {
490 .resume = usb_hcd_pci_resume, 492 .pm = &usb_hcd_pci_pm_ops
493 },
491#endif 494#endif
492
493 .shutdown = usb_hcd_pci_shutdown,
494}; 495};
495
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 033c2846ce59..83b5f9cea85a 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -15,6 +15,7 @@
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/acpi.h> 16#include <linux/acpi.h>
17#include "pci-quirks.h" 17#include "pci-quirks.h"
18#include "xhci-ext-caps.h"
18 19
19 20
20#define UHCI_USBLEGSUP 0xc0 /* legacy support */ 21#define UHCI_USBLEGSUP 0xc0 /* legacy support */
@@ -341,7 +342,127 @@ static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
341 return; 342 return;
342} 343}
343 344
345/*
346 * handshake - spin reading a register until handshake completes
347 * @ptr: address of hc register to be read
348 * @mask: bits to look at in result of read
349 * @done: value of those bits when handshake succeeds
350 * @wait_usec: timeout in microseconds
351 * @delay_usec: delay in microseconds to wait between polling
352 *
353 * Polls a register every delay_usec microseconds.
354 * Returns 0 when the mask bits have the value done.
355 * Returns -ETIMEDOUT if this condition is not true after
356 * wait_usec microseconds have passed.
357 */
358static int handshake(void __iomem *ptr, u32 mask, u32 done,
359 int wait_usec, int delay_usec)
360{
361 u32 result;
362
363 do {
364 result = readl(ptr);
365 result &= mask;
366 if (result == done)
367 return 0;
368 udelay(delay_usec);
369 wait_usec -= delay_usec;
370 } while (wait_usec > 0);
371 return -ETIMEDOUT;
372}
373
374/**
375 * PCI Quirks for xHCI.
376 *
377 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
378 * It signals to the BIOS that the OS wants control of the host controller,
379 * and then waits 5 seconds for the BIOS to hand over control.
380 * If we timeout, assume the BIOS is broken and take control anyway.
381 */
382static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
383{
384 void __iomem *base;
385 int ext_cap_offset;
386 void __iomem *op_reg_base;
387 u32 val;
388 int timeout;
389
390 if (!mmio_resource_enabled(pdev, 0))
391 return;
392
393 base = ioremap_nocache(pci_resource_start(pdev, 0),
394 pci_resource_len(pdev, 0));
395 if (base == NULL)
396 return;
344 397
398 /*
399 * Find the Legacy Support Capability register -
400 * this is optional for xHCI host controllers.
401 */
402 ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
403 do {
404 if (!ext_cap_offset)
405 /* We've reached the end of the extended capabilities */
406 goto hc_init;
407 val = readl(base + ext_cap_offset);
408 if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
409 break;
410 ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
411 } while (1);
412
413 /* If the BIOS owns the HC, signal that the OS wants it, and wait */
414 if (val & XHCI_HC_BIOS_OWNED) {
415 writel(val & XHCI_HC_OS_OWNED, base + ext_cap_offset);
416
417 /* Wait for 5 seconds with 10 microsecond polling interval */
418 timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
419 0, 5000, 10);
420
421 /* Assume a buggy BIOS and take HC ownership anyway */
422 if (timeout) {
423 dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
424 " (BIOS bug ?) %08x\n", val);
425 writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
426 }
427 }
428
429 /* Disable any BIOS SMIs */
430 writel(XHCI_LEGACY_DISABLE_SMI,
431 base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
432
433hc_init:
434 op_reg_base = base + XHCI_HC_LENGTH(readl(base));
435
436 /* Wait for the host controller to be ready before writing any
437 * operational or runtime registers. Wait 5 seconds and no more.
438 */
439 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
440 5000, 10);
441 /* Assume a buggy HC and start HC initialization anyway */
442 if (timeout) {
443 val = readl(op_reg_base + XHCI_STS_OFFSET);
444 dev_warn(&pdev->dev,
445 "xHCI HW not ready after 5 sec (HC bug?) "
446 "status = 0x%x\n", val);
447 }
448
449 /* Send the halt and disable interrupts command */
450 val = readl(op_reg_base + XHCI_CMD_OFFSET);
451 val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
452 writel(val, op_reg_base + XHCI_CMD_OFFSET);
453
454 /* Wait for the HC to halt - poll every 125 usec (one microframe). */
455 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
456 XHCI_MAX_HALT_USEC, 125);
457 if (timeout) {
458 val = readl(op_reg_base + XHCI_STS_OFFSET);
459 dev_warn(&pdev->dev,
460 "xHCI HW did not halt within %d usec "
461 "status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
462 }
463
464 iounmap(base);
465}
345 466
346static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev) 467static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
347{ 468{
@@ -351,5 +472,7 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
351 quirk_usb_handoff_ohci(pdev); 472 quirk_usb_handoff_ohci(pdev);
352 else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI) 473 else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
353 quirk_usb_disable_ehci(pdev); 474 quirk_usb_disable_ehci(pdev);
475 else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
476 quirk_usb_handoff_xhci(pdev);
354} 477}
355DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff); 478DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index f1626e58c141..56976cc0352a 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -46,31 +46,10 @@ MODULE_LICENSE("GPL");
46MODULE_AUTHOR("Yoshihiro Shimoda"); 46MODULE_AUTHOR("Yoshihiro Shimoda");
47MODULE_ALIAS("platform:r8a66597_hcd"); 47MODULE_ALIAS("platform:r8a66597_hcd");
48 48
49#define DRIVER_VERSION "10 Apr 2008" 49#define DRIVER_VERSION "2009-05-26"
50 50
51static const char hcd_name[] = "r8a66597_hcd"; 51static const char hcd_name[] = "r8a66597_hcd";
52 52
53/* module parameters */
54#if !defined(CONFIG_SUPERH_ON_CHIP_R8A66597)
55static unsigned short clock = XTAL12;
56module_param(clock, ushort, 0644);
57MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 "
58 "(default=0)");
59#endif
60
61static unsigned short vif = LDRV;
62module_param(vif, ushort, 0644);
63MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0(default=32768)");
64
65static unsigned short endian;
66module_param(endian, ushort, 0644);
67MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)");
68
69static unsigned short irq_sense = 0xff;
70module_param(irq_sense, ushort, 0644);
71MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=32, falling edge=0 "
72 "(default=32)");
73
74static void packet_write(struct r8a66597 *r8a66597, u16 pipenum); 53static void packet_write(struct r8a66597 *r8a66597, u16 pipenum);
75static int r8a66597_get_frame(struct usb_hcd *hcd); 54static int r8a66597_get_frame(struct usb_hcd *hcd);
76 55
@@ -136,7 +115,8 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597)
136 } 115 }
137 } while ((tmp & USBE) != USBE); 116 } while ((tmp & USBE) != USBE);
138 r8a66597_bclr(r8a66597, USBE, SYSCFG0); 117 r8a66597_bclr(r8a66597, USBE, SYSCFG0);
139 r8a66597_mdfy(r8a66597, clock, XTAL, SYSCFG0); 118 r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata), XTAL,
119 SYSCFG0);
140 120
141 i = 0; 121 i = 0;
142 r8a66597_bset(r8a66597, XCKE, SYSCFG0); 122 r8a66597_bset(r8a66597, XCKE, SYSCFG0);
@@ -203,6 +183,9 @@ static void r8a66597_disable_port(struct r8a66597 *r8a66597, int port)
203static int enable_controller(struct r8a66597 *r8a66597) 183static int enable_controller(struct r8a66597 *r8a66597)
204{ 184{
205 int ret, port; 185 int ret, port;
186 u16 vif = r8a66597->pdata->vif ? LDRV : 0;
187 u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
188 u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
206 189
207 ret = r8a66597_clock_enable(r8a66597); 190 ret = r8a66597_clock_enable(r8a66597);
208 if (ret < 0) 191 if (ret < 0)
@@ -2373,7 +2356,7 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev)
2373 return 0; 2356 return 0;
2374} 2357}
2375 2358
2376static int __init r8a66597_probe(struct platform_device *pdev) 2359static int __devinit r8a66597_probe(struct platform_device *pdev)
2377{ 2360{
2378#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) 2361#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
2379 char clk_name[8]; 2362 char clk_name[8];
@@ -2418,6 +2401,12 @@ static int __init r8a66597_probe(struct platform_device *pdev)
2418 goto clean_up; 2401 goto clean_up;
2419 } 2402 }
2420 2403
2404 if (pdev->dev.platform_data == NULL) {
2405 dev_err(&pdev->dev, "no platform data\n");
2406 ret = -ENODEV;
2407 goto clean_up;
2408 }
2409
2421 /* initialize hcd */ 2410 /* initialize hcd */
2422 hcd = usb_create_hcd(&r8a66597_hc_driver, &pdev->dev, (char *)hcd_name); 2411 hcd = usb_create_hcd(&r8a66597_hc_driver, &pdev->dev, (char *)hcd_name);
2423 if (!hcd) { 2412 if (!hcd) {
@@ -2428,6 +2417,8 @@ static int __init r8a66597_probe(struct platform_device *pdev)
2428 r8a66597 = hcd_to_r8a66597(hcd); 2417 r8a66597 = hcd_to_r8a66597(hcd);
2429 memset(r8a66597, 0, sizeof(struct r8a66597)); 2418 memset(r8a66597, 0, sizeof(struct r8a66597));
2430 dev_set_drvdata(&pdev->dev, r8a66597); 2419 dev_set_drvdata(&pdev->dev, r8a66597);
2420 r8a66597->pdata = pdev->dev.platform_data;
2421 r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
2431 2422
2432#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) 2423#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
2433 snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id); 2424 snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
@@ -2458,29 +2449,6 @@ static int __init r8a66597_probe(struct platform_device *pdev)
2458 2449
2459 hcd->rsrc_start = res->start; 2450 hcd->rsrc_start = res->start;
2460 2451
2461 /* irq_sense setting on cmdline takes precedence over resource
2462 * settings, so the introduction of irqflags in IRQ resourse
2463 * won't disturb existing setups */
2464 switch (irq_sense) {
2465 case INTL:
2466 irq_trigger = IRQF_TRIGGER_LOW;
2467 break;
2468 case 0:
2469 irq_trigger = IRQF_TRIGGER_FALLING;
2470 break;
2471 case 0xff:
2472 if (irq_trigger)
2473 irq_sense = (irq_trigger & IRQF_TRIGGER_LOW) ?
2474 INTL : 0;
2475 else {
2476 irq_sense = INTL;
2477 irq_trigger = IRQF_TRIGGER_LOW;
2478 }
2479 break;
2480 default:
2481 dev_err(&pdev->dev, "Unknown irq_sense value.\n");
2482 }
2483
2484 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger); 2452 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
2485 if (ret != 0) { 2453 if (ret != 0) {
2486 dev_err(&pdev->dev, "Failed to add hcd\n"); 2454 dev_err(&pdev->dev, "Failed to add hcd\n");
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index f49208f1bb74..d72680b433f9 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -30,6 +30,8 @@
30#include <linux/clk.h> 30#include <linux/clk.h>
31#endif 31#endif
32 32
33#include <linux/usb/r8a66597.h>
34
33#define SYSCFG0 0x00 35#define SYSCFG0 0x00
34#define SYSCFG1 0x02 36#define SYSCFG1 0x02
35#define SYSSTS0 0x04 37#define SYSSTS0 0x04
@@ -488,6 +490,7 @@ struct r8a66597 {
488#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) 490#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK)
489 struct clk *clk; 491 struct clk *clk;
490#endif 492#endif
493 struct r8a66597_platdata *pdata;
491 struct r8a66597_device device0; 494 struct r8a66597_device device0;
492 struct r8a66597_root_hub root_hub[R8A66597_MAX_ROOT_HUB]; 495 struct r8a66597_root_hub root_hub[R8A66597_MAX_ROOT_HUB];
493 struct list_head pipe_queue[R8A66597_MAX_NUM_PIPE]; 496 struct list_head pipe_queue[R8A66597_MAX_NUM_PIPE];
@@ -506,6 +509,7 @@ struct r8a66597 {
506 unsigned long child_connect_map[4]; 509 unsigned long child_connect_map[4];
507 510
508 unsigned bus_suspended:1; 511 unsigned bus_suspended:1;
512 unsigned irq_sense_low:1;
509}; 513};
510 514
511static inline struct r8a66597 *hcd_to_r8a66597(struct usb_hcd *hcd) 515static inline struct r8a66597 *hcd_to_r8a66597(struct usb_hcd *hcd)
@@ -660,10 +664,36 @@ static inline void r8a66597_port_power(struct r8a66597 *r8a66597, int port,
660{ 664{
661 unsigned long dvstctr_reg = get_dvstctr_reg(port); 665 unsigned long dvstctr_reg = get_dvstctr_reg(port);
662 666
663 if (power) 667 if (r8a66597->pdata->port_power) {
664 r8a66597_bset(r8a66597, VBOUT, dvstctr_reg); 668 r8a66597->pdata->port_power(port, power);
665 else 669 } else {
666 r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg); 670 if (power)
671 r8a66597_bset(r8a66597, VBOUT, dvstctr_reg);
672 else
673 r8a66597_bclr(r8a66597, VBOUT, dvstctr_reg);
674 }
675}
676
677static inline u16 get_xtal_from_pdata(struct r8a66597_platdata *pdata)
678{
679 u16 clock = 0;
680
681 switch (pdata->xtal) {
682 case R8A66597_PLATDATA_XTAL_12MHZ:
683 clock = XTAL12;
684 break;
685 case R8A66597_PLATDATA_XTAL_24MHZ:
686 clock = XTAL24;
687 break;
688 case R8A66597_PLATDATA_XTAL_48MHZ:
689 clock = XTAL48;
690 break;
691 default:
692 printk(KERN_ERR "r8a66597: platdata clock is wrong.\n");
693 break;
694 }
695
696 return clock;
667} 697}
668 698
669#define get_pipectr_addr(pipenum) (PIPE1CTR + (pipenum - 1) * 2) 699#define get_pipectr_addr(pipenum) (PIPE1CTR + (pipenum - 1) * 2)
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index cf5e4cf7ea42..274751b4409c 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -769,7 +769,7 @@ static int uhci_rh_resume(struct usb_hcd *hcd)
769 return rc; 769 return rc;
770} 770}
771 771
772static int uhci_pci_suspend(struct usb_hcd *hcd, pm_message_t message) 772static int uhci_pci_suspend(struct usb_hcd *hcd)
773{ 773{
774 struct uhci_hcd *uhci = hcd_to_uhci(hcd); 774 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
775 int rc = 0; 775 int rc = 0;
@@ -795,10 +795,6 @@ static int uhci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
795 795
796 /* FIXME: Enable non-PME# remote wakeup? */ 796 /* FIXME: Enable non-PME# remote wakeup? */
797 797
798 /* make sure snapshot being resumed re-enumerates everything */
799 if (message.event == PM_EVENT_PRETHAW)
800 uhci_hc_died(uhci);
801
802done_okay: 798done_okay:
803 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 799 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
804done: 800done:
@@ -806,7 +802,7 @@ done:
806 return rc; 802 return rc;
807} 803}
808 804
809static int uhci_pci_resume(struct usb_hcd *hcd) 805static int uhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
810{ 806{
811 struct uhci_hcd *uhci = hcd_to_uhci(hcd); 807 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
812 808
@@ -820,6 +816,10 @@ static int uhci_pci_resume(struct usb_hcd *hcd)
820 816
821 spin_lock_irq(&uhci->lock); 817 spin_lock_irq(&uhci->lock);
822 818
819 /* Make sure resume from hibernation re-enumerates everything */
820 if (hibernated)
821 uhci_hc_died(uhci);
822
823 /* FIXME: Disable non-PME# remote wakeup? */ 823 /* FIXME: Disable non-PME# remote wakeup? */
824 824
825 /* The firmware or a boot kernel may have changed the controller 825 /* The firmware or a boot kernel may have changed the controller
@@ -940,10 +940,11 @@ static struct pci_driver uhci_pci_driver = {
940 .remove = usb_hcd_pci_remove, 940 .remove = usb_hcd_pci_remove,
941 .shutdown = uhci_shutdown, 941 .shutdown = uhci_shutdown,
942 942
943#ifdef CONFIG_PM 943#ifdef CONFIG_PM_SLEEP
944 .suspend = usb_hcd_pci_suspend, 944 .driver = {
945 .resume = usb_hcd_pci_resume, 945 .pm = &usb_hcd_pci_pm_ops
946#endif /* PM */ 946 },
947#endif
947}; 948};
948 949
949static int __init uhci_hcd_init(void) 950static int __init uhci_hcd_init(void)
@@ -961,7 +962,7 @@ static int __init uhci_hcd_init(void)
961 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL); 962 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
962 if (!errbuf) 963 if (!errbuf)
963 goto errbuf_failed; 964 goto errbuf_failed;
964 uhci_debugfs_root = debugfs_create_dir("uhci", NULL); 965 uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
965 if (!uhci_debugfs_root) 966 if (!uhci_debugfs_root)
966 goto debug_failed; 967 goto debug_failed;
967 } 968 }
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 3e5807d14ffb..64e57bfe236b 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -260,7 +260,7 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
260 INIT_LIST_HEAD(&qh->node); 260 INIT_LIST_HEAD(&qh->node);
261 261
262 if (udev) { /* Normal QH */ 262 if (udev) { /* Normal QH */
263 qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; 263 qh->type = usb_endpoint_type(&hep->desc);
264 if (qh->type != USB_ENDPOINT_XFER_ISOC) { 264 if (qh->type != USB_ENDPOINT_XFER_ISOC) {
265 qh->dummy_td = uhci_alloc_td(uhci); 265 qh->dummy_td = uhci_alloc_td(uhci);
266 if (!qh->dummy_td) { 266 if (!qh->dummy_td) {
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
new file mode 100644
index 000000000000..2501c571f855
--- /dev/null
+++ b/drivers/usb/host/xhci-dbg.c
@@ -0,0 +1,485 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include "xhci.h"
24
25#define XHCI_INIT_VALUE 0x0
26
27/* Add verbose debugging later, just print everything for now */
28
29void xhci_dbg_regs(struct xhci_hcd *xhci)
30{
31 u32 temp;
32
33 xhci_dbg(xhci, "// xHCI capability registers at %p:\n",
34 xhci->cap_regs);
35 temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
36 xhci_dbg(xhci, "// @%p = 0x%x (CAPLENGTH AND HCIVERSION)\n",
37 &xhci->cap_regs->hc_capbase, temp);
38 xhci_dbg(xhci, "// CAPLENGTH: 0x%x\n",
39 (unsigned int) HC_LENGTH(temp));
40#if 0
41 xhci_dbg(xhci, "// HCIVERSION: 0x%x\n",
42 (unsigned int) HC_VERSION(temp));
43#endif
44
45 xhci_dbg(xhci, "// xHCI operational registers at %p:\n", xhci->op_regs);
46
47 temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
48 xhci_dbg(xhci, "// @%p = 0x%x RTSOFF\n",
49 &xhci->cap_regs->run_regs_off,
50 (unsigned int) temp & RTSOFF_MASK);
51 xhci_dbg(xhci, "// xHCI runtime registers at %p:\n", xhci->run_regs);
52
53 temp = xhci_readl(xhci, &xhci->cap_regs->db_off);
54 xhci_dbg(xhci, "// @%p = 0x%x DBOFF\n", &xhci->cap_regs->db_off, temp);
55 xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
56}
57
58static void xhci_print_cap_regs(struct xhci_hcd *xhci)
59{
60 u32 temp;
61
62 xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
63
64 temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
65 xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
66 (unsigned int) temp);
67 xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
68 (unsigned int) HC_LENGTH(temp));
69 xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
70 (unsigned int) HC_VERSION(temp));
71
72 temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
73 xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
74 (unsigned int) temp);
75 xhci_dbg(xhci, " Max device slots: %u\n",
76 (unsigned int) HCS_MAX_SLOTS(temp));
77 xhci_dbg(xhci, " Max interrupters: %u\n",
78 (unsigned int) HCS_MAX_INTRS(temp));
79 xhci_dbg(xhci, " Max ports: %u\n",
80 (unsigned int) HCS_MAX_PORTS(temp));
81
82 temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
83 xhci_dbg(xhci, "HCSPARAMS 2: 0x%x\n",
84 (unsigned int) temp);
85 xhci_dbg(xhci, " Isoc scheduling threshold: %u\n",
86 (unsigned int) HCS_IST(temp));
87 xhci_dbg(xhci, " Maximum allowed segments in event ring: %u\n",
88 (unsigned int) HCS_ERST_MAX(temp));
89
90 temp = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
91 xhci_dbg(xhci, "HCSPARAMS 3 0x%x:\n",
92 (unsigned int) temp);
93 xhci_dbg(xhci, " Worst case U1 device exit latency: %u\n",
94 (unsigned int) HCS_U1_LATENCY(temp));
95 xhci_dbg(xhci, " Worst case U2 device exit latency: %u\n",
96 (unsigned int) HCS_U2_LATENCY(temp));
97
98 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
99 xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
100 xhci_dbg(xhci, " HC generates %s bit addresses\n",
101 HCC_64BIT_ADDR(temp) ? "64" : "32");
102 /* FIXME */
103 xhci_dbg(xhci, " FIXME: more HCCPARAMS debugging\n");
104
105 temp = xhci_readl(xhci, &xhci->cap_regs->run_regs_off);
106 xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
107}
108
109static void xhci_print_command_reg(struct xhci_hcd *xhci)
110{
111 u32 temp;
112
113 temp = xhci_readl(xhci, &xhci->op_regs->command);
114 xhci_dbg(xhci, "USBCMD 0x%x:\n", temp);
115 xhci_dbg(xhci, " HC is %s\n",
116 (temp & CMD_RUN) ? "running" : "being stopped");
117 xhci_dbg(xhci, " HC has %sfinished hard reset\n",
118 (temp & CMD_RESET) ? "not " : "");
119 xhci_dbg(xhci, " Event Interrupts %s\n",
120 (temp & CMD_EIE) ? "enabled " : "disabled");
121 xhci_dbg(xhci, " Host System Error Interrupts %s\n",
122 (temp & CMD_EIE) ? "enabled " : "disabled");
123 xhci_dbg(xhci, " HC has %sfinished light reset\n",
124 (temp & CMD_LRESET) ? "not " : "");
125}
126
127static void xhci_print_status(struct xhci_hcd *xhci)
128{
129 u32 temp;
130
131 temp = xhci_readl(xhci, &xhci->op_regs->status);
132 xhci_dbg(xhci, "USBSTS 0x%x:\n", temp);
133 xhci_dbg(xhci, " Event ring is %sempty\n",
134 (temp & STS_EINT) ? "not " : "");
135 xhci_dbg(xhci, " %sHost System Error\n",
136 (temp & STS_FATAL) ? "WARNING: " : "No ");
137 xhci_dbg(xhci, " HC is %s\n",
138 (temp & STS_HALT) ? "halted" : "running");
139}
140
141static void xhci_print_op_regs(struct xhci_hcd *xhci)
142{
143 xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
144 xhci_print_command_reg(xhci);
145 xhci_print_status(xhci);
146}
147
148static void xhci_print_ports(struct xhci_hcd *xhci)
149{
150 u32 __iomem *addr;
151 int i, j;
152 int ports;
153 char *names[NUM_PORT_REGS] = {
154 "status",
155 "power",
156 "link",
157 "reserved",
158 };
159
160 ports = HCS_MAX_PORTS(xhci->hcs_params1);
161 addr = &xhci->op_regs->port_status_base;
162 for (i = 0; i < ports; i++) {
163 for (j = 0; j < NUM_PORT_REGS; ++j) {
164 xhci_dbg(xhci, "%p port %s reg = 0x%x\n",
165 addr, names[j],
166 (unsigned int) xhci_readl(xhci, addr));
167 addr++;
168 }
169 }
170}
171
172void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
173{
174 void *addr;
175 u32 temp;
176
177 addr = &ir_set->irq_pending;
178 temp = xhci_readl(xhci, addr);
179 if (temp == XHCI_INIT_VALUE)
180 return;
181
182 xhci_dbg(xhci, " %p: ir_set[%i]\n", ir_set, set_num);
183
184 xhci_dbg(xhci, " %p: ir_set.pending = 0x%x\n", addr,
185 (unsigned int)temp);
186
187 addr = &ir_set->irq_control;
188 temp = xhci_readl(xhci, addr);
189 xhci_dbg(xhci, " %p: ir_set.control = 0x%x\n", addr,
190 (unsigned int)temp);
191
192 addr = &ir_set->erst_size;
193 temp = xhci_readl(xhci, addr);
194 xhci_dbg(xhci, " %p: ir_set.erst_size = 0x%x\n", addr,
195 (unsigned int)temp);
196
197 addr = &ir_set->rsvd;
198 temp = xhci_readl(xhci, addr);
199 if (temp != XHCI_INIT_VALUE)
200 xhci_dbg(xhci, " WARN: %p: ir_set.rsvd = 0x%x\n",
201 addr, (unsigned int)temp);
202
203 addr = &ir_set->erst_base[0];
204 temp = xhci_readl(xhci, addr);
205 xhci_dbg(xhci, " %p: ir_set.erst_base[0] = 0x%x\n",
206 addr, (unsigned int) temp);
207
208 addr = &ir_set->erst_base[1];
209 temp = xhci_readl(xhci, addr);
210 xhci_dbg(xhci, " %p: ir_set.erst_base[1] = 0x%x\n",
211 addr, (unsigned int) temp);
212
213 addr = &ir_set->erst_dequeue[0];
214 temp = xhci_readl(xhci, addr);
215 xhci_dbg(xhci, " %p: ir_set.erst_dequeue[0] = 0x%x\n",
216 addr, (unsigned int) temp);
217
218 addr = &ir_set->erst_dequeue[1];
219 temp = xhci_readl(xhci, addr);
220 xhci_dbg(xhci, " %p: ir_set.erst_dequeue[1] = 0x%x\n",
221 addr, (unsigned int) temp);
222}
223
224void xhci_print_run_regs(struct xhci_hcd *xhci)
225{
226 u32 temp;
227 int i;
228
229 xhci_dbg(xhci, "xHCI runtime registers at %p:\n", xhci->run_regs);
230 temp = xhci_readl(xhci, &xhci->run_regs->microframe_index);
231 xhci_dbg(xhci, " %p: Microframe index = 0x%x\n",
232 &xhci->run_regs->microframe_index,
233 (unsigned int) temp);
234 for (i = 0; i < 7; ++i) {
235 temp = xhci_readl(xhci, &xhci->run_regs->rsvd[i]);
236 if (temp != XHCI_INIT_VALUE)
237 xhci_dbg(xhci, " WARN: %p: Rsvd[%i] = 0x%x\n",
238 &xhci->run_regs->rsvd[i],
239 i, (unsigned int) temp);
240 }
241}
242
243void xhci_print_registers(struct xhci_hcd *xhci)
244{
245 xhci_print_cap_regs(xhci);
246 xhci_print_op_regs(xhci);
247 xhci_print_ports(xhci);
248}
249
250void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
251{
252 int i;
253 for (i = 0; i < 4; ++i)
254 xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
255 i*4, trb->generic.field[i]);
256}
257
258/**
259 * Debug a transfer request block (TRB).
260 */
261void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
262{
263 u64 address;
264 u32 type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
265
266 switch (type) {
267 case TRB_TYPE(TRB_LINK):
268 xhci_dbg(xhci, "Link TRB:\n");
269 xhci_print_trb_offsets(xhci, trb);
270
271 address = trb->link.segment_ptr[0] +
272 (((u64) trb->link.segment_ptr[1]) << 32);
273 xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
274
275 xhci_dbg(xhci, "Interrupter target = 0x%x\n",
276 GET_INTR_TARGET(trb->link.intr_target));
277 xhci_dbg(xhci, "Cycle bit = %u\n",
278 (unsigned int) (trb->link.control & TRB_CYCLE));
279 xhci_dbg(xhci, "Toggle cycle bit = %u\n",
280 (unsigned int) (trb->link.control & LINK_TOGGLE));
281 xhci_dbg(xhci, "No Snoop bit = %u\n",
282 (unsigned int) (trb->link.control & TRB_NO_SNOOP));
283 break;
284 case TRB_TYPE(TRB_TRANSFER):
285 address = trb->trans_event.buffer[0] +
286 (((u64) trb->trans_event.buffer[1]) << 32);
287 /*
288 * FIXME: look at flags to figure out if it's an address or if
289 * the data is directly in the buffer field.
290 */
291 xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
292 break;
293 case TRB_TYPE(TRB_COMPLETION):
294 address = trb->event_cmd.cmd_trb[0] +
295 (((u64) trb->event_cmd.cmd_trb[1]) << 32);
296 xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
297 xhci_dbg(xhci, "Completion status = %u\n",
298 (unsigned int) GET_COMP_CODE(trb->event_cmd.status));
299 xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
300 break;
301 default:
302 xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
303 (unsigned int) type>>10);
304 xhci_print_trb_offsets(xhci, trb);
305 break;
306 }
307}
308
309/**
310 * Debug a segment with an xHCI ring.
311 *
312 * @return The Link TRB of the segment, or NULL if there is no Link TRB
313 * (which is a bug, since all segments must have a Link TRB).
314 *
315 * Prints out all TRBs in the segment, even those after the Link TRB.
316 *
317 * XXX: should we print out TRBs that the HC owns? As long as we don't
318 * write, that should be fine... We shouldn't expect that the memory pointed to
319 * by the TRB is valid at all. Do we care about ones the HC owns? Probably,
320 * for HC debugging.
321 */
322void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
323{
324 int i;
325 u32 addr = (u32) seg->dma;
326 union xhci_trb *trb = seg->trbs;
327
328 for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
329 trb = &seg->trbs[i];
330 xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
331 (unsigned int) trb->link.segment_ptr[0],
332 (unsigned int) trb->link.segment_ptr[1],
333 (unsigned int) trb->link.intr_target,
334 (unsigned int) trb->link.control);
335 addr += sizeof(*trb);
336 }
337}
338
339void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
340{
341 xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
342 ring->dequeue,
343 (unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
344 ring->dequeue));
345 xhci_dbg(xhci, "Ring deq updated %u times\n",
346 ring->deq_updates);
347 xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
348 ring->enqueue,
349 (unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
350 ring->enqueue));
351 xhci_dbg(xhci, "Ring enq updated %u times\n",
352 ring->enq_updates);
353}
354
355/**
356 * Debugging for an xHCI ring, which is a queue broken into multiple segments.
357 *
358 * Print out each segment in the ring. Check that the DMA address in
359 * each link segment actually matches the segment's stored DMA address.
360 * Check that the link end bit is only set at the end of the ring.
361 * Check that the dequeue and enqueue pointers point to real data in this ring
362 * (not some other ring).
363 */
364void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring)
365{
366 /* FIXME: Throw an error if any segment doesn't have a Link TRB */
367 struct xhci_segment *seg;
368 struct xhci_segment *first_seg = ring->first_seg;
369 xhci_debug_segment(xhci, first_seg);
370
371 if (!ring->enq_updates && !ring->deq_updates) {
372 xhci_dbg(xhci, " Ring has not been updated\n");
373 return;
374 }
375 for (seg = first_seg->next; seg != first_seg; seg = seg->next)
376 xhci_debug_segment(xhci, seg);
377}
378
379void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
380{
381 u32 addr = (u32) erst->erst_dma_addr;
382 int i;
383 struct xhci_erst_entry *entry;
384
385 for (i = 0; i < erst->num_entries; ++i) {
386 entry = &erst->entries[i];
387 xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
388 (unsigned int) addr,
389 (unsigned int) entry->seg_addr[0],
390 (unsigned int) entry->seg_addr[1],
391 (unsigned int) entry->seg_size,
392 (unsigned int) entry->rsvd);
393 addr += sizeof(*entry);
394 }
395}
396
397void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
398{
399 u32 val;
400
401 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
402 xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
403 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
404 xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
405}
406
407void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
408{
409 int i, j;
410 int last_ep_ctx = 31;
411 /* Fields are 32 bits wide, DMA addresses are in bytes */
412 int field_size = 32 / 8;
413
414 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
415 &ctx->drop_flags, (unsigned long long)dma,
416 ctx->drop_flags);
417 dma += field_size;
418 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
419 &ctx->add_flags, (unsigned long long)dma,
420 ctx->add_flags);
421 dma += field_size;
422 for (i = 0; i > 6; ++i) {
423 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
424 &ctx->rsvd[i], (unsigned long long)dma,
425 ctx->rsvd[i], i);
426 dma += field_size;
427 }
428
429 xhci_dbg(xhci, "Slot Context:\n");
430 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
431 &ctx->slot.dev_info,
432 (unsigned long long)dma, ctx->slot.dev_info);
433 dma += field_size;
434 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
435 &ctx->slot.dev_info2,
436 (unsigned long long)dma, ctx->slot.dev_info2);
437 dma += field_size;
438 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
439 &ctx->slot.tt_info,
440 (unsigned long long)dma, ctx->slot.tt_info);
441 dma += field_size;
442 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
443 &ctx->slot.dev_state,
444 (unsigned long long)dma, ctx->slot.dev_state);
445 dma += field_size;
446 for (i = 0; i > 4; ++i) {
447 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
448 &ctx->slot.reserved[i], (unsigned long long)dma,
449 ctx->slot.reserved[i], i);
450 dma += field_size;
451 }
452
453 if (last_ep < 31)
454 last_ep_ctx = last_ep + 1;
455 for (i = 0; i < last_ep_ctx; ++i) {
456 xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
457 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
458 &ctx->ep[i].ep_info,
459 (unsigned long long)dma, ctx->ep[i].ep_info);
460 dma += field_size;
461 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
462 &ctx->ep[i].ep_info2,
463 (unsigned long long)dma, ctx->ep[i].ep_info2);
464 dma += field_size;
465 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
466 &ctx->ep[i].deq[0],
467 (unsigned long long)dma, ctx->ep[i].deq[0]);
468 dma += field_size;
469 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
470 &ctx->ep[i].deq[1],
471 (unsigned long long)dma, ctx->ep[i].deq[1]);
472 dma += field_size;
473 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
474 &ctx->ep[i].tx_info,
475 (unsigned long long)dma, ctx->ep[i].tx_info);
476 dma += field_size;
477 for (j = 0; j < 3; ++j) {
478 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
479 &ctx->ep[i].reserved[j],
480 (unsigned long long)dma,
481 ctx->ep[i].reserved[j], j);
482 dma += field_size;
483 }
484 }
485}
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
new file mode 100644
index 000000000000..ecc131c3fe33
--- /dev/null
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -0,0 +1,145 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22/* Up to 16 microframes to halt an HC - one microframe is 125 microsectonds */
23#define XHCI_MAX_HALT_USEC (16*125)
24/* HC not running - set to 1 when run/stop bit is cleared. */
25#define XHCI_STS_HALT (1<<0)
26
27/* HCCPARAMS offset from PCI base address */
28#define XHCI_HCC_PARAMS_OFFSET 0x10
29/* HCCPARAMS contains the first extended capability pointer */
30#define XHCI_HCC_EXT_CAPS(p) (((p)>>16)&0xffff)
31
32/* Command and Status registers offset from the Operational Registers address */
33#define XHCI_CMD_OFFSET 0x00
34#define XHCI_STS_OFFSET 0x04
35
36#define XHCI_MAX_EXT_CAPS 50
37
38/* Capability Register */
39/* bits 7:0 - how long is the Capabilities register */
40#define XHCI_HC_LENGTH(p) (((p)>>00)&0x00ff)
41
42/* Extended capability register fields */
43#define XHCI_EXT_CAPS_ID(p) (((p)>>0)&0xff)
44#define XHCI_EXT_CAPS_NEXT(p) (((p)>>8)&0xff)
45#define XHCI_EXT_CAPS_VAL(p) ((p)>>16)
46/* Extended capability IDs - ID 0 reserved */
47#define XHCI_EXT_CAPS_LEGACY 1
48#define XHCI_EXT_CAPS_PROTOCOL 2
49#define XHCI_EXT_CAPS_PM 3
50#define XHCI_EXT_CAPS_VIRT 4
51#define XHCI_EXT_CAPS_ROUTE 5
52/* IDs 6-9 reserved */
53#define XHCI_EXT_CAPS_DEBUG 10
54/* USB Legacy Support Capability - section 7.1.1 */
55#define XHCI_HC_BIOS_OWNED (1 << 16)
56#define XHCI_HC_OS_OWNED (1 << 24)
57
58/* USB Legacy Support Capability - section 7.1.1 */
59/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
60#define XHCI_LEGACY_SUPPORT_OFFSET (0x00)
61
62/* USB Legacy Support Control and Status Register - section 7.1.2 */
63/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
64#define XHCI_LEGACY_CONTROL_OFFSET (0x04)
65/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
66#define XHCI_LEGACY_DISABLE_SMI ((0x3 << 1) + (0xff << 5) + (0x7 << 17))
67
68/* command register values to disable interrupts and halt the HC */
69/* start/stop HC execution - do not write unless HC is halted*/
70#define XHCI_CMD_RUN (1 << 0)
71/* Event Interrupt Enable - get irq when EINT bit is set in USBSTS register */
72#define XHCI_CMD_EIE (1 << 2)
73/* Host System Error Interrupt Enable - get irq when HSEIE bit set in USBSTS */
74#define XHCI_CMD_HSEIE (1 << 3)
75/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
76#define XHCI_CMD_EWE (1 << 10)
77
78#define XHCI_IRQS (XHCI_CMD_EIE | XHCI_CMD_HSEIE | XHCI_CMD_EWE)
79
80/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
81#define XHCI_STS_CNR (1 << 11)
82
83#include <linux/io.h>
84
85/**
86 * Return the next extended capability pointer register.
87 *
88 * @base PCI register base address.
89 *
90 * @ext_offset Offset of the 32-bit register that contains the extended
91 * capabilites pointer. If searching for the first extended capability, pass
92 * in XHCI_HCC_PARAMS_OFFSET. If searching for the next extended capability,
93 * pass in the offset of the current extended capability register.
94 *
95 * Returns 0 if there is no next extended capability register or returns the register offset
96 * from the PCI registers base address.
97 */
98static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)
99{
100 u32 next;
101
102 next = readl(base + ext_offset);
103
104 if (ext_offset == XHCI_HCC_PARAMS_OFFSET)
105 /* Find the first extended capability */
106 next = XHCI_HCC_EXT_CAPS(next);
107 else
108 /* Find the next extended capability */
109 next = XHCI_EXT_CAPS_NEXT(next);
110 if (!next)
111 return 0;
112 /*
113 * Address calculation from offset of extended capabilities
114 * (or HCCPARAMS) register - see section 5.3.6 and section 7.
115 */
116 return ext_offset + (next << 2);
117}
118
119/**
120 * Find the offset of the extended capabilities with capability ID id.
121 *
122 * @base PCI MMIO registers base address.
123 * @ext_offset Offset from base of the first extended capability to look at,
124 * or the address of HCCPARAMS.
125 * @id Extended capability ID to search for.
126 *
127 * This uses an arbitrary limit of XHCI_MAX_EXT_CAPS extended capabilities
128 * to make sure that the list doesn't contain a loop.
129 */
130static inline int xhci_find_ext_cap_by_id(void __iomem *base, int ext_offset, int id)
131{
132 u32 val;
133 int limit = XHCI_MAX_EXT_CAPS;
134
135 while (ext_offset && limit > 0) {
136 val = readl(base + ext_offset);
137 if (XHCI_EXT_CAPS_ID(val) == id)
138 break;
139 ext_offset = xhci_find_next_cap_offset(base, ext_offset);
140 limit--;
141 }
142 if (limit > 0)
143 return ext_offset;
144 return 0;
145}
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
new file mode 100644
index 000000000000..dba3e07ccd09
--- /dev/null
+++ b/drivers/usb/host/xhci-hcd.c
@@ -0,0 +1,1274 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/irq.h>
24#include <linux/module.h>
25
26#include "xhci.h"
27
28#define DRIVER_AUTHOR "Sarah Sharp"
29#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
30
31/* TODO: copied from ehci-hcd.c - can this be refactored? */
32/*
33 * handshake - spin reading hc until handshake completes or fails
34 * @ptr: address of hc register to be read
35 * @mask: bits to look at in result of read
36 * @done: value of those bits when handshake succeeds
37 * @usec: timeout in microseconds
38 *
39 * Returns negative errno, or zero on success
40 *
41 * Success happens when the "mask" bits have the specified value (hardware
42 * handshake done). There are two failure modes: "usec" have passed (major
43 * hardware flakeout), or the register reads as all-ones (hardware removed).
44 */
45static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
46 u32 mask, u32 done, int usec)
47{
48 u32 result;
49
50 do {
51 result = xhci_readl(xhci, ptr);
52 if (result == ~(u32)0) /* card removed */
53 return -ENODEV;
54 result &= mask;
55 if (result == done)
56 return 0;
57 udelay(1);
58 usec--;
59 } while (usec > 0);
60 return -ETIMEDOUT;
61}
62
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 microframes of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
 *
 * Returns 0 once STS_HALT is observed, or a negative errno from handshake()
 * (-ENODEV if the controller reads as all-ones, -ETIMEDOUT on timeout).
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	xhci_dbg(xhci, "// Halt the HC\n");
	/* Disable all interrupts from the host controller */
	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	/* Only clear the run/stop bit if the HC is actually still running */
	if (!halted)
		mask &= ~CMD_RUN;

	/* Read-modify-write so all other command register bits are kept */
	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);

	/* Spin until the status register reports HC Halted (or timeout) */
	return handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}
92
/*
 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 *
 * The caller must have already halted the controller; BUG()s otherwise.
 * Returns the result of handshake(): 0 once the HC clears CMD_RESET,
 * or a negative errno on timeout/removal.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;

	/* Resetting a running controller is not allowed by the spec */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	BUG_ON((state & STS_HALT) == 0);

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	/* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;

	/* The HC clears CMD_RESET in the command register when reset is done */
	return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
}
117
/*
 * Stop the HC from processing the endpoint queues.
 *
 * NOTE: currently a stub -- it only sanity-checks that the HC is running
 * and logs; no endpoint or slot is actually disabled yet.
 */
static void xhci_quiesce(struct xhci_hcd *xhci)
{
	/*
	 * Queues are per endpoint, so we need to disable an endpoint or slot.
	 *
	 * To disable a slot, we need to insert a disable slot command on the
	 * command ring and ring the doorbell.  This will also free any internal
	 * resources associated with the slot (which might not be what we want).
	 *
	 * A Release Endpoint command sounds better - doesn't free internal HC
	 * memory, but removes the endpoints from the schedule and releases the
	 * bandwidth, disables the doorbells, and clears the endpoint enable
	 * flag.  Usually used prior to a set interface command.
	 *
	 * TODO: Implement after command ring code is done.
	 */
	BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
	xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
}
140
#if 0
/*
 * NOTE: this whole MSI-X block is compiled out until MSI support is wired
 * up (see the matching "#if 0" in xhci_run()/xhci_stop()).
 */
/* Set up MSI-X table for entry 0 (may claim other entries later) */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	xhci->msix_count = 0;
	/* XXX: did I do this right?  ixgbe does kcalloc for more than one */
	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}
	xhci->msix_entries[0].entry = 0;

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	/*
	 * Pass the xhci pointer value as the request_irq "cookie".
	 * If more irqs are added, this will need to be unique for each one.
	 */
	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
			"xHCI", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
		goto disable_msix;
	}
	xhci_dbg(xhci, "Finished setting up MSI-X\n");
	return 0;

	/* goto-based unwind: release resources in reverse acquisition order */
disable_msix:
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* XXX: code duplication; can xhci_setup_msix call this? */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/* Nothing to do if MSI-X was never set up (or already cleaned up) */
	if (!xhci->msix_entries)
		return;

	free_irq(xhci->msix_entries[0].vector, xhci);
	pci_disable_msix(pdev);
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
}
#endif
199
200/*
201 * Initialize memory for HCD and xHC (one-time init).
202 *
203 * Program the PAGESIZE register, initialize the device context array, create
204 * device contexts (?), set up a command ring segment (or two?), create event
205 * ring (one for now).
206 */
207int xhci_init(struct usb_hcd *hcd)
208{
209 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
210 int retval = 0;
211
212 xhci_dbg(xhci, "xhci_init\n");
213 spin_lock_init(&xhci->lock);
214 retval = xhci_mem_init(xhci, GFP_KERNEL);
215 xhci_dbg(xhci, "Finished xhci_init\n");
216
217 return retval;
218}
219
/*
 * Called in interrupt context when there might be work
 * queued on the event ring
 *
 * xhci->lock must be held by caller.
 */
static void xhci_work(struct xhci_hcd *xhci)
{
	u32 temp;

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp |= STS_EINT;
	xhci_writel(xhci, temp, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	/* Acknowledge the interrupt */
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	/* NOTE(review): 0x3 presumably sets the IP and IE bits of
	 * irq_pending -- confirm against the interrupter register layout. */
	temp |= 0x3;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
	/* Flush posted writes */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);

	/* FIXME this should be a delayed service routine that clears the EHB */
	xhci_handle_event(xhci);

	/* Clear the event handler busy flag; the event ring should be empty. */
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
	xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
	/* Flush posted writes -- FIXME is this necessary? */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);
}
257
258/*-------------------------------------------------------------------------*/
259
260/*
261 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
262 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
263 * indicators of an event TRB error, but we check the status *first* to be safe.
264 */
265irqreturn_t xhci_irq(struct usb_hcd *hcd)
266{
267 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
268 u32 temp, temp2;
269
270 spin_lock(&xhci->lock);
271 /* Check if the xHC generated the interrupt, or the irq is shared */
272 temp = xhci_readl(xhci, &xhci->op_regs->status);
273 temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
274 if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
275 spin_unlock(&xhci->lock);
276 return IRQ_NONE;
277 }
278
279 if (temp & STS_FATAL) {
280 xhci_warn(xhci, "WARNING: Host System Error\n");
281 xhci_halt(xhci);
282 xhci_to_hcd(xhci)->state = HC_STATE_HALT;
283 spin_unlock(&xhci->lock);
284 return -ESHUTDOWN;
285 }
286
287 xhci_work(xhci);
288 spin_unlock(&xhci->lock);
289
290 return IRQ_HANDLED;
291}
292
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
/*
 * Debug-build timer callback: periodically dump the controller state
 * (status registers, event/command/endpoint rings) and optionally queue a
 * test no-op command.  Rearms itself every POLL_TIMEOUT seconds until
 * xhci->zombie is set by xhci_stop().
 *
 * @arg: the struct xhci_hcd pointer, cast through unsigned long (the
 *       pre-timer_setup() kernel timer calling convention).
 */
void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	/* Reset the accumulated error bits after reporting them */
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
	temp &= ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	/* Dump every allocated endpoint ring of every active device slot */
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (xhci->devs[i]) {
			for (j = 0; j < 31; ++j) {
				if (xhci->devs[i]->ep_rings[j]) {
					xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
					xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
				}
			}
		}
	}

	/* Keep exercising the command ring until NUM_TEST_NOOPS are queued */
	if (xhci->noops_submitted != NUM_TEST_NOOPS)
		if (xhci_setup_one_noop(xhci))
			xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Rearm unless xhci_stop() told us to die (zombie flag) */
	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif
343
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 *
 * Returns 0 (the MSI-X failure path is compiled out for now).
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	void (*doorbell)(struct xhci_hcd *) = NULL;

	hcd->uses_new_polling = 1;
	hcd->poll_rh = 0;

	xhci_dbg(xhci, "xhci_run\n");
#if 0 /* FIXME: MSI not setup yet */
	/* Do this at the very last minute */
	ret = xhci_setup_msix(xhci);
	if (!ret)
		return ret;

	return -ENOSYS;
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Debug builds: poll/dump the event ring periodically */
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	/* NOTE(review): 160 is presumably the moderation interval in the
	 * units defined by the xHCI spec -- confirm and name this constant. */
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	hcd->state = HC_STATE_RUNNING;
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/* Enable the event ring interrupter (must follow CMD_EIE above) */
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/* Optionally queue a test no-op command before starting the HC */
	if (NUM_TEST_NOOPS > 0)
		doorbell = xhci_setup_one_noop(xhci);

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
	temp &= ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
	xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);

	/* Finally set the run/stop bit to start the controller */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);
	/* Flush PCI posted writes */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
	/* Ring the doorbell for the queued no-op only after the HC runs */
	if (doorbell)
		(*doorbell)(xhci);

	xhci_dbg(xhci, "Finished xhci_run\n");
	return 0;
}
439
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Quiesce/halt/reset under the lock so no new work is started */
	spin_lock_irq(&xhci->lock);
	if (HC_IS_RUNNING(hcd->state))
		xhci_quiesce(xhci);
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0	/* No MSI yet */
	xhci_cleanup_msix(xhci);
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/* Free all driver-owned rings/contexts now that the HC is reset */
	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
		    xhci_readl(xhci, &xhci->op_regs->status));
}
483
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Just halt the controller; no reset or memory cleanup needed */
	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0
	xhci_cleanup_msix(xhci);
#endif

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
		    xhci_readl(xhci, &xhci->op_regs->status));
}
506
507/*-------------------------------------------------------------------------*/
508
/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int base = (unsigned int) (usb_endpoint_num(desc) * 2);

	/* Control endpoints collapse to the IN index */
	if (usb_endpoint_xfer_control(desc))
		return base;
	return base + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
}
529
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	/* Offset by one: bit 0 belongs to the slot context */
	unsigned int shift = xhci_get_endpoint_index(desc) + 1;

	return 1 << shift;
}
538
539/* Compute the last valid endpoint context index. Basically, this is the
540 * endpoint index plus one. For slot contexts with more than valid endpoint,
541 * we find the most significant bit set in the added contexts flags.
542 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
543 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
544 */
545static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
546{
547 return fls(added_ctxs) - 1;
548}
549
550/* Returns 1 if the arguments are OK;
551 * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
552 */
553int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
554 struct usb_host_endpoint *ep, int check_ep, const char *func) {
555 if (!hcd || (check_ep && !ep) || !udev) {
556 printk(KERN_DEBUG "xHCI %s called with invalid args\n",
557 func);
558 return -EINVAL;
559 }
560 if (!udev->parent) {
561 printk(KERN_DEBUG "xHCI %s called for root hub\n",
562 func);
563 return 0;
564 }
565 if (!udev->slot_id) {
566 printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
567 func);
568 return -EINVAL;
569 }
570 return 1;
571}
572
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 *
 * Returns 0 on successful queuing, -EINVAL for bad arguments or
 * unsupported endpoint types, -ESHUTDOWN during PCI suspend, or the
 * error from the ring-queuing helpers.
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	/* Hold the lock across the slot check and the TRB queuing */
	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci->devs || !xhci->devs[slot_id]) {
		if (!in_interrupt())
			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
		ret = -EINVAL;
		goto exit;
	}
	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}
	/* Dispatch on transfer type; only control and bulk are wired up */
	if (usb_endpoint_xfer_control(&urb->ep->desc))
		ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
				slot_id, ep_index);
	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
		ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
				slot_id, ep_index);
	else
		ret = -EINVAL;
exit:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
615
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate the any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations on happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 *
 * Returns 0 on success, or the error from usb_hcd_check_unlink_urb().
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret;
	struct xhci_hcd *xhci;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
	/* The TD for this URB was stashed in hcpriv at enqueue time */
	td = (struct xhci_td *) urb->hcpriv;

	/* Bookkeeping first: the TD must be on the cancelled list before
	 * the Stop Endpoint command's completion handler can run.
	 */
	ep_ring->cancels_pending++;
	list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (ep_ring->cancels_pending == 1) {
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
681
682/* Drop an endpoint from a new bandwidth configuration for this device.
683 * Only one call to this function is allowed per endpoint before
684 * check_bandwidth() or reset_bandwidth() must be called.
685 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
686 * add the endpoint to the schedule with possibly new parameters denoted by a
687 * different endpoint descriptor in usb_host_endpoint.
688 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
689 * not allowed.
690 *
691 * The USB core will not allow URBs to be queued to an endpoint that is being
692 * disabled, so there's no need for mutual exclusion to protect
693 * the xhci->devs[slot_id] structure.
694 */
695int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
696 struct usb_host_endpoint *ep)
697{
698 struct xhci_hcd *xhci;
699 struct xhci_device_control *in_ctx;
700 unsigned int last_ctx;
701 unsigned int ep_index;
702 struct xhci_ep_ctx *ep_ctx;
703 u32 drop_flag;
704 u32 new_add_flags, new_drop_flags, new_slot_info;
705 int ret;
706
707 ret = xhci_check_args(hcd, udev, ep, 1, __func__);
708 if (ret <= 0)
709 return ret;
710 xhci = hcd_to_xhci(hcd);
711 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
712
713 drop_flag = xhci_get_endpoint_flag(&ep->desc);
714 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
715 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
716 __func__, drop_flag);
717 return 0;
718 }
719
720 if (!xhci->devs || !xhci->devs[udev->slot_id]) {
721 xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
722 __func__);
723 return -EINVAL;
724 }
725
726 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
727 ep_index = xhci_get_endpoint_index(&ep->desc);
728 ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
729 /* If the HC already knows the endpoint is disabled,
730 * or the HCD has noted it is disabled, ignore this request
731 */
732 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
733 in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
734 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
735 __func__, ep);
736 return 0;
737 }
738
739 in_ctx->drop_flags |= drop_flag;
740 new_drop_flags = in_ctx->drop_flags;
741
742 in_ctx->add_flags = ~drop_flag;
743 new_add_flags = in_ctx->add_flags;
744
745 last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
746 /* Update the last valid endpoint context, if we deleted the last one */
747 if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
748 in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
749 in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
750 }
751 new_slot_info = in_ctx->slot.dev_info;
752
753 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
754
755 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
756 (unsigned int) ep->desc.bEndpointAddress,
757 udev->slot_id,
758 (unsigned int) new_drop_flags,
759 (unsigned int) new_add_flags,
760 (unsigned int) new_slot_info);
761 return 0;
762}
763
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 *
 * Returns 0 on success (including the ignore cases), -EINVAL for an
 * unaddressed device or bad arguments, -ENOMEM if the endpoint context
 * could not be initialized.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_device_control *in_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	/* The slot context and endpoint 0 can't be added this way */
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
				udev, ep, GFP_KERNEL) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	in_ctx->add_flags |= added_ctxs;
	new_add_flags = in_ctx->add_flags;

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = in_ctx->drop_flags;

	/* Update the last valid endpoint context, if we just added one past */
	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = in_ctx->slot.dev_info;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
862
863static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
864{
865 struct xhci_ep_ctx *ep_ctx;
866 int i;
867
868 /* When a device's add flag and drop flag are zero, any subsequent
869 * configure endpoint command will leave that endpoint's state
870 * untouched. Make sure we don't leave any old state in the input
871 * endpoint contexts.
872 */
873 virt_dev->in_ctx->drop_flags = 0;
874 virt_dev->in_ctx->add_flags = 0;
875 virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
876 /* Endpoint 0 is always valid */
877 virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
878 for (i = 1; i < 31; ++i) {
879 ep_ctx = &virt_dev->in_ctx->ep[i];
880 ep_ctx->ep_info = 0;
881 ep_ctx->ep_info2 = 0;
882 ep_ctx->deq[0] = 0;
883 ep_ctx->deq[1] = 0;
884 ep_ctx->tx_info = 0;
885 }
886}
887
888/* Called after one or more calls to xhci_add_endpoint() or
889 * xhci_drop_endpoint(). If this call fails, the USB core is expected
890 * to call xhci_reset_bandwidth().
891 *
892 * Since we are in the middle of changing either configuration or
893 * installing a new alt setting, the USB core won't allow URBs to be
894 * enqueued for any endpoint on the old config or interface. Nothing
895 * else should be touching the xhci->devs[slot_id] structure, so we
896 * don't need to take the xhci->lock for manipulating that.
897 */
/*
 * Returns 0 on success; -ENOMEM, -ENOSPC, -EINVAL, or -ETIME on failure,
 * in which case the caller is expected to invoke xhci_reset_bandwidth()
 * to throw away the pending endpoint changes.
 */
898int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
899{
900	int i;
901	int ret = 0;
902	int timeleft;
903	unsigned long flags;
904	struct xhci_hcd *xhci;
905	struct xhci_virt_device *virt_dev;
906
907	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
908	if (ret <= 0)
909		return ret;
910	xhci = hcd_to_xhci(hcd);
911
912	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
913		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
914				__func__);
915		return -EINVAL;
916	}
917	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
918	virt_dev = xhci->devs[udev->slot_id];
919
920	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
921	virt_dev->in_ctx->add_flags |= SLOT_FLAG;
922	virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
923	virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
924	virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
925	xhci_dbg(xhci, "New Input Control Context:\n");
926	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
927			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
928
	/* Queue the Configure Endpoint command and ring the doorbell under the lock. */
929	spin_lock_irqsave(&xhci->lock, flags);
930	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
931			udev->slot_id);
932	if (ret < 0) {
933		spin_unlock_irqrestore(&xhci->lock, flags);
934		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
935		return -ENOMEM;
936	}
937	xhci_ring_cmd_db(xhci);
938	spin_unlock_irqrestore(&xhci->lock, flags);
939
940	/* Wait for the configure endpoint command to complete */
941	timeleft = wait_for_completion_interruptible_timeout(
942			&virt_dev->cmd_completion,
943			USB_CTRL_SET_TIMEOUT);
944	if (timeleft <= 0) {
945		xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
946				timeleft == 0 ? "Timeout" : "Signal");
947		/* FIXME cancel the configure endpoint command */
948		return -ETIME;
949	}
950
	/* cmd_status was recorded by the command-completion event handler. */
951	switch (virt_dev->cmd_status) {
952	case COMP_ENOMEM:
953		dev_warn(&udev->dev, "Not enough host controller resources "
954				"for new device state.\n");
955		ret = -ENOMEM;
956		/* FIXME: can we allocate more resources for the HC? */
957		break;
958	case COMP_BW_ERR:
959		dev_warn(&udev->dev, "Not enough bandwidth "
960				"for new device state.\n");
961		ret = -ENOSPC;
962		/* FIXME: can we go back to the old state? */
963		break;
964	case COMP_TRB_ERR:
965		/* the HCD set up something wrong */
966		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
967				"and endpoint is not disabled.\n");
968		ret = -EINVAL;
969		break;
970	case COMP_SUCCESS:
971		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
972		break;
973	default:
974		xhci_err(xhci, "ERROR: unexpected command completion "
975				"code 0x%x.\n", virt_dev->cmd_status);
976		ret = -EINVAL;
977		break;
978	}
979	if (ret) {
980		/* Callee should call reset_bandwidth() */
981		return ret;
982	}
983
984	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
985	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
986			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
987
988	xhci_zero_in_ctx(virt_dev);
989	/* Free any old rings */
	/* Index 0 (ep0) is intentionally skipped: its ring is never replaced here. */
990	for (i = 1; i < 31; ++i) {
991		if (virt_dev->new_ep_rings[i]) {
992			xhci_ring_free(xhci, virt_dev->ep_rings[i]);
993			virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
994			virt_dev->new_ep_rings[i] = NULL;
995		}
996	}
997
998	return ret;
999}
1000
/*
 * Undo a pending set of xhci_add_endpoint()/xhci_drop_endpoint() calls.
 * Called by the USB core when xhci_check_bandwidth() fails: throws away
 * the not-yet-installed endpoint rings and clears the input context so
 * the next configure attempt starts clean.  No commands are issued to
 * the hardware here.
 */
1001void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1002{
1003	struct xhci_hcd *xhci;
1004	struct xhci_virt_device *virt_dev;
1005	int i, ret;
1006
1007	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
1008	if (ret <= 0)
1009		return;
1010	xhci = hcd_to_xhci(hcd);
1011
	/* NOTE(review): unlike xhci_check_bandwidth() this does not reject
	 * udev->slot_id == 0 before indexing xhci->devs — confirm slot 0
	 * cannot reach here. */
1012	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1013		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1014				__func__);
1015		return;
1016	}
1017	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1018	virt_dev = xhci->devs[udev->slot_id];
1019	/* Free any rings allocated for added endpoints */
	/* Loop starts at 0 (vs 1 in xhci_check_bandwidth); presumably
	 * new_ep_rings[0] is never populated, so this is harmless. */
1020	for (i = 0; i < 31; ++i) {
1021		if (virt_dev->new_ep_rings[i]) {
1022			xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
1023			virt_dev->new_ep_rings[i] = NULL;
1024		}
1025	}
1026	xhci_zero_in_ctx(virt_dev);
1027}
1028
1029/*
1030 * At this point, the struct usb_device is about to go away, the device has
1031 * disconnected, and all traffic has been stopped and the endpoints have been
1032 * disabled.  Free any HC data structures associated with that device.
1033 */
1034void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
1035{
1036	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1037	unsigned long flags;
1038
	/* Slot 0 is reserved; nothing was ever allocated for it. */
1039	if (udev->slot_id == 0)
1040		return;
1041
1042	spin_lock_irqsave(&xhci->lock, flags);
1043	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
1044		spin_unlock_irqrestore(&xhci->lock, flags);
		/* NOTE(review): on a full command ring the virt device is never
		 * freed — the structures leak until the HC is torn down. */
1045		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
1046		return;
1047	}
1048	xhci_ring_cmd_db(xhci);
1049	spin_unlock_irqrestore(&xhci->lock, flags);
1050	/*
1051	 * Event command completion handler will free any data structures
1052	 * associated with the slot.  XXX Can free sleep?
1053	 */
1054}
1055
1056/*
1057 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
1058 * timed out, or allocating memory failed.  Returns 1 on success.
1059 */
1060int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
1061{
1062	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1063	unsigned long flags;
1064	int timeleft;
1065	int ret;
1066
	/* Queue an Enable Slot command; the slot number comes back in the
	 * command-completion event. */
1067	spin_lock_irqsave(&xhci->lock, flags);
1068	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
1069	if (ret) {
1070		spin_unlock_irqrestore(&xhci->lock, flags);
1071		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
1072		return 0;
1073	}
1074	xhci_ring_cmd_db(xhci);
1075	spin_unlock_irqrestore(&xhci->lock, flags);
1076
1077	/* XXX: how much time for xHC slot assignment? */
1078	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
1079			USB_CTRL_SET_TIMEOUT);
1080	if (timeleft <= 0) {
1081		xhci_warn(xhci, "%s while waiting for a slot\n",
1082				timeleft == 0 ? "Timeout" : "Signal");
1083		/* FIXME cancel the enable slot request */
1084		return 0;
1085	}
1086
	/* xhci->slot_id is filled in by the completion handler; 0 means the
	 * controller had no free slots. */
1087	if (!xhci->slot_id) {
1088		xhci_err(xhci, "Error while assigning device slot ID\n");
1089		return 0;
1090	}
1091	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
1092	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
1093		/* Disable slot, if we can do it without mem alloc */
1094		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
1095		spin_lock_irqsave(&xhci->lock, flags);
1096		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
1097			xhci_ring_cmd_db(xhci);
1098		spin_unlock_irqrestore(&xhci->lock, flags);
1099		return 0;
1100	}
1101	udev->slot_id = xhci->slot_id;
1102	/* Is this a LS or FS device under a HS hub? */
1103	/* Hub or peripherial? */
1104	return 1;
1105}
1106
1107/*
1108 * Issue an Address Device command (which will issue a SetAddress request to
1109 * the device).
1110 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
1111 * we should only issue and wait on one address command at the same time.
1112 *
1113 * We add one to the device address issued by the hardware because the USB core
1114 * uses address 1 for the root hubs (even though they're not really devices).
1115 */
1116int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1117{
1118	unsigned long flags;
1119	int timeleft;
1120	struct xhci_virt_device *virt_dev;
1121	int ret = 0;
1122	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1123	u32 temp;
1124
1125	if (!udev->slot_id) {
1126		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
1127		return -EINVAL;
1128	}
1129
1130	virt_dev = xhci->devs[udev->slot_id];
1131
1132	/* If this is a Set Address to an unconfigured device, setup ep 0 */
1133	if (!udev->config)
1134		xhci_setup_addressable_virt_dev(xhci, udev);
1135	/* Otherwise, assume the core has the device configured how it wants */
1136
1137	spin_lock_irqsave(&xhci->lock, flags);
1138	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
1139			udev->slot_id);
1140	if (ret) {
1141		spin_unlock_irqrestore(&xhci->lock, flags);
1142		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
1143		return ret;
1144	}
1145	xhci_ring_cmd_db(xhci);
1146	spin_unlock_irqrestore(&xhci->lock, flags);
1147
1148	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
1149	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
1150			USB_CTRL_SET_TIMEOUT);
1151	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
1152	 * the SetAddress() "recovery interval" required by USB and aborting the
1153	 * command on a timeout.
1154	 */
1155	if (timeleft <= 0) {
		/* NOTE(review): message says "waiting for a slot" but this path
		 * waits on the Address Device command — looks copy-pasted from
		 * xhci_alloc_dev(); consider fixing the string. */
1156		xhci_warn(xhci, "%s while waiting for a slot\n",
1157				timeleft == 0 ? "Timeout" : "Signal");
1158		/* FIXME cancel the address device command */
1159		return -ETIME;
1160	}
1161
	/* cmd_status was recorded by the command-completion event handler. */
1162	switch (virt_dev->cmd_status) {
1163	case COMP_CTX_STATE:
1164	case COMP_EBADSLT:
1165		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
1166				udev->slot_id);
1167		ret = -EINVAL;
1168		break;
1169	case COMP_TX_ERR:
1170		dev_warn(&udev->dev, "Device not responding to set address.\n");
1171		ret = -EPROTO;
1172		break;
1173	case COMP_SUCCESS:
1174		xhci_dbg(xhci, "Successful Address Device command\n");
1175		break;
1176	default:
1177		xhci_err(xhci, "ERROR: unexpected command completion "
1178				"code 0x%x.\n", virt_dev->cmd_status);
1179		ret = -EINVAL;
1180		break;
1181	}
1182	if (ret) {
1183		return ret;
1184	}
	/* Debug dumps: 64-bit DCBAA fields are read as two 32-bit halves. */
1185	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
1186	xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
1187	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
1188	xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
1189	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
1190			udev->slot_id,
1191			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
1192			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
1193	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
1194			udev->slot_id,
1195			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
1196			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
1197	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
1198			(unsigned long long)virt_dev->out_ctx_dma);
1199	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
1200	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
1201	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
1202	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
1203	/*
1204	 * USB core uses address 1 for the roothubs, so we add one to the
1205	 * address given back to us by the HC.
1206	 */
1207	udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
1208	/* Zero the input context control for later use */
1209	virt_dev->in_ctx->add_flags = 0;
1210	virt_dev->in_ctx->drop_flags = 0;
1211	/* Mirror flags in the output context for future ep enable/disable */
1212	virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
1213	virt_dev->out_ctx->drop_flags = 0;
1214
1215	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
1216	/* XXX Meh, not sure if anyone else but choose_address uses this. */
1217	set_bit(udev->devnum, udev->bus->devmap.devicemap);
1218
1219	return 0;
1220}
1221
1222int xhci_get_frame(struct usb_hcd *hcd)
1223{
1224 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1225 /* EHCI mods by the periodic size. Why? */
1226 return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
1227}
1228
1229MODULE_DESCRIPTION(DRIVER_DESC);
1230MODULE_AUTHOR(DRIVER_AUTHOR);
1231MODULE_LICENSE("GPL");
1232
1233static int __init xhci_hcd_init(void)
1234{
1235#ifdef CONFIG_PCI
1236 int retval = 0;
1237
1238 retval = xhci_register_pci();
1239
1240 if (retval < 0) {
1241 printk(KERN_DEBUG "Problem registering PCI driver.");
1242 return retval;
1243 }
1244#endif
1245 /*
1246 * Check the compiler generated sizes of structures that must be laid
1247 * out in specific ways for hardware access.
1248 */
1249 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
1250 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
1251 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
1252 /* xhci_device_control has eight fields, and also
1253 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
1254 */
1255 BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
1256 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
1257 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
1258 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
1259 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
1260 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
1261 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
1262 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
1263 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
1264 return 0;
1265}
1266module_init(xhci_hcd_init);
1267
/* Module exit: unregister the PCI glue driver registered at init time. */
1268static void __exit xhci_hcd_cleanup(void)
1269{
1270#ifdef CONFIG_PCI
1271	xhci_unregister_pci();
1272#endif
1273}
1274module_exit(xhci_hcd_cleanup);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
new file mode 100644
index 000000000000..eac5b53aa9e7
--- /dev/null
+++ b/drivers/usb/host/xhci-hub.c
@@ -0,0 +1,308 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <asm/unaligned.h>
24
25#include "xhci.h"
26
/*
 * Fill in a hub descriptor for the xHCI root hub.  For now a USB 2.0
 * style descriptor (type 0x29) is faked even for USB 3.0 ports.
 */
27static void xhci_hub_descriptor(struct xhci_hcd *xhci,
28		struct usb_hub_descriptor *desc)
29{
30	int ports;
31	u16 temp;
32
33	ports = HCS_MAX_PORTS(xhci->hcs_params1);
34
35	/* USB 3.0 hubs have a different descriptor, but we fake this for now */
36	desc->bDescriptorType = 0x29;
37	desc->bPwrOn2PwrGood = 10;	/* xhci section 5.4.9 says 20ms max */
38	desc->bHubContrCurrent = 0;
39
40	desc->bNbrPorts = ports;
	/* temp = number of bytes in each port bitmap (one bit per port + bit 0) */
41	temp = 1 + (ports / 8);
42	desc->bDescLength = 7 + 2 * temp;
43
44	/* Why does core/hcd.h define bitmap? It's just confusing. */
	/* DeviceRemovable = all removable; PortPwrCtrlMask = all ones.
	 * NOTE(review): writes 2*temp bytes — assumes the descriptor buffer
	 * is large enough for the maximum port count; confirm against caller. */
45	memset(&desc->DeviceRemovable[0], 0, temp);
46	memset(&desc->DeviceRemovable[temp], 0xff, temp);
47
48	/* Ugh, these should be #defines, FIXME */
49	/* Using table 11-13 in USB 2.0 spec. */
50	temp = 0;
51	/* Bits 1:0 - support port power switching, or power always on */
52	if (HCC_PPC(xhci->hcc_params))
53		temp |= 0x0001;
54	else
55		temp |= 0x0002;
56	/* Bit 2 - root hubs are not part of a compound device */
57	/* Bits 4:3 - individual port over current protection */
58	temp |= 0x0008;
59	/* Bits 6:5 - no TTs in root ports */
60	/* Bit 7 - no port indicators */
61	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp);
62}
63
/*
 * Translate a PORTSC speed field into the USB_PORT_FEAT_*SPEED status
 * bit expected by the USB core; 0 means "full speed" (the core default).
 */
64static unsigned int xhci_port_speed(unsigned int port_status)
65{
66	if (DEV_LOWSPEED(port_status))
67		return 1 << USB_PORT_FEAT_LOWSPEED;
68	if (DEV_HIGHSPEED(port_status))
69		return 1 << USB_PORT_FEAT_HIGHSPEED;
70	if (DEV_SUPERSPEED(port_status))
71		return 1 << USB_PORT_FEAT_SUPERSPEED;
72	/*
73	 * FIXME: Yes, we should check for full speed, but the core uses that as
74	 * a default in portspeed() in usb/core/hub.c (which is the only place
75	 * USB_PORT_FEAT_*SPEED is used).
76	 */
77	return 0;
78}
79
80/*
81 * These bits are Read Only (RO) and should be saved and written to the
82 * registers: 0, 3, 10:13, 30
83 * connect status, over-current status, port speed, and device removable.
84 * connect status and port speed are also sticky - meaning they're in
85 * the AUX well and they aren't changed by a hot, warm, or cold reset.
86 */
87#define XHCI_PORT_RO ((1<<0) | (1<<3) | (0xf<<10) | (1<<30))
88/*
89 * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
90 * bits 5:8, 9, 14:15, 25:27
91 * link state, port power, port indicator state, "wake on" enable state
92 */
93#define XHCI_PORT_RWS ((0xf<<5) | (1<<9) | (0x3<<14) | (0x7<<25))
94/*
95 * These bits are RW; writing a 1 sets the bit, writing a 0 has no effect:
96 * bit 4 (port reset)
97 */
98#define XHCI_PORT_RW1S ((1<<4))
99/*
100 * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
101 * bits 1, 17, 18, 19, 20, 21, 22, 23
102 * port enable/disable, and
103 * change bits: connect, PED, warm port reset changed (reserved zero for USB 2.0 ports),
104 * over-current, reset, link state, and L1 change
105 */
106#define XHCI_PORT_RW1CS ((1<<1) | (0x7f<<17))
107/*
108 * Bit 16 is RW, and writing a '1' to it causes the link state control to be
109 * latched in
110 */
111#define XHCI_PORT_RW ((1<<16))
112/*
113 * These bits are Reserved Zero (RsvdZ) and zero should be written to them:
114 * bits 2, 24, 28:31
115 */
116#define XHCI_PORT_RZ ((1<<2) | (1<<24) | (0xf<<28))
117
118/*
119 * Given a port state, this function returns a value that would result in the
120 * port being in the same state, if the value was written to the port status
121 * control register.
122 * Save Read Only (RO) bits and save read/write bits where
123 * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
124 * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
125 */
126static u32 xhci_port_state_to_neutral(u32 state)
127{
128	/* Save read-only status and port state */
	/* RW1S/RW1CS/RW/RZ bits are dropped: writing 0 to them has no effect,
	 * so the returned value can be written back without changing state. */
129	return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
130}
131
/*
 * Root-hub control request dispatcher (hub class requests only).
 * Runs entirely under xhci->lock with interrupts disabled.
 * Returns 0 on success or -EPIPE ("stall") for invalid/unsupported
 * requests.
 */
132int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
133		u16 wIndex, char *buf, u16 wLength)
134{
135	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
136	int ports;
137	unsigned long flags;
138	u32 temp, status;
139	int retval = 0;
140	u32 __iomem *addr;
	/* Only read after assignment: every switch path either sets this
	 * or jumps to the error label first. */
141	char *port_change_bit;
142
143	ports = HCS_MAX_PORTS(xhci->hcs_params1);
144
145	spin_lock_irqsave(&xhci->lock, flags);
146	switch (typeReq) {
147	case GetHubStatus:
148		/* No power source, over-current reported per port */
149		memset(buf, 0, 4);
150		break;
151	case GetHubDescriptor:
152		xhci_hub_descriptor(xhci, (struct usb_hub_descriptor *) buf);
153		break;
154	case GetPortStatus:
		/* wIndex is 1-based from the USB core; 0-based for registers */
155		if (!wIndex || wIndex > ports)
156			goto error;
157		wIndex--;
158		status = 0;
159		addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
160		temp = xhci_readl(xhci, addr);
161		xhci_dbg(xhci, "get port status, actual port %d status  = 0x%x\n", wIndex, temp);
162
163		/* wPortChange bits */
164		if (temp & PORT_CSC)
165			status |= 1 << USB_PORT_FEAT_C_CONNECTION;
166		if (temp & PORT_PEC)
167			status |= 1 << USB_PORT_FEAT_C_ENABLE;
168		if ((temp & PORT_OCC))
169			status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
170		/*
171		 * FIXME ignoring suspend, reset, and USB 2.1/3.0 specific
172		 * changes
173		 */
174		if (temp & PORT_CONNECT) {
175			status |= 1 << USB_PORT_FEAT_CONNECTION;
176			status |= xhci_port_speed(temp);
177		}
178		if (temp & PORT_PE)
179			status |= 1 << USB_PORT_FEAT_ENABLE;
180		if (temp & PORT_OC)
181			status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
182		if (temp & PORT_RESET)
183			status |= 1 << USB_PORT_FEAT_RESET;
184		if (temp & PORT_POWER)
185			status |= 1 << USB_PORT_FEAT_POWER;
186		xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
		/* buf may be unaligned — use put_unaligned */
187		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
188		break;
189	case SetPortFeature:
190		wIndex &= 0xff;
191		if (!wIndex || wIndex > ports)
192			goto error;
193		wIndex--;
194		addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(wIndex & 0xff);
195		temp = xhci_readl(xhci, addr);
		/* Mask to RO/RWS bits so the write-back doesn't clear RW1C bits */
196		temp = xhci_port_state_to_neutral(temp);
197		switch (wValue) {
198		case USB_PORT_FEAT_POWER:
199			/*
200			 * Turn on ports, even if there isn't per-port switching.
201			 * HC will report connect events even before this is set.
202			 * However, khubd will ignore the roothub events until
203			 * the roothub is registered.
204			 */
205			xhci_writel(xhci, temp | PORT_POWER, addr);
206
207			temp = xhci_readl(xhci, addr);
208			xhci_dbg(xhci, "set port power, actual port %d status  = 0x%x\n", wIndex, temp);
209			break;
210		case USB_PORT_FEAT_RESET:
211			temp = (temp | PORT_RESET);
212			xhci_writel(xhci, temp, addr);
213
214			temp = xhci_readl(xhci, addr);
215			xhci_dbg(xhci, "set port reset, actual port %d status  = 0x%x\n", wIndex, temp);
216			break;
217		default:
218			goto error;
219		}
220		temp = xhci_readl(xhci, addr); /* unblock any posted writes */
221		break;
222	case ClearPortFeature:
223		if (!wIndex || wIndex > ports)
224			goto error;
225		wIndex--;
226		addr = &xhci->op_regs->port_status_base +
227			NUM_PORT_REGS*(wIndex & 0xff);
228		temp = xhci_readl(xhci, addr);
229		temp = xhci_port_state_to_neutral(temp);
230		switch (wValue) {
231		case USB_PORT_FEAT_C_RESET:
232			status = PORT_RC;
233			port_change_bit = "reset";
234			break;
235		case USB_PORT_FEAT_C_CONNECTION:
236			status = PORT_CSC;
237			port_change_bit = "connect";
238			break;
239		case USB_PORT_FEAT_C_OVER_CURRENT:
240			status = PORT_OCC;
241			port_change_bit = "over-current";
242			break;
243		default:
244			goto error;
245		}
246		/* Change bits are all write 1 to clear */
247		xhci_writel(xhci, temp | status, addr);
248		temp = xhci_readl(xhci, addr);
249		xhci_dbg(xhci, "clear port %s change, actual port %d status  = 0x%x\n",
250				port_change_bit, wIndex, temp);
251		temp = xhci_readl(xhci, addr); /* unblock any posted writes */
252		break;
253	default:
254error:
255		/* "stall" on error */
256		retval = -EPIPE;
257	}
258	spin_unlock_irqrestore(&xhci->lock, flags);
259	return retval;
260}
261
262/*
263 * Returns 0 if the status hasn't changed, or the number of bytes in buf.
264 * Ports are 0-indexed from the HCD point of view,
265 * and 1-indexed from the USB core pointer of view.
266 * xHCI instances can have up to 127 ports, so FIXME if you see more than 15.
267 *
268 * Note that the status change bits will be cleared as soon as a port status
269 * change event is generated, so we use the saved status from that event.
270 */
271int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
272{
273	unsigned long flags;
274	u32 temp, status;
275	int i, retval;
276	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
277	int ports;
278	u32 __iomem *addr;
279
280	ports = HCS_MAX_PORTS(xhci->hcs_params1);
281
282	/* Initial status is no changes */
	/* NOTE(review): buf is only 2 bytes wide here, so this assumes
	 * ports <= 15 — see the FIXME in the header comment. */
283	buf[0] = 0;
284	status = 0;
285	if (ports > 7) {
286		buf[1] = 0;
287		retval = 2;
288	} else {
289		retval = 1;
290	}
291
292	spin_lock_irqsave(&xhci->lock, flags);
293	/* For each port, did anything change?  If so, set that bit in buf. */
	/* Bit 0 of buf[0] is reserved for the hub itself; port i maps to
	 * overall bit i+1. */
294	for (i = 0; i < ports; i++) {
295		addr = &xhci->op_regs->port_status_base +
296			NUM_PORT_REGS*i;
297		temp = xhci_readl(xhci, addr);
298		if (temp & (PORT_CSC | PORT_PEC | PORT_OCC)) {
299			if (i < 7)
300				buf[0] |= 1 << (i + 1);
301			else
302				buf[1] |= 1 << (i - 7);
303			status = 1;
304		}
305	}
306	spin_unlock_irqrestore(&xhci->lock, flags);
307	return status ? retval : 0;
308}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
new file mode 100644
index 000000000000..c8a72de1c508
--- /dev/null
+++ b/drivers/usb/host/xhci-mem.c
@@ -0,0 +1,769 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/usb.h>
24#include <linux/pci.h>
25#include <linux/dmapool.h>
26
27#include "xhci.h"
28
29/*
30 * Allocates a generic ring segment from the ring pool, sets the dma address,
31 * initializes the segment to zero, and sets the private next pointer to NULL.
32 *
33 * Section 4.11.1.1:
34 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
35 */
36static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
37{
38 struct xhci_segment *seg;
39 dma_addr_t dma;
40
41 seg = kzalloc(sizeof *seg, flags);
42 if (!seg)
43 return 0;
44 xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
45
46 seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
47 if (!seg->trbs) {
48 kfree(seg);
49 return 0;
50 }
51 xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
52 seg->trbs, (unsigned long long)dma);
53
54 memset(seg->trbs, 0, SEGMENT_SIZE);
55 seg->dma = dma;
56 seg->next = NULL;
57
58 return seg;
59}
60
/*
 * Free one ring segment: return its TRB block to the segment DMA pool
 * and free the private bookkeeping structure.  A NULL seg is a no-op.
 */
61static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
62{
63	if (!seg)
64		return;
65	if (seg->trbs) {
66		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
67				seg->trbs, (unsigned long long)seg->dma);
68		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
69		seg->trbs = NULL;
70	}
71	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
72	kfree(seg);
73}
74
75/*
76 * Make the prev segment point to the next segment.
77 *
78 * Change the last TRB in the prev segment to be a Link TRB which points to the
79 * DMA address of the next segment.  The caller needs to set any Link TRB
80 * related flags, such as End TRB, Toggle Cycle, and no snoop.
81 */
82static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
83		struct xhci_segment *next, bool link_trbs)
84{
85	u32 val;
86
87	if (!prev || !next)
88		return;
89	prev->next = next;
90	if (link_trbs) {
		/* Only the low 32 bits of the 64-bit pointer are written here;
		 * presumably segment_ptr[1] is already zero from allocation —
		 * TODO confirm for systems with >4GB DMA addresses. */
91		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
92
93		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
94		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
95		val &= ~TRB_TYPE_BITMASK;
96		val |= TRB_TYPE(TRB_LINK);
97		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
98	}
99	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
100			(unsigned long long)prev->dma,
101			(unsigned long long)next->dma);
102}
103
104/* XXX: Do we need the hcd structure in all these functions? */
/*
 * Free a ring and all of its segments.  Handles the circular segment
 * list (a one-segment ring has first_seg->next == first_seg, so the
 * while loop simply doesn't run).  A NULL or segment-less ring is a
 * no-op for the segments, but note the ring struct itself is still
 * freed only on the non-NULL path.
 */
105void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
106{
107	struct xhci_segment *seg;
108	struct xhci_segment *first_seg;
109
110	if (!ring || !ring->first_seg)
111		return;
112	first_seg = ring->first_seg;
113	seg = first_seg->next;
114	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
115	while (seg != first_seg) {
116		struct xhci_segment *next = seg->next;
117		xhci_segment_free(xhci, seg);
118		seg = next;
119	}
120	xhci_segment_free(xhci, first_seg);
121	ring->first_seg = NULL;
122	kfree(ring);
123}
124
125/**
126 * Create a new ring with zero or more segments.
127 *
128 * Link each segment together into a ring.
129 * Set the end flag and the cycle toggle bit on the last segment.
130 * See section 4.9.1 and figures 15 and 16.
131 */
132static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
133 unsigned int num_segs, bool link_trbs, gfp_t flags)
134{
135 struct xhci_ring *ring;
136 struct xhci_segment *prev;
137
138 ring = kzalloc(sizeof *(ring), flags);
139 xhci_dbg(xhci, "Allocating ring at %p\n", ring);
140 if (!ring)
141 return 0;
142
143 INIT_LIST_HEAD(&ring->td_list);
144 INIT_LIST_HEAD(&ring->cancelled_td_list);
145 if (num_segs == 0)
146 return ring;
147
148 ring->first_seg = xhci_segment_alloc(xhci, flags);
149 if (!ring->first_seg)
150 goto fail;
151 num_segs--;
152
153 prev = ring->first_seg;
154 while (num_segs > 0) {
155 struct xhci_segment *next;
156
157 next = xhci_segment_alloc(xhci, flags);
158 if (!next)
159 goto fail;
160 xhci_link_segments(xhci, prev, next, link_trbs);
161
162 prev = next;
163 num_segs--;
164 }
165 xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);
166
167 if (link_trbs) {
168 /* See section 4.9.2.1 and 6.4.4.1 */
169 prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
170 xhci_dbg(xhci, "Wrote link toggle flag to"
171 " segment %p (virtual), 0x%llx (DMA)\n",
172 prev, (unsigned long long)prev->dma);
173 }
174 /* The ring is empty, so the enqueue pointer == dequeue pointer */
175 ring->enqueue = ring->first_seg->trbs;
176 ring->enq_seg = ring->first_seg;
177 ring->dequeue = ring->enqueue;
178 ring->deq_seg = ring->first_seg;
179 /* The ring is initialized to 0. The producer must write 1 to the cycle
180 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
181 * compare CCS to the cycle bit to check ownership, so CCS = 1.
182 */
183 ring->cycle_state = 1;
184
185 return ring;
186
187fail:
188 xhci_ring_free(xhci, ring);
189 return 0;
190}
191
192/* All the xhci_tds in the ring's TD list should be freed at this point */
193void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
194{
195 struct xhci_virt_device *dev;
196 int i;
197
198 /* Slot ID 0 is reserved */
199 if (slot_id == 0 || !xhci->devs[slot_id])
200 return;
201
202 dev = xhci->devs[slot_id];
203 xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
204 xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
205 if (!dev)
206 return;
207
208 for (i = 0; i < 31; ++i)
209 if (dev->ep_rings[i])
210 xhci_ring_free(xhci, dev->ep_rings[i]);
211
212 if (dev->in_ctx)
213 dma_pool_free(xhci->device_pool,
214 dev->in_ctx, dev->in_ctx_dma);
215 if (dev->out_ctx)
216 dma_pool_free(xhci->device_pool,
217 dev->out_ctx, dev->out_ctx_dma);
218 kfree(xhci->devs[slot_id]);
219 xhci->devs[slot_id] = 0;
220}
221
/*
 * Allocate per-slot state: the xhci_virt_device, its input/output device
 * contexts from the device DMA pool, and the endpoint 0 ring; then point
 * the slot's DCBAA entry at the output context.
 * Returns 1 on success, 0 on failure (not an errno); partial allocations
 * are cleaned up via xhci_free_virt_device().
 */
222int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
223		struct usb_device *udev, gfp_t flags)
224{
225	dma_addr_t dma;
226	struct xhci_virt_device *dev;
227
228	/* Slot ID 0 is reserved */
229	if (slot_id == 0 || xhci->devs[slot_id]) {
230		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
231		return 0;
232	}
233
234	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
235	if (!xhci->devs[slot_id])
236		return 0;
237	dev = xhci->devs[slot_id];
238
239	/* Allocate the (output) device context that will be used in the HC */
240	dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
241	if (!dev->out_ctx)
242		goto fail;
243	dev->out_ctx_dma = dma;
244	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
245			(unsigned long long)dma);
246	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
247
248	/* Allocate the (input) device context for address device command */
249	dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
250	if (!dev->in_ctx)
251		goto fail;
252	dev->in_ctx_dma = dma;
253	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
254			(unsigned long long)dma);
255	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
256
257	/* Allocate endpoint 0 ring */
258	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
259	if (!dev->ep_rings[0])
260		goto fail;
261
262	init_completion(&dev->cmd_completion);
263
264	/*
265	 * Point to output device context in dcbaa; skip the output control
266	 * context, which is eight 32 bit fields (or 32 bytes long)
267	 */
	/* NOTE(review): only the low 32 bits of the DMA address are stored
	 * (the (u32) cast); the high half is written as 0 below — assumes
	 * contexts are allocated below 4GB. TODO confirm. */
268	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
269		(u32) dev->out_ctx_dma + (32);
270	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
271			slot_id,
272			&xhci->dcbaa->dev_context_ptrs[2*slot_id],
273			(unsigned long long)dev->out_ctx_dma);
274	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
275
276	return 1;
277fail:
278	xhci_free_virt_device(xhci, slot_id);
279	return 0;
280}
281
282/* Setup an xHCI virtual device for a Set Address command */
283int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
284{
285 struct xhci_virt_device *dev;
286 struct xhci_ep_ctx *ep0_ctx;
287 struct usb_device *top_dev;
288
289 dev = xhci->devs[udev->slot_id];
290 /* Slot ID 0 is reserved */
291 if (udev->slot_id == 0 || !dev) {
292 xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
293 udev->slot_id);
294 return -EINVAL;
295 }
296 ep0_ctx = &dev->in_ctx->ep[0];
297
298 /* 2) New slot context and endpoint 0 context are valid*/
299 dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
300
301 /* 3) Only the control endpoint is valid - one endpoint context */
302 dev->in_ctx->slot.dev_info |= LAST_CTX(1);
303
304 switch (udev->speed) {
305 case USB_SPEED_SUPER:
306 dev->in_ctx->slot.dev_info |= (u32) udev->route;
307 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
308 break;
309 case USB_SPEED_HIGH:
310 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
311 break;
312 case USB_SPEED_FULL:
313 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
314 break;
315 case USB_SPEED_LOW:
316 dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
317 break;
318 case USB_SPEED_VARIABLE:
319 xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
320 return -EINVAL;
321 break;
322 default:
323 /* Speed was set earlier, this shouldn't happen. */
324 BUG();
325 }
326 /* Find the root hub port this device is under */
327 for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
328 top_dev = top_dev->parent)
329 /* Found device below root hub */;
330 dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
331 xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
332
333 /* Is this a LS/FS device under a HS hub? */
334 /*
335 * FIXME: I don't think this is right, where does the TT info for the
336 * roothub or parent hub come from?
337 */
338 if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
339 udev->tt) {
340 dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
341 dev->in_ctx->slot.tt_info |= udev->ttport << 8;
342 }
343 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
344 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
345
346 /* Step 4 - ring already allocated */
347 /* Step 5 */
348 ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
349 /*
350 * See section 4.3 bullet 6:
351 * The default Max Packet size for ep0 is "8 bytes for a USB2
352 * LS/FS/HS device or 512 bytes for a USB3 SS device"
353 * XXX: Not sure about wireless USB devices.
354 */
355 if (udev->speed == USB_SPEED_SUPER)
356 ep0_ctx->ep_info2 |= MAX_PACKET(512);
357 else
358 ep0_ctx->ep_info2 |= MAX_PACKET(8);
359 /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
360 ep0_ctx->ep_info2 |= MAX_BURST(0);
361 ep0_ctx->ep_info2 |= ERROR_COUNT(3);
362
363 ep0_ctx->deq[0] =
364 dev->ep_rings[0]->first_seg->dma;
365 ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
366 ep0_ctx->deq[1] = 0;
367
368 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
369
370 return 0;
371}
372
373/* Return the polling or NAK interval.
374 *
375 * The polling interval is expressed in "microframes". If xHCI's Interval field
376 * is set to N, it will service the endpoint every 2^(Interval)*125us.
377 *
378 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
379 * is set to 0.
380 */
381static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
382 struct usb_host_endpoint *ep)
383{
384 unsigned int interval = 0;
385
386 switch (udev->speed) {
387 case USB_SPEED_HIGH:
388 /* Max NAK rate */
389 if (usb_endpoint_xfer_control(&ep->desc) ||
390 usb_endpoint_xfer_bulk(&ep->desc))
391 interval = ep->desc.bInterval;
392 /* Fall through - SS and HS isoc/int have same decoding */
393 case USB_SPEED_SUPER:
394 if (usb_endpoint_xfer_int(&ep->desc) ||
395 usb_endpoint_xfer_isoc(&ep->desc)) {
396 if (ep->desc.bInterval == 0)
397 interval = 0;
398 else
399 interval = ep->desc.bInterval - 1;
400 if (interval > 15)
401 interval = 15;
402 if (interval != ep->desc.bInterval + 1)
403 dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
404 ep->desc.bEndpointAddress, 1 << interval);
405 }
406 break;
407 /* Convert bInterval (in 1-255 frames) to microframes and round down to
408 * nearest power of 2.
409 */
410 case USB_SPEED_FULL:
411 case USB_SPEED_LOW:
412 if (usb_endpoint_xfer_int(&ep->desc) ||
413 usb_endpoint_xfer_isoc(&ep->desc)) {
414 interval = fls(8*ep->desc.bInterval) - 1;
415 if (interval > 10)
416 interval = 10;
417 if (interval < 3)
418 interval = 3;
419 if ((1 << interval) != 8*ep->desc.bInterval)
420 dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
421 ep->desc.bEndpointAddress, 1 << interval);
422 }
423 break;
424 default:
425 BUG();
426 }
427 return EP_INTERVAL(interval);
428}
429
430static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
431 struct usb_host_endpoint *ep)
432{
433 int in;
434 u32 type;
435
436 in = usb_endpoint_dir_in(&ep->desc);
437 if (usb_endpoint_xfer_control(&ep->desc)) {
438 type = EP_TYPE(CTRL_EP);
439 } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
440 if (in)
441 type = EP_TYPE(BULK_IN_EP);
442 else
443 type = EP_TYPE(BULK_OUT_EP);
444 } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
445 if (in)
446 type = EP_TYPE(ISOC_IN_EP);
447 else
448 type = EP_TYPE(ISOC_OUT_EP);
449 } else if (usb_endpoint_xfer_int(&ep->desc)) {
450 if (in)
451 type = EP_TYPE(INT_IN_EP);
452 else
453 type = EP_TYPE(INT_OUT_EP);
454 } else {
455 BUG();
456 }
457 return type;
458}
459
460int xhci_endpoint_init(struct xhci_hcd *xhci,
461 struct xhci_virt_device *virt_dev,
462 struct usb_device *udev,
463 struct usb_host_endpoint *ep,
464 gfp_t mem_flags)
465{
466 unsigned int ep_index;
467 struct xhci_ep_ctx *ep_ctx;
468 struct xhci_ring *ep_ring;
469 unsigned int max_packet;
470 unsigned int max_burst;
471
472 ep_index = xhci_get_endpoint_index(&ep->desc);
473 ep_ctx = &virt_dev->in_ctx->ep[ep_index];
474
475 /* Set up the endpoint ring */
476 virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
477 if (!virt_dev->new_ep_rings[ep_index])
478 return -ENOMEM;
479 ep_ring = virt_dev->new_ep_rings[ep_index];
480 ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
481 ep_ctx->deq[1] = 0;
482
483 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
484
485 /* FIXME dig Mult and streams info out of ep companion desc */
486
487 /* Allow 3 retries for everything but isoc */
488 if (!usb_endpoint_xfer_isoc(&ep->desc))
489 ep_ctx->ep_info2 = ERROR_COUNT(3);
490 else
491 ep_ctx->ep_info2 = ERROR_COUNT(0);
492
493 ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
494
495 /* Set the max packet size and max burst */
496 switch (udev->speed) {
497 case USB_SPEED_SUPER:
498 max_packet = ep->desc.wMaxPacketSize;
499 ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
500 /* dig out max burst from ep companion desc */
501 max_packet = ep->ss_ep_comp->desc.bMaxBurst;
502 ep_ctx->ep_info2 |= MAX_BURST(max_packet);
503 break;
504 case USB_SPEED_HIGH:
505 /* bits 11:12 specify the number of additional transaction
506 * opportunities per microframe (USB 2.0, section 9.6.6)
507 */
508 if (usb_endpoint_xfer_isoc(&ep->desc) ||
509 usb_endpoint_xfer_int(&ep->desc)) {
510 max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
511 ep_ctx->ep_info2 |= MAX_BURST(max_burst);
512 }
513 /* Fall through */
514 case USB_SPEED_FULL:
515 case USB_SPEED_LOW:
516 max_packet = ep->desc.wMaxPacketSize & 0x3ff;
517 ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
518 break;
519 default:
520 BUG();
521 }
522 /* FIXME Debug endpoint context */
523 return 0;
524}
525
526void xhci_endpoint_zero(struct xhci_hcd *xhci,
527 struct xhci_virt_device *virt_dev,
528 struct usb_host_endpoint *ep)
529{
530 unsigned int ep_index;
531 struct xhci_ep_ctx *ep_ctx;
532
533 ep_index = xhci_get_endpoint_index(&ep->desc);
534 ep_ctx = &virt_dev->in_ctx->ep[ep_index];
535
536 ep_ctx->ep_info = 0;
537 ep_ctx->ep_info2 = 0;
538 ep_ctx->deq[0] = 0;
539 ep_ctx->deq[1] = 0;
540 ep_ctx->tx_info = 0;
541 /* Don't free the endpoint ring until the set interface or configuration
542 * request succeeds.
543 */
544}
545
/*
 * Free all xHC housekeeping memory and clear the hardware's pointers to it.
 * Called on init failure and at teardown; tolerates partially-initialized
 * state, since every free is guarded by a NULL check.  Each hardware
 * register is cleared *before* the memory it points to is released.
 */
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	/* Clear the command ring control register before freeing the ring */
	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
	xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	/* Slot ID 0 is reserved, so start at 1.  Device frees must happen
	 * before the pools they allocated from are destroyed below.
	 */
	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	/* The free reads xhci->dcbaa->dma, so clear the hardware pointer and
	 * free in this order, then NULL our own pointer.
	 */
	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
	if (xhci->dcbaa)
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	xhci->page_size = 0;
	xhci->page_shift = 0;
}
599
/*
 * Allocate and wire up all per-controller data structures: the device
 * context base address array (DCBAA), DMA pools for ring segments and
 * device contexts, the command ring, the event ring, and the event ring
 * segment table (ERST).  Hardware registers are programmed as each piece
 * is created; on any failure everything is unwound via xhci_mem_cleanup().
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int	val, val2;
	struct xhci_segment	*seg;
	u32 page_size;
	int i;

	/* Each set bit i in PAGESIZE means the HC supports 2^(i+12) bytes;
	 * scan for the lowest supported size (debug output only).
	 */
	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	/* Preserve the reserved/other bits of CONFIG, replace only the
	 * slots field.
	 */
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(*xhci->dcbaa), &dma);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	/* 64-bit register written as two 32-bit halves; high half is zero
	 * (NOTE(review): assumes DMA address fits in 32 bits - confirm)
	 */
	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);
	/* See Table 46 and Note on Figure 55 */
	/* FIXME support 64-byte contexts */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			sizeof(struct xhci_device_control),
			64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register, preserving
	 * non-address bits and folding in the ring cycle state.
	 */
	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
	val = (val & ~CMD_RING_ADDR_MASK) |
		(xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
	xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
	xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
	xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
	xhci_dbg_cmd_ptrs(xhci);

	/* Locate the doorbell array relative to the capability registers */
	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = (void *) xhci->run_regs->ir_set;

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!xhci->event_ring)
		goto fail;

	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr[0] = seg->dma;
		entry->seg_addr[1] = 0;
		entry->seg_size = TRBS_PER_SEGMENT;
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
	val &= ERST_PTR_MASK;
	val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
	xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	/* NOTE(review): these are pointers; NULL would be clearer than 0 */
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = 0;

	return 0;
fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
new file mode 100644
index 000000000000..1462709e26c0
--- /dev/null
+++ b/drivers/usb/host/xhci-pci.c
@@ -0,0 +1,166 @@
1/*
2 * xHCI host controller driver PCI Bus Glue.
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/pci.h>
24
25#include "xhci.h"
26
27static const char hcd_name[] = "xhci_hcd";
28
/* called after powerup, by probe or system-pm "wakeup" */
static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
{
	int mwi_err;

	/*
	 * TODO: Implement finding debug ports later.
	 * TODO: see if there are any quirks that need to be added to handle
	 * new extended capabilities.
	 */

	/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
	mwi_err = pci_set_mwi(pdev);
	if (mwi_err == 0)
		xhci_dbg(xhci, "MWI active\n");

	xhci_dbg(xhci, "Finished xhci_pci_reinit\n");
	return 0;
}
45
/* called during probe() after chip reset completes.
 *
 * Maps the three xHCI register regions off the BAR, caches the read-only
 * capability registers, then runs the required bring-up order:
 * halt -> reset -> init -> read SBRN -> PCI-specific reinit.
 * Returns 0 on success or the first failing step's error code.
 */
static int xhci_pci_setup(struct usb_hcd *hcd)
{
	struct xhci_hcd		*xhci = hcd_to_xhci(hcd);
	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
	int			retval;

	/* Operational and runtime register bases are at offsets given by
	 * the capability registers (CAPLENGTH and RTSOFF).
	 */
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	/* Serial Bus Release Number: the USB spec revision this HC supports */
	pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
	xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);

	/* Find any debug ports */
	return xhci_pci_reinit(xhci, pdev);
}
90
/* hc_driver hooks wiring the generic USB HCD core to this xHCI driver */
static const struct hc_driver xhci_pci_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_USB3,

	/*
	 * basic lifecycle operations
	 */
	.reset =		xhci_pci_setup,
	.start =		xhci_run,
	/* suspend and resume implemented later */
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/* Root hub support */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
};
133
134/*-------------------------------------------------------------------------*/
135
/* PCI driver selection metadata; PCI hotplugging uses this.
 * Class-based match: binds to any controller advertising the xHCI
 * serial-bus programming interface, regardless of vendor/device ID.
 */
static const struct pci_device_id pci_ids[] = { {
	/* handle any USB 3.0 xHCI controller */
	PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
	.driver_data =	(unsigned long) &xhci_pci_hc_driver,
	},
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
145
/* pci driver glue; this is a "new style" PCI driver module.
 * probe/remove/shutdown are the generic usb_hcd helpers, which dispatch
 * into xhci_pci_hc_driver via the driver_data set in pci_ids above.
 */
static struct pci_driver xhci_pci_driver = {
	.name =		(char *) hcd_name,
	.id_table =	pci_ids,

	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,
	/* suspend and resume implemented later */

	.shutdown = 	usb_hcd_pci_shutdown,
};
157
158int xhci_register_pci()
159{
160 return pci_register_driver(&xhci_pci_driver);
161}
162
163void xhci_unregister_pci()
164{
165 pci_unregister_driver(&xhci_pci_driver);
166}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
new file mode 100644
index 000000000000..02d81985c454
--- /dev/null
+++ b/drivers/usb/host/xhci-ring.c
@@ -0,0 +1,1648 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23/*
24 * Ring initialization rules:
25 * 1. Each segment is initialized to zero, except for link TRBs.
26 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
27 * Consumer Cycle State (CCS), depending on ring function.
28 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
29 *
30 * Ring behavior rules:
31 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
32 * least one free TRB in the ring. This is useful if you want to turn that
33 * into a link TRB and expand the ring.
34 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
35 * link TRB, then load the pointer with the address in the link TRB. If the
36 * link TRB had its toggle bit set, you may need to update the ring cycle
37 * state (see cycle bit rules). You may have to do this multiple times
38 * until you reach a non-link TRB.
39 * 3. A ring is full if enqueue++ (for the definition of increment above)
40 * equals the dequeue pointer.
41 *
42 * Cycle bit rules:
43 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
44 * in a link TRB, it must toggle the ring cycle state.
45 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
46 * in a link TRB, it must toggle the ring cycle state.
47 *
48 * Producer rules:
49 * 1. Check if ring is full before you enqueue.
50 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
51 * Update enqueue pointer between each write (which may update the ring
52 * cycle state).
53 * 3. Notify consumer. If SW is producer, it rings the doorbell for command
 54 *    and endpoint rings. If HC is the producer for the event ring,
 55 *    it generates an interrupt according to interrupt modulation rules.
56 *
57 * Consumer rules:
58 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
59 * the TRB is owned by the consumer.
60 * 2. Update dequeue pointer (which may update the ring cycle state) and
61 * continue processing TRBs until you reach a TRB which is not owned by you.
62 * 3. Notify the producer. SW is the consumer for the event ring, and it
63 * updates event ring dequeue pointer. HC is the consumer for the command and
64 * endpoint rings; it generates events on the event ring for these.
65 */
66
67#include <linux/scatterlist.h>
68#include "xhci.h"
69
70/*
71 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
72 * address of the TRB.
73 */
74dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
75 union xhci_trb *trb)
76{
77 unsigned long segment_offset;
78
79 if (!seg || !trb || trb < seg->trbs)
80 return 0;
81 /* offset in TRBs */
82 segment_offset = trb - seg->trbs;
83 if (segment_offset > TRBS_PER_SEGMENT)
84 return 0;
85 return seg->dma + (segment_offset * sizeof(*trb));
86}
87
88/* Does this link TRB point to the first segment in a ring,
89 * or was the previous TRB the last TRB on the last segment in the ERST?
90 */
91static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
92 struct xhci_segment *seg, union xhci_trb *trb)
93{
94 if (ring == xhci->event_ring)
95 return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
96 (seg->next == xhci->event_ring->first_seg);
97 else
98 return trb->link.control & LINK_TOGGLE;
99}
100
101/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
102 * segment? I.e. would the updated event TRB pointer step off the end of the
103 * event seg?
104 */
105static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
106 struct xhci_segment *seg, union xhci_trb *trb)
107{
108 if (ring == xhci->event_ring)
109 return trb == &seg->trbs[TRBS_PER_SEGMENT];
110 else
111 return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
112}
113
114/* Updates trb to point to the next TRB in the ring, and updates seg if the next
115 * TRB is in a new segment. This does not skip over link TRBs, and it does not
116 * effect the ring dequeue or enqueue pointers.
117 */
118static void next_trb(struct xhci_hcd *xhci,
119 struct xhci_ring *ring,
120 struct xhci_segment **seg,
121 union xhci_trb **trb)
122{
123 if (last_trb(xhci, ring, *seg, *trb)) {
124 *seg = (*seg)->next;
125 *trb = ((*seg)->trbs);
126 } else {
127 *trb = (*trb)++;
128 }
129}
130
/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBS)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		/* Only a consumer toggles its cycle state, and only when the
		 * dequeue pointer wraps from the last segment to the first.
		 */
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		/* Step over the segment boundary (or link TRB) */
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
}
156
/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.
 * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	u32 chain;
	union xhci_trb *next;

	/* Remember the chain bit of the TRB just enqueued so it can be
	 * propagated into any link TRBs we step over.
	 */
	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBS)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				/* Carry the chain state into the link TRB,
				 * then flip its cycle bit to pass ownership.
				 */
				next->link.control &= ~TRB_CHAIN;
				next->link.control |= chain;
				/* Give this link TRB to the hardware */
				wmb();
				if (next->link.control & TRB_CYCLE)
					next->link.control &= (u32) ~TRB_CYCLE;
				else
					next->link.control |= (u32) TRB_CYCLE;
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}
208
/*
 * Check to see if there's room to enqueue num_trbs on the ring. See rules
 * above.  Returns 1 if there is room, 0 if the ring would overflow.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;

	/* Check if ring is empty */
	if (enq == ring->dequeue)
		return 1;
	/* Make sure there's an extra empty TRB available.
	 * Walk num_trbs + 1 slots forward (skipping link TRBs); meeting the
	 * dequeue pointer on the way means there is not enough room.
	 */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}
237
/* Write the software event ring dequeue pointer into the xHC's ERST dequeue
 * register, preserving the register's low (non-address) bits.
 */
void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u32 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	/* A zero DMA address means the dequeue pointer was not inside its
	 * supposed segment - software state is corrupt.
	 */
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
	temp &= ERST_PTR_MASK;
	if (!in_interrupt())
		xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
	xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
	xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue[0]);
}
257
/* Ring the host controller doorbell after placing a command on the ring.
 * Read-modify-write preserves the reserved doorbell bits, and the trailing
 * read forces the posted write out to the device.
 */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	u32 temp;

	xhci_dbg(xhci, "// Ding dong!\n");
	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}
269
/* Ring the doorbell for one endpoint of one device slot, telling the xHC to
 * start (or resume) processing that endpoint's transfer ring.
 */
static void ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	u32 field;
	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing, or if
	 * a Set TR Dequeue command for it is still outstanding.
	 */
	if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
		field = xhci_readl(xhci, db_addr) & DB_MASK;
		xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
		 * isn't time-critical and we shouldn't make the CPU wait for
		 * the flush.
		 */
		xhci_readl(xhci, db_addr);
	}
}
292
293/*
294 * Find the segment that trb is in. Start searching in start_seg.
295 * If we must move past a segment that has a link TRB with a toggle cycle state
296 * bit set, then we will toggle the value pointed at by cycle_state.
297 */
298static struct xhci_segment *find_trb_seg(
299 struct xhci_segment *start_seg,
300 union xhci_trb *trb, int *cycle_state)
301{
302 struct xhci_segment *cur_seg = start_seg;
303 struct xhci_generic_trb *generic_trb;
304
305 while (cur_seg->trbs > trb ||
306 &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
307 generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
308 if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK &&
309 (generic_trb->field[3] & LINK_TOGGLE))
310 *cycle_state = ~(*cycle_state) & 0x1;
311 cur_seg = cur_seg->next;
312 if (cur_seg == start_seg)
313 /* Looped over the entire list. Oops! */
314 return 0;
315 }
316 return cur_seg;
317}
318
/* New dequeue state for an endpoint ring, computed after cancelling TDs:
 * the segment and TRB the xHC should resume from, plus the consumer cycle
 * state it should use there.
 */
struct dequeue_state {
	struct xhci_segment *new_deq_seg;
	union xhci_trb *new_deq_ptr;
	int new_cycle_state;
};
324
/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 */
static void find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_td *cur_td, struct dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
	struct xhci_generic_trb *trb;

	/* Jump 1: locate the segment holding the TRB the HC stopped on,
	 * accumulating cycle-bit toggles along the way.
	 */
	state->new_cycle_state = 0;
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			ep_ring->stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];

	/* Jump 2: walk from there to the segment holding the TD's last TRB,
	 * toggling the cycle state across any toggling link TRBs passed.
	 */
	state->new_deq_ptr = cur_td->last_trb;
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();

	/* Jump 3: step one TRB past the TD; if the last TRB is itself a
	 * link TRB with the toggle bit, flip the cycle state once more.
	 */
	trb = &state->new_deq_ptr->generic;
	if (TRB_TYPE(trb->field[3]) == TRB_LINK &&
			(trb->field[3] & LINK_TOGGLE))
		state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/* Don't update the ring cycle state for the producer (us). */
	ep_ring->dequeue = state->new_deq_ptr;
	ep_ring->deq_seg = state->new_deq_seg;
}
373
/*
 * Turn every TRB of cur_td (first_trb through last_trb) into a No-op TRB so
 * the hardware skips the cancelled TD.  Chained link TRBs are only unchained,
 * leaving the ring's segment links intact.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK)) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= ~TRB_CHAIN;
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			/* Zero the TRB, keep only its cycle bit, and mark it
			 * as a transfer-ring no-op.
			 */
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= TRB_CYCLE;
			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
		/* Stop once the TD's last TRB has been processed */
		if (cur_trb == cur_td->last_trb)
			break;
	}
}
414
415static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
416 unsigned int ep_index, struct xhci_segment *deq_seg,
417 union xhci_trb *deq_ptr, u32 cycle_state);
418
/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct list_head *entry;
	struct xhci_td *cur_td = 0;
	struct xhci_td *last_unlinked_td;

	struct dequeue_state deq_state;
#ifdef CONFIG_USB_HCD_STAT
	ktime_t stop_time = ktime_get();
#endif

	memset(&deq_state, 0, sizeof(deq_state));
	/* The stopped endpoint is encoded in the Stop Endpoint TRB itself */
	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];

	if (list_empty(&ep_ring->cancelled_td_list))
		return;

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each(entry, &ep_ring->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep_ring->stopped_td)
			find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
					&deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td);
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del(&cur_td->td_list);
		ep_ring->cancels_pending--;
	}
	/* Remember where this unlink pass ended so the giveback loop below
	 * stops there, even if new cancellations are appended meanwhile.
	 */
	last_unlinked_td = cur_td;

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
				"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
				deq_state.new_deq_seg,
				(unsigned long long)deq_state.new_deq_seg->dma,
				deq_state.new_deq_ptr,
				(unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
				deq_state.new_cycle_state);
		queue_set_tr_deq(xhci, slot_id, ep_index,
				deq_state.new_deq_seg,
				deq_state.new_deq_ptr,
				(u32) deq_state.new_cycle_state);
		/* Stop the TD queueing code from ringing the doorbell until
		 * this command completes.  The HC won't set the dequeue pointer
		 * if the ring is running, and ringing the doorbell starts the
		 * ring running.
		 */
		ep_ring->state |= SET_DEQ_PENDING;
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise just ring the doorbell to restart the ring */
		ring_ep_doorbell(xhci, slot_id, ep_index);
	}

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep_ring->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
#ifdef CONFIG_USB_HCD_STAT
		hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
				ktime_sub(stop_time, cur_td->start_time));
#endif
		cur_td->urb->hcpriv = NULL;
		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);

		xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
		/* Must drop the lock before giving the URB back to the core */
		spin_unlock(&xhci->lock);
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
		kfree(cur_td);

		spin_lock(&xhci->lock);
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}
538
/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;

	/* The affected endpoint is encoded in the command TRB itself */
	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	dev = xhci->devs[slot_id];
	ep_ring = dev->ep_rings[ep_index];

	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		/* Diagnose the failure; no recovery is attempted here */
		switch (GET_COMP_CODE(event->status)) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = dev->out_ctx->ep[ep_index].ep_info;
			ep_state &= EP_STATE_MASK;
			slot_state = dev->out_ctx->slot.dev_state;
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
					GET_COMP_CODE(event->status));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
				"deq[1] = 0x%x.\n",
				dev->out_ctx->ep[ep_index].deq[0],
				dev->out_ctx->ep[ep_index].deq[1]);
	}

	/* Allow queueing to ring the doorbell again, and kick the ring */
	ep_ring->state &= ~SET_DEQ_PENDING;
	ring_ep_doorbell(xhci, slot_id, ep_index);
}
605
606
/*
 * Handle a Command Completion event: verify it refers to the TRB at our
 * command ring dequeue pointer, dispatch on the command's TRB type, then
 * advance the command ring dequeue pointer.
 */
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(event->flags);
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;

	/* 64-bit DMA address of the command TRB this event completes */
	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		/* Hand the new slot ID (0 on failure) to the waiter */
		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id])
			xhci_free_virt_device(xhci, slot_id);
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		++xhci->noops_handled;
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}
663
/*
 * Handle a Port Status Change event: note the port, advance the event ring
 * dequeue pointer, then (with the xHCI lock dropped) let the USB core poll
 * the root hub status.
 */
static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 port_id;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	/* FIXME: core doesn't care about all port link state changes yet */
	port_id = GET_PORT_ID(event->generic.field[0]);
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);
	xhci_set_hc_event_deq(xhci);

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
	spin_lock(&xhci->lock);
}
687
688/*
689 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
690 * at end_trb, which may be in another segment. If the suspect DMA address is a
691 * TRB in this TD, this function returns that TRB's segment. Otherwise it
692 * returns 0.
693 */
694static struct xhci_segment *trb_in_td(
695 struct xhci_segment *start_seg,
696 union xhci_trb *start_trb,
697 union xhci_trb *end_trb,
698 dma_addr_t suspect_dma)
699{
700 dma_addr_t start_dma;
701 dma_addr_t end_seg_dma;
702 dma_addr_t end_trb_dma;
703 struct xhci_segment *cur_seg;
704
705 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
706 cur_seg = start_seg;
707
708 do {
709 /* We may get an event for a Link TRB in the middle of a TD */
710 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
711 &start_seg->trbs[TRBS_PER_SEGMENT - 1]);
712 /* If the end TRB isn't in this segment, this is set to 0 */
713 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
714
715 if (end_trb_dma > 0) {
716 /* The end TRB is in this segment, so suspect should be here */
717 if (start_dma <= end_trb_dma) {
718 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
719 return cur_seg;
720 } else {
721 /* Case for one segment with
722 * a TD wrapped around to the top
723 */
724 if ((suspect_dma >= start_dma &&
725 suspect_dma <= end_seg_dma) ||
726 (suspect_dma >= cur_seg->dma &&
727 suspect_dma <= end_trb_dma))
728 return cur_seg;
729 }
730 return 0;
731 } else {
732 /* Might still be somewhere in this segment */
733 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
734 return cur_seg;
735 }
736 cur_seg = cur_seg->next;
737 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
738 } while (1);
739
740}
741
742/*
743 * If this function returns an error condition, it means it got a Transfer
744 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
745 * At this point, the host controller is probably hosed and should be reset.
746 */
747static int handle_tx_event(struct xhci_hcd *xhci,
748 struct xhci_transfer_event *event)
749{
750 struct xhci_virt_device *xdev;
751 struct xhci_ring *ep_ring;
752 int ep_index;
753 struct xhci_td *td = 0;
754 dma_addr_t event_dma;
755 struct xhci_segment *event_seg;
756 union xhci_trb *event_trb;
757 struct urb *urb = 0;
758 int status = -EINPROGRESS;
759
760 xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
761 if (!xdev) {
762 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
763 return -ENODEV;
764 }
765
766 /* Endpoint ID is 1 based, our index is zero based */
767 ep_index = TRB_TO_EP_ID(event->flags) - 1;
768 ep_ring = xdev->ep_rings[ep_index];
769 if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
770 xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
771 return -ENODEV;
772 }
773
774 event_dma = event->buffer[0];
775 if (event->buffer[1] != 0)
776 xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
777
778 /* This TRB should be in the TD at the head of this ring's TD list */
779 if (list_empty(&ep_ring->td_list)) {
780 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
781 TRB_TO_SLOT_ID(event->flags), ep_index);
782 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
783 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
784 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
785 urb = NULL;
786 goto cleanup;
787 }
788 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
789
790 /* Is this a TRB in the currently executing TD? */
791 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
792 td->last_trb, event_dma);
793 if (!event_seg) {
794 /* HC is busted, give up! */
795 xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
796 return -ESHUTDOWN;
797 }
798 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
799 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
800 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
801 xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
802 (unsigned int) event->buffer[0]);
803 xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n",
804 (unsigned int) event->buffer[1]);
805 xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
806 (unsigned int) event->transfer_len);
807 xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
808 (unsigned int) event->flags);
809
810 /* Look for common error cases */
811 switch (GET_COMP_CODE(event->transfer_len)) {
812 /* Skip codes that require special handling depending on
813 * transfer type
814 */
815 case COMP_SUCCESS:
816 case COMP_SHORT_TX:
817 break;
818 case COMP_STOP:
819 xhci_dbg(xhci, "Stopped on Transfer TRB\n");
820 break;
821 case COMP_STOP_INVAL:
822 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
823 break;
824 case COMP_STALL:
825 xhci_warn(xhci, "WARN: Stalled endpoint\n");
826 status = -EPIPE;
827 break;
828 case COMP_TRB_ERR:
829 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
830 status = -EILSEQ;
831 break;
832 case COMP_TX_ERR:
833 xhci_warn(xhci, "WARN: transfer error on endpoint\n");
834 status = -EPROTO;
835 break;
836 case COMP_DB_ERR:
837 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
838 status = -ENOSR;
839 break;
840 default:
841 xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
842 urb = NULL;
843 goto cleanup;
844 }
845 /* Now update the urb's actual_length and give back to the core */
846 /* Was this a control transfer? */
847 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
848 xhci_debug_trb(xhci, xhci->event_ring->dequeue);
849 switch (GET_COMP_CODE(event->transfer_len)) {
850 case COMP_SUCCESS:
851 if (event_trb == ep_ring->dequeue) {
852 xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
853 status = -ESHUTDOWN;
854 } else if (event_trb != td->last_trb) {
855 xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
856 status = -ESHUTDOWN;
857 } else {
858 xhci_dbg(xhci, "Successful control transfer!\n");
859 status = 0;
860 }
861 break;
862 case COMP_SHORT_TX:
863 xhci_warn(xhci, "WARN: short transfer on control ep\n");
864 status = -EREMOTEIO;
865 break;
866 default:
867 /* Others already handled above */
868 break;
869 }
870 /*
871 * Did we transfer any data, despite the errors that might have
872 * happened? I.e. did we get past the setup stage?
873 */
874 if (event_trb != ep_ring->dequeue) {
875 /* The event was for the status stage */
876 if (event_trb == td->last_trb) {
877 td->urb->actual_length =
878 td->urb->transfer_buffer_length;
879 } else {
880 /* Maybe the event was for the data stage? */
881 if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
882 /* We didn't stop on a link TRB in the middle */
883 td->urb->actual_length =
884 td->urb->transfer_buffer_length -
885 TRB_LEN(event->transfer_len);
886 }
887 }
888 } else {
889 switch (GET_COMP_CODE(event->transfer_len)) {
890 case COMP_SUCCESS:
891 /* Double check that the HW transferred everything. */
892 if (event_trb != td->last_trb) {
893 xhci_warn(xhci, "WARN Successful completion "
894 "on short TX\n");
895 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
896 status = -EREMOTEIO;
897 else
898 status = 0;
899 } else {
900 xhci_dbg(xhci, "Successful bulk transfer!\n");
901 status = 0;
902 }
903 break;
904 case COMP_SHORT_TX:
905 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
906 status = -EREMOTEIO;
907 else
908 status = 0;
909 break;
910 default:
911 /* Others already handled above */
912 break;
913 }
914 dev_dbg(&td->urb->dev->dev,
915 "ep %#x - asked for %d bytes, "
916 "%d bytes untransferred\n",
917 td->urb->ep->desc.bEndpointAddress,
918 td->urb->transfer_buffer_length,
919 TRB_LEN(event->transfer_len));
920 /* Fast path - was this the last TRB in the TD for this URB? */
921 if (event_trb == td->last_trb) {
922 if (TRB_LEN(event->transfer_len) != 0) {
923 td->urb->actual_length =
924 td->urb->transfer_buffer_length -
925 TRB_LEN(event->transfer_len);
926 if (td->urb->actual_length < 0) {
927 xhci_warn(xhci, "HC gave bad length "
928 "of %d bytes left\n",
929 TRB_LEN(event->transfer_len));
930 td->urb->actual_length = 0;
931 }
932 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
933 status = -EREMOTEIO;
934 else
935 status = 0;
936 } else {
937 td->urb->actual_length = td->urb->transfer_buffer_length;
938 /* Ignore a short packet completion if the
939 * untransferred length was zero.
940 */
941 status = 0;
942 }
943 } else {
944 /* Slow path - walk the list, starting from the dequeue
945 * pointer, to get the actual length transferred.
946 */
947 union xhci_trb *cur_trb;
948 struct xhci_segment *cur_seg;
949
950 td->urb->actual_length = 0;
951 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
952 cur_trb != event_trb;
953 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
954 if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP &&
955 TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK)
956 td->urb->actual_length +=
957 TRB_LEN(cur_trb->generic.field[2]);
958 }
959 /* If the ring didn't stop on a Link or No-op TRB, add
960 * in the actual bytes transferred from the Normal TRB
961 */
962 if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
963 td->urb->actual_length +=
964 TRB_LEN(cur_trb->generic.field[2]) -
965 TRB_LEN(event->transfer_len);
966 }
967 }
968 /* The Endpoint Stop Command completion will take care of
969 * any stopped TDs. A stopped TD may be restarted, so don't update the
970 * ring dequeue pointer or take this TD off any lists yet.
971 */
972 if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
973 GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
974 ep_ring->stopped_td = td;
975 ep_ring->stopped_trb = event_trb;
976 } else {
977 /* Update ring dequeue pointer */
978 while (ep_ring->dequeue != td->last_trb)
979 inc_deq(xhci, ep_ring, false);
980 inc_deq(xhci, ep_ring, false);
981
982 /* Clean up the endpoint's TD list */
983 urb = td->urb;
984 list_del(&td->td_list);
985 /* Was this TD slated to be cancelled but completed anyway? */
986 if (!list_empty(&td->cancelled_td_list)) {
987 list_del(&td->cancelled_td_list);
988 ep_ring->cancels_pending--;
989 }
990 kfree(td);
991 urb->hcpriv = NULL;
992 }
993cleanup:
994 inc_deq(xhci, xhci->event_ring, true);
995 xhci_set_hc_event_deq(xhci);
996
997 /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
998 if (urb) {
999 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
1000 spin_unlock(&xhci->lock);
1001 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
1002 spin_lock(&xhci->lock);
1003 }
1004 return 0;
1005}
1006
1007/*
1008 * This function handles all OS-owned events on the event ring. It may drop
1009 * xhci->lock between event processing (e.g. to pass up port status changes).
1010 */
1011void xhci_handle_event(struct xhci_hcd *xhci)
1012{
1013 union xhci_trb *event;
1014 int update_ptrs = 1;
1015 int ret;
1016
1017 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
1018 xhci->error_bitmask |= 1 << 1;
1019 return;
1020 }
1021
1022 event = xhci->event_ring->dequeue;
1023 /* Does the HC or OS own the TRB? */
1024 if ((event->event_cmd.flags & TRB_CYCLE) !=
1025 xhci->event_ring->cycle_state) {
1026 xhci->error_bitmask |= 1 << 2;
1027 return;
1028 }
1029
1030 /* FIXME: Handle more event types. */
1031 switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
1032 case TRB_TYPE(TRB_COMPLETION):
1033 handle_cmd_completion(xhci, &event->event_cmd);
1034 break;
1035 case TRB_TYPE(TRB_PORT_STATUS):
1036 handle_port_status(xhci, event);
1037 update_ptrs = 0;
1038 break;
1039 case TRB_TYPE(TRB_TRANSFER):
1040 ret = handle_tx_event(xhci, &event->trans_event);
1041 if (ret < 0)
1042 xhci->error_bitmask |= 1 << 9;
1043 else
1044 update_ptrs = 0;
1045 break;
1046 default:
1047 xhci->error_bitmask |= 1 << 3;
1048 }
1049
1050 if (update_ptrs) {
1051 /* Update SW and HC event ring dequeue pointer */
1052 inc_deq(xhci, xhci->event_ring, true);
1053 xhci_set_hc_event_deq(xhci);
1054 }
1055 /* Are there more items on the event ring? */
1056 xhci_handle_event(xhci);
1057}
1058
1059/**** Endpoint Ring Operations ****/
1060
1061/*
1062 * Generic function for queueing a TRB on a ring.
1063 * The caller must have checked to make sure there's room on the ring.
1064 */
1065static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
1066 bool consumer,
1067 u32 field1, u32 field2, u32 field3, u32 field4)
1068{
1069 struct xhci_generic_trb *trb;
1070
1071 trb = &ring->enqueue->generic;
1072 trb->field[0] = field1;
1073 trb->field[1] = field2;
1074 trb->field[2] = field3;
1075 trb->field[3] = field4;
1076 inc_enq(xhci, ring, consumer);
1077}
1078
/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 *
 * Returns 0 when queueing may proceed, or a negative errno when the endpoint
 * state forbids it or there is no room on the ring.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	/* Make sure the endpoint has been added to xHC schedule */
	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_HALTED:
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for halt or error on ep "
				"to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		/* The only states in which new TRBs may be queued */
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}
	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
		/* FIXME allocate more room */
		xhci_err(xhci, "ERROR no room on ep ring\n");
		return -ENOMEM;
	}
	return 0;
}
1121
/*
 * Check that the ring can accept num_trbs new TRBs, then allocate an xhci_td
 * for this transfer, link the URB to the endpoint, and record the transfer's
 * starting enqueue position in *td.  The caller fills in the TRBs.
 *
 * Returns 0 on success or a negative errno (ring not ready, allocation
 * failure, or URB link failure).
 */
static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int num_trbs,
		struct urb *urb,
		struct xhci_td **td,
		gfp_t mem_flags)
{
	int ret;

	ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
			xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;
	*td = kzalloc(sizeof(struct xhci_td), mem_flags);
	if (!*td)
		return -ENOMEM;
	INIT_LIST_HEAD(&(*td)->td_list);
	INIT_LIST_HEAD(&(*td)->cancelled_td_list);

	ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
	if (unlikely(ret)) {
		/* Undo the allocation if the core refuses the URB */
		kfree(*td);
		return ret;
	}

	(*td)->urb = urb;
	urb->hcpriv = (void *) (*td);
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
	/* Remember where this TD's TRBs will start on the ring */
	(*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
	(*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;

	return 0;
}
1158
/*
 * Count how many TRBs are needed to map urb's scatter-gather list: each entry
 * needs extra TRBs where it crosses a 64KB boundary (a single TRB buffer may
 * not span one), and counting stops once the requested transfer length is
 * covered.
 */
static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_sgs;
	/* temp tracks how many bytes of the requested length remain */
	temp = urb->transfer_buffer_length;

	xhci_dbg(xhci, "count sg list trbs: \n");
	num_trbs = 0;
	for_each_sg(urb->sg->sg, sg, num_sgs, i) {
		unsigned int previous_total_trbs = num_trbs;
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg)) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
				i, (unsigned long long)sg_dma_address(sg),
				len, len, num_trbs - previous_total_trbs);

		/* Stop counting once the requested length is covered */
		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	xhci_dbg(xhci, "\n");
	if (!in_interrupt())
		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				num_trbs);
	return num_trbs;
}
1202
/*
 * Debug-only sanity check after queueing a transfer: num_trbs should have
 * counted down to zero, and running_total should equal the URB's requested
 * transfer length.  Mismatches are logged, not corrected.
 */
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
				"TRBs, %d left\n", __func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}
1218
/*
 * Hand a fully-queued TD to the hardware: set the cycle bit of its first TRB
 * (which was deliberately left with the old cycle state so the HC would not
 * start early) and ring the endpoint doorbell.
 */
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int start_cycle,
		struct xhci_generic_trb *start_trb, struct xhci_td *td)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	start_trb->field[3] |= start_cycle;
	ring_ep_doorbell(xhci, slot_id, ep_index);
}
1231
/*
 * Queue a bulk transfer described by a scatter-gather list.
 *
 * One Normal TRB is built per chunk of each sg entry, where a chunk never
 * crosses a 64KB boundary (TRB buffers can't).  All TRBs are chained into
 * a single TD; the last TRB gets IOC so an event is raised on completion.
 * The first TRB's cycle bit is handed over last via giveback_first_trb()
 * so the HC never sees a partially built TD.
 *
 * Returns 0 on success, or the negative value from prepare_transfer()
 * on failure (presumably no room on the ring — confirm in prepare_transfer).
 */
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total;
	bool first_trb;
	u64 addr;

	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_sgs;

	/* Note: on success prepare_transfer()'s return is reused below as
	 * scratch; only a negative value is an error here.
	 */
	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, num_trbs, urb, &td, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;
	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller than
	 *    the amount of memory allocated for this scatter-gather list.
	 * 3. TRBs buffers can't cross 64KB boundaries.
	 */
	sg = urb->sg->sg;
	addr = (u64) sg_dma_address(sg);
	this_sg_len = sg_dma_len(sg);
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;
	xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
			trb_buff_len);

	first_trb = true;
	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb)
			first_trb = false;
		else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}
		xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
				"64KB boundary at %#x, end dma = %#x\n",
				(unsigned int) addr, trb_buff_len, trb_buff_len,
				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
				(unsigned int) addr + trb_buff_len);
		/* count_sg_trbs_needed() should have split chunks so this
		 * never triggers; warn loudly if the math disagrees.
		 */
		if (TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
					(unsigned int) addr + trb_buff_len);
		}
		queue_trb(xhci, ep_ring, false,
				(u32) addr,
				(u32) ((u64) addr >> 32),
				TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
				/* We always want to know if the TRB was short,
				 * or we won't get an event when it completes.
				 * (Unless we use event data TRBs, which are a
				 * waste of space and HC resources.)
				 */
				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer --
		 * Are we done queueing all the TRBs for this sg entry?
		 */
		this_sg_len -= trb_buff_len;
		if (this_sg_len == 0) {
			/* Advance to the next sg entry (or finish) */
			--num_sgs;
			if (num_sgs == 0)
				break;
			sg = sg_next(sg);
			addr = (u64) sg_dma_address(sg);
			this_sg_len = sg_dma_len(sg);
		} else {
			addr += trb_buff_len;
		}

		/* Next chunk: clipped to the 64KB boundary, the rest of this
		 * sg entry, and the URB's remaining requested length.
		 */
		trb_buff_len = TRB_MAX_BUFF_SIZE -
			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
		if (running_total + trb_buff_len > urb->transfer_buffer_length)
			trb_buff_len =
				urb->transfer_buffer_length - running_total;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
	return 0;
}
1357
/* This is very similar to what ehci-q.c qtd_fill() does */
/*
 * Queue a bulk transfer on a single contiguous DMA buffer.
 *
 * Scatter-gather URBs are diverted to queue_bulk_sg_tx().  Otherwise the
 * buffer is split at 64KB boundaries (TRB buffers can't cross them) into
 * chained Normal TRBs forming one TD; the last TRB gets IOC.  As in the
 * sg path, the first TRB's cycle bit is flipped last via
 * giveback_first_trb() so the HC only sees a complete TD.
 *
 * Returns 0 on success or a negative error from prepare_transfer().
 */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field;

	int running_total, trb_buff_len, ret;
	u64 addr;

	if (urb->sg)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

	if (!in_interrupt())
		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				(unsigned long long)urb->transfer_dma,
				num_trbs);

	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
			num_trbs, urb, &td, mem_flags);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/* How much data is in the first TRB? (up to the 64KB boundary) */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
	if (urb->transfer_buffer_length < trb_buff_len)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb)
			first_trb = false;
		else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}
		queue_trb(xhci, ep_ring, false,
				(u32) addr,
				(u32) ((u64) addr >> 32),
				TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
				/* We always want to know if the TRB was short,
				 * or we won't get an event when it completes.
				 * (Unless we use event data TRBs, which are a
				 * waste of space and HC resources.)
				 */
				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer: remaining bytes,
		 * capped at one full 64KB-aligned TRB buffer.
		 */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
	return 0;
}
1470
/* Caller must have locked xhci->lock */
/*
 * Queue a control transfer: a Setup TRB, an optional single Data TRB,
 * and a Status TRB, per xHCI sections 6.4.1.2.1-6.4.1.2.3.
 *
 * The 8-byte setup packet is copied into the Setup TRB itself (IDT -
 * immediate data), so urb->setup_packet is required but its DMA mapping
 * is not used.  Returns -EINVAL without a setup packet, a negative error
 * from prepare_transfer() if the ring has no room, 0 on success.
 */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field;
	struct xhci_td *td;

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	if (!in_interrupt())
		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
				slot_id, ep_index);
	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
			urb, &td, mem_flags);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	queue_trb(xhci, ep_ring, false,
			/* FIXME endianness is probably going to bite my ass here.
			 * (wValue/wIndex/wLength are __le16; this packing is only
			 * correct on little-endian hosts - needs le16_to_cpu.)
			 */
			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
			setup->wIndex | setup->wLength << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			TRB_IDT | TRB_TYPE(TRB_SETUP));

	/* If there's data, queue data TRBs */
	field = 0;
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, false,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
				/* Event on short tx */
				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
	return 0;
}
1561
1562/**** Command Ring Operations ****/
1563
1564/* Generic function for queueing a command TRB on the command ring */
1565static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
1566{
1567 if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
1568 if (!in_interrupt())
1569 xhci_err(xhci, "ERR: No room for command on command ring\n");
1570 return -ENOMEM;
1571 }
1572 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
1573 field4 | xhci->cmd_ring->cycle_state);
1574 return 0;
1575}
1576
/* Queue a no-op command on the command ring.
 * Returns 0 on success, -ENOMEM if the command ring is full.
 */
static int queue_cmd_noop(struct xhci_hcd *xhci)
{
	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
}
1582
/*
 * Place a no-op command on the command ring to test the command and
 * event ring.
 *
 * Returns NULL if the no-op could not be queued.  On success, bumps the
 * count of submitted no-ops and returns the address of the
 * xhci_ring_cmd_db function — NOTE(review): presumably for the caller to
 * invoke later to ring the command doorbell; confirm against the caller.
 */
void *xhci_setup_one_noop(struct xhci_hcd *xhci)
{
	if (queue_cmd_noop(xhci) < 0)
		return NULL;
	xhci->noops_submitted++;
	return xhci_ring_cmd_db;
}
1594
/* Queue a slot enable or disable request on the command ring.
 * @trb_type selects enable vs. disable; @slot_id is encoded into the
 * control field.  Returns 0 on success, -ENOMEM if the ring is full.
 */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
}
1601
/* Queue an address device command TRB.
 * @in_ctx_ptr is the DMA address of the input context; its low 32 bits
 * go in the first TRB field.  Returns 0 on success, -ENOMEM if full.
 */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, in_ctx_ptr, 0, 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
}
1609
/* Queue a configure endpoint command TRB.
 * @in_ctx_ptr is the DMA address of the input context.
 * Returns 0 on success, -ENOMEM if the command ring is full.
 */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, in_ctx_ptr, 0, 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
}
1617
1618int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
1619 unsigned int ep_index)
1620{
1621 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
1622 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
1623 u32 type = TRB_TYPE(TRB_STOP_RING);
1624
1625 return queue_command(xhci, 0, 0, 0,
1626 trb_slot_id | trb_ep_index | type);
1627}
1628
1629/* Set Transfer Ring Dequeue Pointer command.
1630 * This should not be used for endpoints that have streams enabled.
1631 */
1632static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
1633 unsigned int ep_index, struct xhci_segment *deq_seg,
1634 union xhci_trb *deq_ptr, u32 cycle_state)
1635{
1636 dma_addr_t addr;
1637 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
1638 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
1639 u32 type = TRB_TYPE(TRB_SET_DEQ);
1640
1641 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
1642 if (addr == 0)
1643 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
1644 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
1645 deq_seg, deq_ptr);
1646 return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
1647 trb_slot_id | trb_ep_index | type);
1648}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
new file mode 100644
index 000000000000..8936eeb5588b
--- /dev/null
+++ b/drivers/usb/host/xhci.h
@@ -0,0 +1,1157 @@
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#ifndef __LINUX_XHCI_HCD_H
24#define __LINUX_XHCI_HCD_H
25
26#include <linux/usb.h>
27#include <linux/timer.h>
28
29#include "../core/hcd.h"
30/* Code sharing between pci-quirks and xhci hcd */
31#include "xhci-ext-caps.h"
32
33/* xHCI PCI Configuration Registers */
34#define XHCI_SBRN_OFFSET (0x60)
35
36/* Max number of USB devices for any host controller - limit in section 6.1 */
37#define MAX_HC_SLOTS 256
38/* Section 5.3.3 - MaxPorts */
39#define MAX_HC_PORTS 127
40
41/*
42 * xHCI register interface.
43 * This corresponds to the eXtensible Host Controller Interface (xHCI)
44 * Revision 0.95 specification
45 *
46 * Registers should always be accessed with double word or quad word accesses.
47 *
48 * Some xHCI implementations may support 64-bit address pointers. Registers
49 * with 64-bit address pointers should be written to with dword accesses by
50 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
51 * xHCI implementations that do not support 64-bit address pointers will ignore
52 * the high dword, and write order is irrelevant.
53 */
54
55/**
56 * struct xhci_cap_regs - xHCI Host Controller Capability Registers.
57 * @hc_capbase: length of the capabilities register and HC version number
58 * @hcs_params1: HCSPARAMS1 - Structural Parameters 1
59 * @hcs_params2: HCSPARAMS2 - Structural Parameters 2
60 * @hcs_params3: HCSPARAMS3 - Structural Parameters 3
61 * @hcc_params: HCCPARAMS - Capability Parameters
62 * @db_off: DBOFF - Doorbell array offset
63 * @run_regs_off: RTSOFF - Runtime register space offset
64 */
struct xhci_cap_regs {
	u32	hc_capbase;	/* CAPLENGTH in bits 7:0, HCIVERSION in 31:16 */
	u32	hcs_params1;	/* HCSPARAMS1: slots, interrupters, ports */
	u32	hcs_params2;	/* HCSPARAMS2: IST, ERST max, scratchpads */
	u32	hcs_params3;	/* HCSPARAMS3: U1/U2 exit latencies */
	u32	hcc_params;	/* HCCPARAMS: capability flags, ext caps ptr */
	u32	db_off;		/* DBOFF: doorbell array offset */
	u32	run_regs_off;	/* RTSOFF: runtime register space offset */
	/* Reserved up to (CAPLENGTH - 0x1C) */
};
75
76/* hc_capbase bitmasks */
77/* bits 7:0 - how long is the Capabilities register */
78#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
79/* bits 31:16 */
80#define HC_VERSION(p) (((p) >> 16) & 0xffff)
81
82/* HCSPARAMS1 - hcs_params1 - bitmasks */
83/* bits 0:7, Max Device Slots */
84#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
85#define HCS_SLOTS_MASK 0xff
86/* bits 8:18, Max Interrupters */
87#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
88/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
89#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
90
91/* HCSPARAMS2 - hcs_params2 - bitmasks */
92/* bits 0:3, frames or uframes that SW needs to queue transactions
93 * ahead of the HW to meet periodic deadlines */
94#define HCS_IST(p) (((p) >> 0) & 0xf)
95/* bits 4:7, max number of Event Ring segments */
96#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
97/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
98/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
99
100/* HCSPARAMS3 - hcs_params3 - bitmasks */
101/* bits 0:7, Max U1 to U0 latency for the roothub ports */
102#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
103/* bits 16:31, Max U2 to U0 latency for the roothub ports */
104#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
105
/* HCCPARAMS - hcc_params - bitmasks */
/* true: HC can use 64-bit address pointers */
#define HCC_64BIT_ADDR(p)	((p) & (1 << 0))
/* true: HC can do bandwidth negotiation */
#define HCC_BANDWIDTH_NEG(p)	((p) & (1 << 1))
/* true: HC uses 64-byte Device Context structures
 * FIXME 64-byte context structures aren't supported yet.
 */
#define HCC_64BYTE_CONTEXT(p)	((p) & (1 << 2))
/* true: HC has port power switches */
#define HCC_PPC(p)		((p) & (1 << 3))
/* true: HC has port indicators */
#define HCS_INDICATOR(p)	((p) & (1 << 4))
/* true: HC has Light HC Reset Capability */
#define HCC_LIGHT_RESET(p)	((p) & (1 << 5))
/* true: HC supports latency tolerance messaging */
#define HCC_LTC(p)		((p) & (1 << 6))
/* true: no secondary Stream ID Support */
#define HCC_NSS(p)		((p) & (1 << 7))
/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15.
 * (Was missing its parameter list, so any use of HCC_MAX_PSA(x) failed
 * to compile or silently captured a local named 'p'.)
 */
#define HCC_MAX_PSA(p)		(1 << ((((p) >> 12) & 0xf) + 1))
/* Extended Capabilities pointer from PCI base - section 5.3.6 */
#define HCC_EXT_CAPS(p)		XHCI_HCC_EXT_CAPS(p)
129
130/* db_off bitmask - bits 0:1 reserved */
131#define DBOFF_MASK (~0x3)
132
133/* run_regs_off bitmask - bits 0:4 reserved */
134#define RTSOFF_MASK (~0x1f)
135
136
137/* Number of registers per port */
138#define NUM_PORT_REGS 4
139
140/**
141 * struct xhci_op_regs - xHCI Host Controller Operational Registers.
142 * @command: USBCMD - xHC command register
143 * @status: USBSTS - xHC status register
144 * @page_size: This indicates the page size that the host controller
145 * supports. If bit n is set, the HC supports a page size
146 * of 2^(n+12), up to a 128MB page size.
147 * 4K is the minimum page size.
148 * @cmd_ring: CRP - 64-bit Command Ring Pointer
149 * @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer
150 * @config_reg: CONFIG - Configure Register
151 * @port_status_base: PORTSCn - base address for Port Status and Control
152 * Each port has a Port Status and Control register,
153 * followed by a Port Power Management Status and Control
154 * register, a Port Link Info register, and a reserved
155 * register.
156 * @port_power_base: PORTPMSCn - base address for
157 * Port Power Management Status and Control
158 * @port_link_base: PORTLIn - base address for Port Link Info (current
159 * Link PM state and control) for USB 2.1 and USB 3.0
160 * devices.
161 */
struct xhci_op_regs {
	u32	command;		/* USBCMD */
	u32	status;			/* USBSTS */
	u32	page_size;		/* PAGESIZE - supported page sizes */
	u32	reserved1;
	u32	reserved2;
	u32	dev_notification;	/* DNCTRL */
	u32	cmd_ring[2];		/* CRCR - 64-bit; write low dword first */
	/* rsvd: offset 0x20-2F */
	u32	reserved3[4];
	u32	dcbaa_ptr[2];		/* DCBAAP - 64-bit; write low dword first */
	u32	config_reg;		/* CONFIG */
	/* rsvd: offset 0x3C-3FF */
	u32	reserved4[241];
	/* port 1 registers, which serve as a base address for other ports */
	u32	port_status_base;	/* PORTSC */
	u32	port_power_base;	/* PORTPMSC */
	u32	port_link_base;		/* PORTLI */
	u32	reserved5;
	/* registers for ports 2-255 */
	u32	reserved6[NUM_PORT_REGS*254];
};
184
185/* USBCMD - USB command - command bitmasks */
186/* start/stop HC execution - do not write unless HC is halted*/
187#define CMD_RUN XHCI_CMD_RUN
188/* Reset HC - resets internal HC state machine and all registers (except
189 * PCI config regs). HC does NOT drive a USB reset on the downstream ports.
190 * The xHCI driver must reinitialize the xHC after setting this bit.
191 */
192#define CMD_RESET (1 << 1)
193/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
194#define CMD_EIE XHCI_CMD_EIE
195/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
196#define CMD_HSEIE XHCI_CMD_HSEIE
197/* bits 4:6 are reserved (and should be preserved on writes). */
198/* light reset (port status stays unchanged) - reset completed when this is 0 */
199#define CMD_LRESET (1 << 7)
200/* FIXME: ignoring host controller save/restore state for now. */
201#define CMD_CSS (1 << 8)
202#define CMD_CRS (1 << 9)
203/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
204#define CMD_EWE XHCI_CMD_EWE
205/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
206 * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
207 * '0' means the xHC can power it off if all ports are in the disconnect,
208 * disabled, or powered-off state.
209 */
210#define CMD_PM_INDEX (1 << 11)
211/* bits 12:31 are reserved (and should be preserved on writes). */
212
213/* USBSTS - USB status - status bitmasks */
214/* HC not running - set to 1 when run/stop bit is cleared. */
215#define STS_HALT XHCI_STS_HALT
216/* serious error, e.g. PCI parity error. The HC will clear the run/stop bit. */
217#define STS_FATAL (1 << 2)
218/* event interrupt - clear this prior to clearing any IP flags in IR set*/
219#define STS_EINT (1 << 3)
220/* port change detect */
221#define STS_PORT (1 << 4)
222/* bits 5:7 reserved and zeroed */
223/* save state status - '1' means xHC is saving state */
224#define STS_SAVE (1 << 8)
225/* restore state status - '1' means xHC is restoring state */
226#define STS_RESTORE (1 << 9)
227/* true: save or restore error */
228#define STS_SRE (1 << 10)
229/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
230#define STS_CNR XHCI_STS_CNR
231/* true: internal Host Controller Error - SW needs to reset and reinitialize */
232#define STS_HCE (1 << 12)
233/* bits 13:31 reserved and should be preserved */
234
235/*
236 * DNCTRL - Device Notification Control Register - dev_notification bitmasks
237 * Generate a device notification event when the HC sees a transaction with a
238 * notification type that matches a bit set in this bit field.
239 */
240#define DEV_NOTE_MASK (0xffff)
241#define ENABLE_DEV_NOTE(x) (1 << x)
242/* Most of the device notification types should only be used for debug.
243 * SW does need to pay attention to function wake notifications.
244 */
245#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1)
246
247/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
248/* bit 0 is the command ring cycle state */
249/* stop ring operation after completion of the currently executing command */
250#define CMD_RING_PAUSE (1 << 1)
251/* stop ring immediately - abort the currently executing command */
252#define CMD_RING_ABORT (1 << 2)
253/* true: command ring is running */
254#define CMD_RING_RUNNING (1 << 3)
255/* bits 4:5 reserved and should be preserved */
256/* Command Ring pointer - bit mask for the lower 32 bits. */
257#define CMD_RING_ADDR_MASK (0xffffffc0)
258
259/* CONFIG - Configure Register - config_reg bitmasks */
260/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
261#define MAX_DEVS(p) ((p) & 0xff)
262/* bits 8:31 - reserved and should be preserved */
263
264/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
265/* true: device connected */
266#define PORT_CONNECT (1 << 0)
267/* true: port enabled */
268#define PORT_PE (1 << 1)
269/* bit 2 reserved and zeroed */
270/* true: port has an over-current condition */
271#define PORT_OC (1 << 3)
272/* true: port reset signaling asserted */
273#define PORT_RESET (1 << 4)
274/* Port Link State - bits 5:8
275 * A read gives the current link PM state of the port,
276 * a write with Link State Write Strobe set sets the link state.
277 */
278/* true: port has power (see HCC_PPC) */
279#define PORT_POWER (1 << 9)
280/* bits 10:13 indicate device speed:
281 * 0 - undefined speed - port hasn't be initialized by a reset yet
282 * 1 - full speed
283 * 2 - low speed
284 * 3 - high speed
285 * 4 - super speed
286 * 5-15 reserved
287 */
288#define DEV_SPEED_MASK (0xf << 10)
289#define XDEV_FS (0x1 << 10)
290#define XDEV_LS (0x2 << 10)
291#define XDEV_HS (0x3 << 10)
292#define XDEV_SS (0x4 << 10)
293#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
294#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
295#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
296#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
297#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
298/* Bits 20:23 in the Slot Context are the speed for the device */
299#define SLOT_SPEED_FS (XDEV_FS << 10)
300#define SLOT_SPEED_LS (XDEV_LS << 10)
301#define SLOT_SPEED_HS (XDEV_HS << 10)
302#define SLOT_SPEED_SS (XDEV_SS << 10)
303/* Port Indicator Control */
304#define PORT_LED_OFF (0 << 14)
305#define PORT_LED_AMBER (1 << 14)
306#define PORT_LED_GREEN (2 << 14)
307#define PORT_LED_MASK (3 << 14)
308/* Port Link State Write Strobe - set this when changing link state */
309#define PORT_LINK_STROBE (1 << 16)
310/* true: connect status change */
311#define PORT_CSC (1 << 17)
312/* true: port enable change */
313#define PORT_PEC (1 << 18)
314/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
315 * into an enabled state, and the device into the default state. A "warm" reset
316 * also resets the link, forcing the device through the link training sequence.
317 * SW can also look at the Port Reset register to see when warm reset is done.
318 */
319#define PORT_WRC (1 << 19)
320/* true: over-current change */
321#define PORT_OCC (1 << 20)
322/* true: reset change - 1 to 0 transition of PORT_RESET */
323#define PORT_RC (1 << 21)
324/* port link status change - set on some port link state transitions:
325 * Transition Reason
326 * ------------------------------------------------------------------------------
327 * - U3 to Resume Wakeup signaling from a device
328 * - Resume to Recovery to U0 USB 3.0 device resume
329 * - Resume to U0 USB 2.0 device resume
330 * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
331 * - U3 to U0 Software resume of USB 2.0 device complete
332 * - U2 to U0 L1 resume of USB 2.1 device complete
333 * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
334 * - U0 to disabled L1 entry error with USB 2.1 device
335 * - Any state to inactive Error on USB 3.0 port
336 */
337#define PORT_PLC (1 << 22)
338/* port configure error change - port failed to configure its link partner */
339#define PORT_CEC (1 << 23)
340/* bit 24 reserved */
341/* wake on connect (enable) */
342#define PORT_WKCONN_E (1 << 25)
343/* wake on disconnect (enable) */
344#define PORT_WKDISC_E (1 << 26)
345/* wake on over-current (enable) */
346#define PORT_WKOC_E (1 << 27)
347/* bits 28:29 reserved */
348/* true: device is removable - for USB 3.0 roothub emulation */
349#define PORT_DEV_REMOVE (1 << 30)
350/* Initiate a warm port reset - complete when PORT_WRC is '1' */
351#define PORT_WR (1 << 31)
352
353/* Port Power Management Status and Control - port_power_base bitmasks */
354/* Inactivity timer value for transitions into U1, in microseconds.
355 * Timeout can be up to 127us. 0xFF means an infinite timeout.
356 */
357#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
358/* Inactivity timer value for transitions into U2 */
359#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
360/* Bits 24:31 for port testing */
361
362
363/**
364 * struct xhci_intr_reg - Interrupt Register Set
365 * @irq_pending: IMAN - Interrupt Management Register. Used to enable
366 * interrupts and check for pending interrupts.
367 * @irq_control: IMOD - Interrupt Moderation Register.
368 * Used to throttle interrupts.
369 * @erst_size: Number of segments in the Event Ring Segment Table (ERST).
370 * @erst_base: ERST base address.
371 * @erst_dequeue: Event ring dequeue pointer.
372 *
373 * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
374 * Ring Segment Table (ERST) associated with it. The event ring is comprised of
375 * multiple segments of the same size. The HC places events on the ring and
376 * "updates the Cycle bit in the TRBs to indicate to software the current
377 * position of the Enqueue Pointer." The HCD (Linux) processes those events and
378 * updates the dequeue pointer.
379 */
struct xhci_intr_reg {
	u32	irq_pending;	/* IMAN - interrupt enable and pending bits */
	u32	irq_control;	/* IMOD - interrupt moderation interval */
	u32	erst_size;	/* number of segments in the ERST */
	u32	rsvd;
	u32	erst_base[2];	/* ERST base address - 64-bit, low dword first */
	u32	erst_dequeue[2];	/* event ring dequeue pointer - 64-bit */
};
388
389/* irq_pending bitmasks */
390#define ER_IRQ_PENDING(p) ((p) & 0x1)
391/* bits 2:31 need to be preserved */
392/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
393#define ER_IRQ_CLEAR(p) ((p) & 0xfffffffe)
394#define ER_IRQ_ENABLE(p) ((ER_IRQ_CLEAR(p)) | 0x2)
395#define ER_IRQ_DISABLE(p) ((ER_IRQ_CLEAR(p)) & ~(0x2))
396
397/* irq_control bitmasks */
398/* Minimum interval between interrupts (in 250ns intervals). The interval
399 * between interrupts will be longer if there are no events on the event ring.
400 * Default is 4000 (1 ms).
401 */
402#define ER_IRQ_INTERVAL_MASK (0xffff)
403/* Counter used to count down the time to the next interrupt - HW use only */
404#define ER_IRQ_COUNTER_MASK (0xffff << 16)
405
406/* erst_size bitmasks */
407/* Preserve bits 16:31 of erst_size */
408#define ERST_SIZE_MASK (0xffff << 16)
409
410/* erst_dequeue bitmasks */
411/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
412 * where the current dequeue pointer lies. This is an optional HW hint.
413 */
414#define ERST_DESI_MASK (0x7)
415/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
416 * a work queue (or delayed service routine)?
417 */
418#define ERST_EHB (1 << 3)
419#define ERST_PTR_MASK (0xf)
420
421/**
422 * struct xhci_run_regs
423 * @microframe_index:
424 * MFINDEX - current microframe number
425 *
426 * Section 5.5 Host Controller Runtime Registers:
427 * "Software should read and write these registers using only Dword (32 bit)
428 * or larger accesses"
429 */
struct xhci_run_regs {
	u32	microframe_index;	/* MFINDEX - current microframe number */
	u32	rsvd[7];
	/* one register set per interrupter (MSI-X vector), up to 128 */
	struct xhci_intr_reg	ir_set[128];
};
435
436/**
437 * struct doorbell_array
438 *
439 * Section 5.6
440 */
441struct xhci_doorbell_array {
442 u32 doorbell[256];
443};
444
#define DB_TARGET_MASK 0xFFFFFF00
/* NOTE(review): this mask overlaps DB_TARGET_MASK in bits 8:15 - verify the
 * intended doorbell register layout against the spec before relying on it.
 */
#define DB_STREAM_ID_MASK 0x0000FFFF
#define DB_TARGET_HOST 0x0
#define DB_STREAM_ID_HOST 0x0
#define DB_MASK (0xff << 8)

/* Endpoint Target - bits 0:7 */
/* Doorbell targets are ep_index + 1 (target 0 is reserved for the host) */
#define EPI_TO_DB(p) (((p) + 1) & 0xff)
453
454
455/**
456 * struct xhci_slot_ctx
457 * @dev_info: Route string, device speed, hub info, and last valid endpoint
458 * @dev_info2: Max exit latency for device number, root hub port number
459 * @tt_info: tt_info is used to construct split transaction tokens
460 * @dev_state: slot state and device address
461 *
462 * Slot Context - section 6.2.1.1. This assumes the HC uses 32-byte context
463 * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
464 * reserved at the end of the slot context for HC internal use.
465 */
466struct xhci_slot_ctx {
467 u32 dev_info;
468 u32 dev_info2;
469 u32 tt_info;
470 u32 dev_state;
471 /* offset 0x10 to 0x1f reserved for HC internal use */
472 u32 reserved[4];
473};
474
475/* dev_info bitmasks */
476/* Route String - 0:19 */
477#define ROUTE_STRING_MASK (0xfffff)
478/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
479#define DEV_SPEED (0xf << 20)
480/* bit 24 reserved */
481/* Is this LS/FS device connected through a HS hub? - bit 25 */
482#define DEV_MTT (0x1 << 25)
483/* Set if the device is a hub - bit 26 */
484#define DEV_HUB (0x1 << 26)
485/* Index of the last valid endpoint context in this device context - 27:31 */
486#define LAST_CTX_MASK (0x1f << 27)
487#define LAST_CTX(p) ((p) << 27)
488#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1)
489#define SLOT_FLAG (1 << 0)
490#define EP0_FLAG (1 << 1)
491
492/* dev_info2 bitmasks */
493/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
494#define MAX_EXIT (0xffff)
495/* Root hub port number that is needed to access the USB device */
496#define ROOT_HUB_PORT(p) (((p) & 0xff) << 16)
497
498/* tt_info bitmasks */
499/*
500 * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
501 * The Slot ID of the hub that isolates the high speed signaling from
502 * this low or full-speed device. '0' if attached to root hub port.
503 */
504#define TT_SLOT (0xff)
505/*
506 * The number of the downstream facing port of the high-speed hub
507 * '0' if the device is not low or full speed.
508 */
509#define TT_PORT (0xff << 8)
510
511/* dev_state bitmasks */
512/* USB device address - assigned by the HC */
513#define DEV_ADDR_MASK (0xff)
514/* bits 8:26 reserved */
515/* Slot state */
516#define SLOT_STATE (0x1f << 27)
517#define GET_SLOT_STATE(p) (((p) & (0x1f << 27)) >> 27)
518
519
520/**
521 * struct xhci_ep_ctx
522 * @ep_info: endpoint state, streams, mult, and interval information.
523 * @ep_info2: information on endpoint type, max packet size, max burst size,
524 * error count, and whether the HC will force an event for all
525 * transactions.
526 * @deq: 64-bit ring dequeue pointer address. If the endpoint only
527 * defines one stream, this points to the endpoint transfer ring.
528 * Otherwise, it points to a stream context array, which has a
529 * ring pointer for each flow.
530 * @tx_info:
531 * Average TRB lengths for the endpoint ring and
532 * max payload within an Endpoint Service Interval Time (ESIT).
533 *
534 * Endpoint Context - section 6.2.1.2. This assumes the HC uses 32-byte context
535 * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
536 * reserved at the end of the endpoint context for HC internal use.
537 */
538struct xhci_ep_ctx {
539 u32 ep_info;
540 u32 ep_info2;
541 u32 deq[2];
542 u32 tx_info;
543 /* offset 0x14 - 0x1f reserved for HC internal use */
544 u32 reserved[3];
545};
546
/* ep_info bitmasks */
/*
 * Endpoint State - bits 0:2
 * 0 - disabled
 * 1 - running
 * 2 - halted due to halt condition - ok to manipulate endpoint ring
 * 3 - stopped
 * 4 - TRB error
 * 5-7 - reserved
 */
#define EP_STATE_MASK (0xf)
#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4
/* Mult - Max number of bursts within an interval, in EP companion desc. */
#define EP_MULT(p) (((p) & 0x3) << 8)
/* bits 10:14 are Max Primary Streams */
/* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125us increments. */
#define EP_INTERVAL(p) (((p) & 0xff) << 16)
569
570/* ep_info2 bitmasks */
571/*
572 * Force Event - generate transfer events for all TRBs for this endpoint
573 * This will tell the HC to ignore the IOC and ISP flags (for debugging only).
574 */
575#define FORCE_EVENT (0x1)
576#define ERROR_COUNT(p) (((p) & 0x3) << 1)
577#define EP_TYPE(p) ((p) << 3)
578#define ISOC_OUT_EP 1
579#define BULK_OUT_EP 2
580#define INT_OUT_EP 3
581#define CTRL_EP 4
582#define ISOC_IN_EP 5
583#define BULK_IN_EP 6
584#define INT_IN_EP 7
585/* bit 6 reserved */
586/* bit 7 is Host Initiate Disable - for disabling stream selection */
587#define MAX_BURST(p) (((p)&0xff) << 8)
588#define MAX_PACKET(p) (((p)&0xffff) << 16)
589
590
591/**
592 * struct xhci_device_control
593 * Input/Output context; see section 6.2.5.
594 *
595 * @drop_context: set the bit of the endpoint context you want to disable
596 * @add_context: set the bit of the endpoint context you want to enable
597 */
598struct xhci_device_control {
599 u32 drop_flags;
600 u32 add_flags;
601 u32 rsvd[6];
602 struct xhci_slot_ctx slot;
603 struct xhci_ep_ctx ep[31];
604};
605
/* drop context bitmasks */
/* Bit for endpoint context x in the input control context's drop flags.
 * The argument is parenthesized so composed expressions (e.g. a | b)
 * are evaluated before the shift.
 */
#define DROP_EP(x) (0x1 << (x))
/* add context bitmasks */
/* Bit for endpoint context x in the input control context's add flags */
#define ADD_EP(x) (0x1 << (x))
610
611
612struct xhci_virt_device {
613 /*
614 * Commands to the hardware are passed an "input context" that
615 * tells the hardware what to change in its data structures.
616 * The hardware will return changes in an "output context" that
617 * software must allocate for the hardware. We need to keep
618 * track of input and output contexts separately because
619 * these commands might fail and we don't trust the hardware.
620 */
621 struct xhci_device_control *out_ctx;
622 dma_addr_t out_ctx_dma;
623 /* Used for addressing devices and configuration changes */
624 struct xhci_device_control *in_ctx;
625 dma_addr_t in_ctx_dma;
626 /* FIXME when stream support is added */
627 struct xhci_ring *ep_rings[31];
628 /* Temporary storage in case the configure endpoint command fails and we
629 * have to restore the device state to the previous state
630 */
631 struct xhci_ring *new_ep_rings[31];
632 struct completion cmd_completion;
633 /* Status of the last command issued for this device */
634 u32 cmd_status;
635};
636
637
638/**
639 * struct xhci_device_context_array
640 * @dev_context_ptr array of 64-bit DMA addresses for device contexts
641 */
642struct xhci_device_context_array {
643 /* 64-bit device addresses; we only write 32-bit addresses */
644 u32 dev_context_ptrs[2*MAX_HC_SLOTS];
645 /* private xHCD pointers */
646 dma_addr_t dma;
647};
648/* TODO: write function to set the 64-bit device DMA address */
649/*
650 * TODO: change this to be dynamically sized at HC mem init time since the HC
651 * might not be able to handle the maximum number of devices possible.
652 */
653
654
655struct xhci_stream_ctx {
656 /* 64-bit stream ring address, cycle state, and stream type */
657 u32 stream_ring[2];
658 /* offset 0x14 - 0x1f reserved for HC internal use */
659 u32 reserved[2];
660};
661
662
663struct xhci_transfer_event {
664 /* 64-bit buffer address, or immediate data */
665 u32 buffer[2];
666 u32 transfer_len;
667 /* This field is interpreted differently based on the type of TRB */
668 u32 flags;
669};
670
671/** Transfer Event bit fields **/
672#define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
673
674/* Completion Code - only applicable for some types of TRBs */
675#define COMP_CODE_MASK (0xff << 24)
676#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24)
677#define COMP_SUCCESS 1
678/* Data Buffer Error */
679#define COMP_DB_ERR 2
680/* Babble Detected Error */
681#define COMP_BABBLE 3
682/* USB Transaction Error */
683#define COMP_TX_ERR 4
684/* TRB Error - some TRB field is invalid */
685#define COMP_TRB_ERR 5
686/* Stall Error - USB device is stalled */
687#define COMP_STALL 6
688/* Resource Error - HC doesn't have memory for that device configuration */
689#define COMP_ENOMEM 7
690/* Bandwidth Error - not enough room in schedule for this dev config */
691#define COMP_BW_ERR 8
692/* No Slots Available Error - HC ran out of device slots */
693#define COMP_ENOSLOTS 9
694/* Invalid Stream Type Error */
695#define COMP_STREAM_ERR 10
696/* Slot Not Enabled Error - doorbell rung for disabled device slot */
697#define COMP_EBADSLT 11
698/* Endpoint Not Enabled Error */
699#define COMP_EBADEP 12
700/* Short Packet */
701#define COMP_SHORT_TX 13
702/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */
703#define COMP_UNDERRUN 14
704/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */
705#define COMP_OVERRUN 15
706/* Virtual Function Event Ring Full Error */
707#define COMP_VF_FULL 16
708/* Parameter Error - Context parameter is invalid */
709#define COMP_EINVAL 17
710/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */
711#define COMP_BW_OVER 18
712/* Context State Error - illegal context state transition requested */
713#define COMP_CTX_STATE 19
714/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */
715#define COMP_PING_ERR 20
716/* Event Ring is full */
717#define COMP_ER_FULL 21
718/* Missed Service Error - HC couldn't service an isoc ep within interval */
719#define COMP_MISSED_INT 23
720/* Successfully stopped command ring */
721#define COMP_CMD_STOP 24
722/* Successfully aborted current command and stopped command ring */
723#define COMP_CMD_ABORT 25
724/* Stopped - transfer was terminated by a stop endpoint command */
725#define COMP_STOP 26
/* Same as COMP_STOP, but the transferred length in the event is invalid */
727#define COMP_STOP_INVAL 27
728/* Control Abort Error - Debug Capability - control pipe aborted */
729#define COMP_DBG_ABORT 28
730/* TRB type 29 and 30 reserved */
731/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
732#define COMP_BUFF_OVER 31
733/* Event Lost Error - xHC has an "internal event overrun condition" */
734#define COMP_ISSUES 32
735/* Undefined Error - reported when other error codes don't apply */
736#define COMP_UNKNOWN 33
737/* Invalid Stream ID Error */
738#define COMP_STRID_ERR 34
739/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
740/* FIXME - check for this */
741#define COMP_2ND_BW_ERR 35
742/* Split Transaction Error */
743#define COMP_SPLIT_ERR 36
744
745struct xhci_link_trb {
746 /* 64-bit segment pointer*/
747 u32 segment_ptr[2];
748 u32 intr_target;
749 u32 control;
750};
751
752/* control bitfields */
753#define LINK_TOGGLE (0x1<<1)
754
755/* Command completion event TRB */
756struct xhci_event_cmd {
757 /* Pointer to command TRB, or the value passed by the event data trb */
758 u32 cmd_trb[2];
759 u32 status;
760 u32 flags;
761};
762
763/* flags bitmasks */
764/* bits 16:23 are the virtual function ID */
765/* bits 24:31 are the slot ID */
766#define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24)
767#define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24)
768
769/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
770#define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1)
771#define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16)
772
773
774/* Port Status Change Event TRB fields */
775/* Port ID - bits 31:24 */
776#define GET_PORT_ID(p) (((p) & (0xff << 24)) >> 24)
777
778/* Normal TRB fields */
779/* transfer_len bitmasks - bits 0:16 */
780#define TRB_LEN(p) ((p) & 0x1ffff)
781/* TD size - number of bytes remaining in the TD (including this TRB):
782 * bits 17 - 21. Shift the number of bytes by 10. */
783#define TD_REMAINDER(p) ((((p) >> 10) & 0x1f) << 17)
784/* Interrupter Target - which MSI-X vector to target the completion event at */
785#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
786#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
787
788/* Cycle bit - indicates TRB ownership by HC or HCD */
789#define TRB_CYCLE (1<<0)
790/*
791 * Force next event data TRB to be evaluated before task switch.
792 * Used to pass OS data back after a TD completes.
793 */
794#define TRB_ENT (1<<1)
795/* Interrupt on short packet */
796#define TRB_ISP (1<<2)
797/* Set PCIe no snoop attribute */
798#define TRB_NO_SNOOP (1<<3)
799/* Chain multiple TRBs into a TD */
800#define TRB_CHAIN (1<<4)
801/* Interrupt on completion */
802#define TRB_IOC (1<<5)
803/* The buffer pointer contains immediate data */
804#define TRB_IDT (1<<6)
805
806
807/* Control transfer TRB specific fields */
808#define TRB_DIR_IN (1<<16)
809
810struct xhci_generic_trb {
811 u32 field[4];
812};
813
814union xhci_trb {
815 struct xhci_link_trb link;
816 struct xhci_transfer_event trans_event;
817 struct xhci_event_cmd event_cmd;
818 struct xhci_generic_trb generic;
819};
820
821/* TRB bit mask */
822#define TRB_TYPE_BITMASK (0xfc00)
823#define TRB_TYPE(p) ((p) << 10)
824/* TRB type IDs */
825/* bulk, interrupt, isoc scatter/gather, and control data stage */
826#define TRB_NORMAL 1
827/* setup stage for control transfers */
828#define TRB_SETUP 2
829/* data stage for control transfers */
830#define TRB_DATA 3
831/* status stage for control transfers */
832#define TRB_STATUS 4
833/* isoc transfers */
834#define TRB_ISOC 5
835/* TRB for linking ring segments */
836#define TRB_LINK 6
837#define TRB_EVENT_DATA 7
838/* Transfer Ring No-op (not for the command ring) */
839#define TRB_TR_NOOP 8
840/* Command TRBs */
841/* Enable Slot Command */
842#define TRB_ENABLE_SLOT 9
843/* Disable Slot Command */
844#define TRB_DISABLE_SLOT 10
845/* Address Device Command */
846#define TRB_ADDR_DEV 11
847/* Configure Endpoint Command */
848#define TRB_CONFIG_EP 12
849/* Evaluate Context Command */
850#define TRB_EVAL_CONTEXT 13
851/* Reset Transfer Ring Command */
852#define TRB_RESET_RING 14
853/* Stop Transfer Ring Command */
854#define TRB_STOP_RING 15
855/* Set Transfer Ring Dequeue Pointer Command */
856#define TRB_SET_DEQ 16
857/* Reset Device Command */
858#define TRB_RESET_DEV 17
859/* Force Event Command (opt) */
860#define TRB_FORCE_EVENT 18
861/* Negotiate Bandwidth Command (opt) */
862#define TRB_NEG_BANDWIDTH 19
863/* Set Latency Tolerance Value Command (opt) */
864#define TRB_SET_LT 20
865/* Get port bandwidth Command */
866#define TRB_GET_BW 21
867/* Force Header Command - generate a transaction or link management packet */
868#define TRB_FORCE_HEADER 22
869/* No-op Command - not for transfer rings */
870#define TRB_CMD_NOOP 23
871/* TRB IDs 24-31 reserved */
872/* Event TRBS */
873/* Transfer Event */
874#define TRB_TRANSFER 32
875/* Command Completion Event */
876#define TRB_COMPLETION 33
877/* Port Status Change Event */
878#define TRB_PORT_STATUS 34
879/* Bandwidth Request Event (opt) */
880#define TRB_BANDWIDTH_EVENT 35
881/* Doorbell Event (opt) */
882#define TRB_DOORBELL 36
883/* Host Controller Event */
884#define TRB_HC_EVENT 37
885/* Device Notification Event - device sent function wake notification */
886#define TRB_DEV_NOTE 38
887/* MFINDEX Wrap Event - microframe counter wrapped */
888#define TRB_MFINDEX_WRAP 39
889/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
890
891/*
892 * TRBS_PER_SEGMENT must be a multiple of 4,
893 * since the command ring is 64-byte aligned.
894 * It must also be greater than 16.
895 */
896#define TRBS_PER_SEGMENT 64
897#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
898/* TRB buffer pointers can't cross 64KB boundaries */
899#define TRB_MAX_BUFF_SHIFT 16
900#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
901
902struct xhci_segment {
903 union xhci_trb *trbs;
904 /* private to HCD */
905 struct xhci_segment *next;
906 dma_addr_t dma;
907};
908
909struct xhci_td {
910 struct list_head td_list;
911 struct list_head cancelled_td_list;
912 struct urb *urb;
913 struct xhci_segment *start_seg;
914 union xhci_trb *first_trb;
915 union xhci_trb *last_trb;
916};
917
/* A TRB ring (command, event, or transfer) made of linked xhci_segments */
struct xhci_ring {
	struct xhci_segment *first_seg;
	/* Enqueue pointer (next TRB to fill) and the segment containing it */
	union xhci_trb *enqueue;
	struct xhci_segment *enq_seg;
	/* Count of enqueue-pointer advances - presumably for debugging; confirm */
	unsigned int enq_updates;
	/* Dequeue pointer (next TRB to consume) and the segment containing it */
	union xhci_trb *dequeue;
	struct xhci_segment *deq_seg;
	/* Count of dequeue-pointer advances - presumably for debugging; confirm */
	unsigned int deq_updates;
	/* TDs queued on this ring, linked through xhci_td.td_list */
	struct list_head td_list;
	/* ---- Related to URB cancellation ---- */
	/* TDs awaiting cancellation, linked through xhci_td.cancelled_td_list */
	struct list_head cancelled_td_list;
	unsigned int cancels_pending;
	/* Ring state flags; currently only SET_DEQ_PENDING is defined */
	unsigned int state;
#define SET_DEQ_PENDING (1 << 0)
	/* The TRB that was last reported in a stopped endpoint ring */
	union xhci_trb *stopped_trb;
	struct xhci_td *stopped_td;
	/*
	 * Write the cycle state into the TRB cycle field to give ownership of
	 * the TRB to the host controller (if we are the producer), or to check
	 * if we own the TRB (if we are the consumer). See section 4.9.1.
	 */
	u32 cycle_state;
};
942
943struct xhci_erst_entry {
944 /* 64-bit event ring segment address */
945 u32 seg_addr[2];
946 u32 seg_size;
947 /* Set to zero */
948 u32 rsvd;
949};
950
951struct xhci_erst {
952 struct xhci_erst_entry *entries;
953 unsigned int num_entries;
954 /* xhci->event_ring keeps track of segment dma addresses */
955 dma_addr_t erst_dma_addr;
956 /* Num entries the ERST can contain */
957 unsigned int erst_size;
958};
959
960/*
961 * Each segment table entry is 4*32bits long. 1K seems like an ok size:
962 * (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table,
963 * meaning 64 ring segments.
964 * Initial allocated size of the ERST, in number of entries */
965#define ERST_NUM_SEGS 1
966/* Initial allocated size of the ERST, in number of entries */
967#define ERST_SIZE 64
968/* Initial number of event segment rings allocated */
969#define ERST_ENTRIES 1
970/* Poll every 60 seconds */
971#define POLL_TIMEOUT 60
972/* XXX: Make these module parameters */
973
974
/* There is one xhci_hcd structure per controller */
976struct xhci_hcd {
977 /* glue to PCI and HCD framework */
978 struct xhci_cap_regs __iomem *cap_regs;
979 struct xhci_op_regs __iomem *op_regs;
980 struct xhci_run_regs __iomem *run_regs;
981 struct xhci_doorbell_array __iomem *dba;
982 /* Our HCD's current interrupter register set */
983 struct xhci_intr_reg __iomem *ir_set;
984
985 /* Cached register copies of read-only HC data */
986 __u32 hcs_params1;
987 __u32 hcs_params2;
988 __u32 hcs_params3;
989 __u32 hcc_params;
990
991 spinlock_t lock;
992
993 /* packed release number */
994 u8 sbrn;
995 u16 hci_version;
996 u8 max_slots;
997 u8 max_interrupters;
998 u8 max_ports;
999 u8 isoc_threshold;
1000 int event_ring_max;
1001 int addr_64;
1002 /* 4KB min, 128MB max */
1003 int page_size;
1004 /* Valid values are 12 to 20, inclusive */
1005 int page_shift;
1006 /* only one MSI vector for now, but might need more later */
1007 int msix_count;
1008 struct msix_entry *msix_entries;
1009 /* data structures */
1010 struct xhci_device_context_array *dcbaa;
1011 struct xhci_ring *cmd_ring;
1012 struct xhci_ring *event_ring;
1013 struct xhci_erst erst;
1014 /* slot enabling and address device helpers */
1015 struct completion addr_dev;
1016 int slot_id;
1017 /* Internal mirror of the HW's dcbaa */
1018 struct xhci_virt_device *devs[MAX_HC_SLOTS];
1019
1020 /* DMA pools */
1021 struct dma_pool *device_pool;
1022 struct dma_pool *segment_pool;
1023
1024#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
1025 /* Poll the rings - for debugging */
1026 struct timer_list event_ring_timer;
1027 int zombie;
1028#endif
1029 /* Statistics */
1030 int noops_submitted;
1031 int noops_handled;
1032 int error_bitmask;
1033};
1034
1035/* For testing purposes */
1036#define NUM_TEST_NOOPS 0
1037
/* convert between an HCD pointer and the corresponding xhci_hcd */
static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
{
	return (struct xhci_hcd *) (hcd->hcd_priv);
}
1043
/* convert an xhci_hcd pointer back to its containing usb_hcd (inverse of
 * hcd_to_xhci: the xhci_hcd lives in usb_hcd's hcd_priv storage) */
static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
{
	return container_of((void *) xhci, struct usb_hcd, hcd_priv);
}
1048
1049#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
1050#define XHCI_DEBUG 1
1051#else
1052#define XHCI_DEBUG 0
1053#endif
1054
1055#define xhci_dbg(xhci, fmt, args...) \
1056 do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
1057#define xhci_info(xhci, fmt, args...) \
1058 do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
1059#define xhci_err(xhci, fmt, args...) \
1060 dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1061#define xhci_warn(xhci, fmt, args...) \
1062 dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
1063
1064/* TODO: copied from ehci.h - can be refactored? */
1065/* xHCI spec says all registers are little endian */
1066static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
1067 __u32 __iomem *regs)
1068{
1069 return readl(regs);
1070}
1071static inline void xhci_writel(struct xhci_hcd *xhci,
1072 const unsigned int val, __u32 __iomem *regs)
1073{
1074 if (!in_interrupt())
1075 xhci_dbg(xhci,
1076 "`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
1077 regs, val);
1078 writel(val, regs);
1079}
1080
1081/* xHCI debugging */
1082void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
1083void xhci_print_registers(struct xhci_hcd *xhci);
1084void xhci_dbg_regs(struct xhci_hcd *xhci);
1085void xhci_print_run_regs(struct xhci_hcd *xhci);
1086void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb);
1087void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb);
1088void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
1089void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
1090void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
1091void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
1092void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
1093void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep);
1094
/* xHCI memory management */
1096void xhci_mem_cleanup(struct xhci_hcd *xhci);
1097int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
1098void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
1099int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
1100int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
1101unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
1102unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
1103void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
1104int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
1105 struct usb_device *udev, struct usb_host_endpoint *ep,
1106 gfp_t mem_flags);
1107void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
1108
1109#ifdef CONFIG_PCI
1110/* xHCI PCI glue */
1111int xhci_register_pci(void);
1112void xhci_unregister_pci(void);
1113#endif
1114
1115/* xHCI host controller glue */
1116int xhci_halt(struct xhci_hcd *xhci);
1117int xhci_reset(struct xhci_hcd *xhci);
1118int xhci_init(struct usb_hcd *hcd);
1119int xhci_run(struct usb_hcd *hcd);
1120void xhci_stop(struct usb_hcd *hcd);
1121void xhci_shutdown(struct usb_hcd *hcd);
1122int xhci_get_frame(struct usb_hcd *hcd);
1123irqreturn_t xhci_irq(struct usb_hcd *hcd);
1124int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
1125void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
1126int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
1127int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
1128int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
1129int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
1130int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
1131int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1132void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1133
1134/* xHCI ring, segment, TRB, and TD functions */
1135dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
1136void xhci_ring_cmd_db(struct xhci_hcd *xhci);
1137void *xhci_setup_one_noop(struct xhci_hcd *xhci);
1138void xhci_handle_event(struct xhci_hcd *xhci);
1139void xhci_set_hc_event_deq(struct xhci_hcd *xhci);
1140int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
1141int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1142 u32 slot_id);
1143int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
1144 unsigned int ep_index);
1145int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
1146 int slot_id, unsigned int ep_index);
1147int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
1148 int slot_id, unsigned int ep_index);
1149int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
1150 u32 slot_id);
1151
1152/* xHCI roothub code */
1153int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
1154 char *buf, u16 wLength);
1155int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
1156
1157#endif /* __LINUX_XHCI_HCD_H */
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index a4ef77ef917d..3c5fe5cee05a 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -726,12 +726,18 @@ static const struct file_operations iowarrior_fops = {
726 .poll = iowarrior_poll, 726 .poll = iowarrior_poll,
727}; 727};
728 728
729static char *iowarrior_nodename(struct device *dev)
730{
731 return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
732}
733
729/* 734/*
730 * usb class driver info in order to get a minor number from the usb core, 735 * usb class driver info in order to get a minor number from the usb core,
731 * and to have the device registered with devfs and the driver core 736 * and to have the device registered with devfs and the driver core
732 */ 737 */
733static struct usb_class_driver iowarrior_class = { 738static struct usb_class_driver iowarrior_class = {
734 .name = "iowarrior%d", 739 .name = "iowarrior%d",
740 .nodename = iowarrior_nodename,
735 .fops = &iowarrior_fops, 741 .fops = &iowarrior_fops,
736 .minor_base = IOWARRIOR_MINOR_BASE, 742 .minor_base = IOWARRIOR_MINOR_BASE,
737}; 743};
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index ab0f3226158b..c1e2433f640d 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -266,12 +266,18 @@ static const struct file_operations tower_fops = {
266 .llseek = tower_llseek, 266 .llseek = tower_llseek,
267}; 267};
268 268
269static char *legousbtower_nodename(struct device *dev)
270{
271 return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
272}
273
269/* 274/*
270 * usb class driver info in order to get a minor number from the usb core, 275 * usb class driver info in order to get a minor number from the usb core,
271 * and to have the device registered with the driver core 276 * and to have the device registered with the driver core
272 */ 277 */
273static struct usb_class_driver tower_class = { 278static struct usb_class_driver tower_class = {
274 .name = "legousbtower%d", 279 .name = "legousbtower%d",
280 .nodename = legousbtower_nodename,
275 .fops = &tower_fops, 281 .fops = &tower_fops,
276 .minor_base = LEGO_USB_TOWER_MINOR_BASE, 282 .minor_base = LEGO_USB_TOWER_MINOR_BASE,
277}; 283};
diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig
index 7603cbe0865d..30ea7ca6846e 100644
--- a/drivers/usb/misc/sisusbvga/Kconfig
+++ b/drivers/usb/misc/sisusbvga/Kconfig
@@ -1,7 +1,7 @@
1 1
2config USB_SISUSBVGA 2config USB_SISUSBVGA
3 tristate "USB 2.0 SVGA dongle support (Net2280/SiS315)" 3 tristate "USB 2.0 SVGA dongle support (Net2280/SiS315)"
4 depends on USB && USB_EHCI_HCD 4 depends on USB && (USB_MUSB_HDRC || USB_EHCI_HCD)
5 ---help--- 5 ---help---
6 Say Y here if you intend to attach a USB2VGA dongle based on a 6 Say Y here if you intend to attach a USB2VGA dongle based on a
7 Net2280 and a SiS315 chip. 7 Net2280 and a SiS315 chip.
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 5f1a19d1497d..a9f06d76960f 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1072,23 +1072,34 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async)
1072 */ 1072 */
1073 msleep (jiffies % (2 * INTERRUPT_RATE)); 1073 msleep (jiffies % (2 * INTERRUPT_RATE));
1074 if (async) { 1074 if (async) {
1075retry: 1075 while (!completion_done(&completion)) {
1076 retval = usb_unlink_urb (urb); 1076 retval = usb_unlink_urb(urb);
1077 if (retval == -EBUSY || retval == -EIDRM) { 1077
1078 /* we can't unlink urbs while they're completing. 1078 switch (retval) {
1079 * or if they've completed, and we haven't resubmitted. 1079 case -EBUSY:
1080 * "normal" drivers would prevent resubmission, but 1080 case -EIDRM:
1081 * since we're testing unlink paths, we can't. 1081 /* we can't unlink urbs while they're completing
1082 */ 1082 * or if they've completed, and we haven't
1083 ERROR(dev, "unlink retry\n"); 1083 * resubmitted. "normal" drivers would prevent
1084 goto retry; 1084 * resubmission, but since we're testing unlink
1085 * paths, we can't.
1086 */
1087 ERROR(dev, "unlink retry\n");
1088 continue;
1089 case 0:
1090 case -EINPROGRESS:
1091 break;
1092
1093 default:
1094 dev_err(&dev->intf->dev,
1095 "unlink fail %d\n", retval);
1096 return retval;
1097 }
1098
1099 break;
1085 } 1100 }
1086 } else 1101 } else
1087 usb_kill_urb (urb); 1102 usb_kill_urb (urb);
1088 if (!(retval == 0 || retval == -EINPROGRESS)) {
1089 dev_err(&dev->intf->dev, "unlink fail %d\n", retval);
1090 return retval;
1091 }
1092 1103
1093 wait_for_completion (&completion); 1104 wait_for_completion (&completion);
1094 retval = urb->status; 1105 retval = urb->status;
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 1f715436d6d3..a7eb4c99342c 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -733,7 +733,7 @@ int __init mon_text_init(void)
733{ 733{
734 struct dentry *mondir; 734 struct dentry *mondir;
735 735
736 mondir = debugfs_create_dir("usbmon", NULL); 736 mondir = debugfs_create_dir("usbmon", usb_debug_root);
737 if (IS_ERR(mondir)) { 737 if (IS_ERR(mondir)) {
738 printk(KERN_NOTICE TAG ": debugfs is not available\n"); 738 printk(KERN_NOTICE TAG ": debugfs is not available\n");
739 return -ENODEV; 739 return -ENODEV;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index b66e8544d8b9..70073b157f0a 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -10,6 +10,7 @@ comment "Enable Host or Gadget support to see Inventra options"
10config USB_MUSB_HDRC 10config USB_MUSB_HDRC
11 depends on (USB || USB_GADGET) && HAVE_CLK 11 depends on (USB || USB_GADGET) && HAVE_CLK
12 depends on !SUPERH 12 depends on !SUPERH
13 select NOP_USB_XCEIV if ARCH_DAVINCI
13 select TWL4030_USB if MACH_OMAP_3430SDP 14 select TWL4030_USB if MACH_OMAP_3430SDP
14 select USB_OTG_UTILS 15 select USB_OTG_UTILS
15 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' 16 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
@@ -55,6 +56,7 @@ comment "Blackfin high speed USB Support"
55config USB_TUSB6010 56config USB_TUSB6010
56 boolean "TUSB 6010 support" 57 boolean "TUSB 6010 support"
57 depends on USB_MUSB_HDRC && !USB_MUSB_SOC 58 depends on USB_MUSB_HDRC && !USB_MUSB_SOC
59 select NOP_USB_XCEIV
58 default y 60 default y
59 help 61 help
60 The TUSB 6010 chip, from Texas Instruments, connects a discrete 62 The TUSB 6010 chip, from Texas Instruments, connects a discrete
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 786134852092..f2f66ebc7362 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -143,7 +143,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
143 u16 val; 143 u16 val;
144 144
145 spin_lock_irqsave(&musb->lock, flags); 145 spin_lock_irqsave(&musb->lock, flags);
146 switch (musb->xceiv.state) { 146 switch (musb->xceiv->state) {
147 case OTG_STATE_A_IDLE: 147 case OTG_STATE_A_IDLE:
148 case OTG_STATE_A_WAIT_BCON: 148 case OTG_STATE_A_WAIT_BCON:
149 /* Start a new session */ 149 /* Start a new session */
@@ -154,7 +154,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
154 val = musb_readw(musb->mregs, MUSB_DEVCTL); 154 val = musb_readw(musb->mregs, MUSB_DEVCTL);
155 if (!(val & MUSB_DEVCTL_BDEVICE)) { 155 if (!(val & MUSB_DEVCTL_BDEVICE)) {
156 gpio_set_value(musb->config->gpio_vrsel, 1); 156 gpio_set_value(musb->config->gpio_vrsel, 1);
157 musb->xceiv.state = OTG_STATE_A_WAIT_BCON; 157 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
158 } else { 158 } else {
159 gpio_set_value(musb->config->gpio_vrsel, 0); 159 gpio_set_value(musb->config->gpio_vrsel, 0);
160 160
@@ -247,6 +247,11 @@ int __init musb_platform_init(struct musb *musb)
247 } 247 }
248 gpio_direction_output(musb->config->gpio_vrsel, 0); 248 gpio_direction_output(musb->config->gpio_vrsel, 0);
249 249
250 usb_nop_xceiv_register();
251 musb->xceiv = otg_get_transceiver();
252 if (!musb->xceiv)
253 return -ENODEV;
254
250 if (ANOMALY_05000346) { 255 if (ANOMALY_05000346) {
251 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); 256 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
252 SSYNC(); 257 SSYNC();
@@ -291,7 +296,7 @@ int __init musb_platform_init(struct musb *musb)
291 musb_conn_timer_handler, (unsigned long) musb); 296 musb_conn_timer_handler, (unsigned long) musb);
292 } 297 }
293 if (is_peripheral_enabled(musb)) 298 if (is_peripheral_enabled(musb))
294 musb->xceiv.set_power = bfin_set_power; 299 musb->xceiv->set_power = bfin_set_power;
295 300
296 musb->isr = blackfin_interrupt; 301 musb->isr = blackfin_interrupt;
297 302
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 1976e9b41800..c3577bbbae6c 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -6,6 +6,7 @@
6 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci. 6 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
7 */ 7 */
8 8
9#include <linux/platform_device.h>
9#include <linux/usb.h> 10#include <linux/usb.h>
10 11
11#include "musb_core.h" 12#include "musb_core.h"
@@ -1145,17 +1146,27 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
1145 return completed; 1146 return completed;
1146} 1147}
1147 1148
1148void cppi_completion(struct musb *musb, u32 rx, u32 tx) 1149irqreturn_t cppi_interrupt(int irq, void *dev_id)
1149{ 1150{
1150 void __iomem *tibase; 1151 struct musb *musb = dev_id;
1151 int i, index;
1152 struct cppi *cppi; 1152 struct cppi *cppi;
1153 void __iomem *tibase;
1153 struct musb_hw_ep *hw_ep = NULL; 1154 struct musb_hw_ep *hw_ep = NULL;
1155 u32 rx, tx;
1156 int i, index;
1154 1157
1155 cppi = container_of(musb->dma_controller, struct cppi, controller); 1158 cppi = container_of(musb->dma_controller, struct cppi, controller);
1156 1159
1157 tibase = musb->ctrl_base; 1160 tibase = musb->ctrl_base;
1158 1161
1162 tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
1163 rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
1164
1165 if (!tx && !rx)
1166 return IRQ_NONE;
1167
1168 DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
1169
1159 /* process TX channels */ 1170 /* process TX channels */
1160 for (index = 0; tx; tx = tx >> 1, index++) { 1171 for (index = 0; tx; tx = tx >> 1, index++) {
1161 struct cppi_channel *tx_ch; 1172 struct cppi_channel *tx_ch;
@@ -1273,6 +1284,8 @@ void cppi_completion(struct musb *musb, u32 rx, u32 tx)
1273 1284
1274 /* write to CPPI EOI register to re-enable interrupts */ 1285 /* write to CPPI EOI register to re-enable interrupts */
1275 musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0); 1286 musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
1287
1288 return IRQ_HANDLED;
1276} 1289}
1277 1290
1278/* Instantiate a software object representing a DMA controller. */ 1291/* Instantiate a software object representing a DMA controller. */
@@ -1280,6 +1293,9 @@ struct dma_controller *__init
1280dma_controller_create(struct musb *musb, void __iomem *mregs) 1293dma_controller_create(struct musb *musb, void __iomem *mregs)
1281{ 1294{
1282 struct cppi *controller; 1295 struct cppi *controller;
1296 struct device *dev = musb->controller;
1297 struct platform_device *pdev = to_platform_device(dev);
1298 int irq = platform_get_irq(pdev, 1);
1283 1299
1284 controller = kzalloc(sizeof *controller, GFP_KERNEL); 1300 controller = kzalloc(sizeof *controller, GFP_KERNEL);
1285 if (!controller) 1301 if (!controller)
@@ -1310,6 +1326,15 @@ dma_controller_create(struct musb *musb, void __iomem *mregs)
1310 return NULL; 1326 return NULL;
1311 } 1327 }
1312 1328
1329 if (irq > 0) {
1330 if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
1331 dev_err(dev, "request_irq %d failed!\n", irq);
1332 dma_controller_destroy(&controller->controller);
1333 return NULL;
1334 }
1335 controller->irq = irq;
1336 }
1337
1313 return &controller->controller; 1338 return &controller->controller;
1314} 1339}
1315 1340
@@ -1322,6 +1347,9 @@ void dma_controller_destroy(struct dma_controller *c)
1322 1347
1323 cppi = container_of(c, struct cppi, controller); 1348 cppi = container_of(c, struct cppi, controller);
1324 1349
1350 if (cppi->irq)
1351 free_irq(cppi->irq, cppi->musb);
1352
1325 /* assert: caller stopped the controller first */ 1353 /* assert: caller stopped the controller first */
1326 dma_pool_destroy(cppi->pool); 1354 dma_pool_destroy(cppi->pool);
1327 1355
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
index 729b4071787b..8a39de3e6e47 100644
--- a/drivers/usb/musb/cppi_dma.h
+++ b/drivers/usb/musb/cppi_dma.h
@@ -119,6 +119,8 @@ struct cppi {
119 void __iomem *mregs; /* Mentor regs */ 119 void __iomem *mregs; /* Mentor regs */
120 void __iomem *tibase; /* TI/CPPI regs */ 120 void __iomem *tibase; /* TI/CPPI regs */
121 121
122 int irq;
123
122 struct cppi_channel tx[4]; 124 struct cppi_channel tx[4];
123 struct cppi_channel rx[4]; 125 struct cppi_channel rx[4];
124 126
@@ -127,7 +129,7 @@ struct cppi {
127 struct list_head tx_complete; 129 struct list_head tx_complete;
128}; 130};
129 131
130/* irq handling hook */ 132/* CPPI IRQ handler */
131extern void cppi_completion(struct musb *, u32 rx, u32 tx); 133extern irqreturn_t cppi_interrupt(int, void *);
132 134
133#endif /* end of ifndef _CPPI_DMA_H_ */ 135#endif /* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 10d11ab113ab..180d7daa4099 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -215,7 +215,7 @@ static void otg_timer(unsigned long _musb)
215 DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb)); 215 DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
216 216
217 spin_lock_irqsave(&musb->lock, flags); 217 spin_lock_irqsave(&musb->lock, flags);
218 switch (musb->xceiv.state) { 218 switch (musb->xceiv->state) {
219 case OTG_STATE_A_WAIT_VFALL: 219 case OTG_STATE_A_WAIT_VFALL:
220 /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL 220 /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
221 * seems to mis-handle session "start" otherwise (or in our 221 * seems to mis-handle session "start" otherwise (or in our
@@ -226,7 +226,7 @@ static void otg_timer(unsigned long _musb)
226 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 226 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
227 break; 227 break;
228 } 228 }
229 musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; 229 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
230 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, 230 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
231 MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT); 231 MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
232 break; 232 break;
@@ -251,7 +251,7 @@ static void otg_timer(unsigned long _musb)
251 if (devctl & MUSB_DEVCTL_BDEVICE) 251 if (devctl & MUSB_DEVCTL_BDEVICE)
252 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 252 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
253 else 253 else
254 musb->xceiv.state = OTG_STATE_A_IDLE; 254 musb->xceiv->state = OTG_STATE_A_IDLE;
255 break; 255 break;
256 default: 256 default:
257 break; 257 break;
@@ -265,6 +265,7 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
265 irqreturn_t retval = IRQ_NONE; 265 irqreturn_t retval = IRQ_NONE;
266 struct musb *musb = __hci; 266 struct musb *musb = __hci;
267 void __iomem *tibase = musb->ctrl_base; 267 void __iomem *tibase = musb->ctrl_base;
268 struct cppi *cppi;
268 u32 tmp; 269 u32 tmp;
269 270
270 spin_lock_irqsave(&musb->lock, flags); 271 spin_lock_irqsave(&musb->lock, flags);
@@ -281,16 +282,9 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
281 /* CPPI interrupts share the same IRQ line, but have their own 282 /* CPPI interrupts share the same IRQ line, but have their own
282 * mask, state, "vector", and EOI registers. 283 * mask, state, "vector", and EOI registers.
283 */ 284 */
284 if (is_cppi_enabled()) { 285 cppi = container_of(musb->dma_controller, struct cppi, controller);
285 u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); 286 if (is_cppi_enabled() && musb->dma_controller && !cppi->irq)
286 u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); 287 retval = cppi_interrupt(irq, __hci);
287
288 if (cppi_tx || cppi_rx) {
289 DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
290 cppi_completion(musb, cppi_rx, cppi_tx);
291 retval = IRQ_HANDLED;
292 }
293 }
294 288
295 /* ack and handle non-CPPI interrupts */ 289 /* ack and handle non-CPPI interrupts */
296 tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG); 290 tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
@@ -331,21 +325,21 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
331 * to stop registering in devctl. 325 * to stop registering in devctl.
332 */ 326 */
333 musb->int_usb &= ~MUSB_INTR_VBUSERROR; 327 musb->int_usb &= ~MUSB_INTR_VBUSERROR;
334 musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; 328 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
335 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 329 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
336 WARNING("VBUS error workaround (delay coming)\n"); 330 WARNING("VBUS error workaround (delay coming)\n");
337 } else if (is_host_enabled(musb) && drvvbus) { 331 } else if (is_host_enabled(musb) && drvvbus) {
338 musb->is_active = 1; 332 musb->is_active = 1;
339 MUSB_HST_MODE(musb); 333 MUSB_HST_MODE(musb);
340 musb->xceiv.default_a = 1; 334 musb->xceiv->default_a = 1;
341 musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; 335 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
342 portstate(musb->port1_status |= USB_PORT_STAT_POWER); 336 portstate(musb->port1_status |= USB_PORT_STAT_POWER);
343 del_timer(&otg_workaround); 337 del_timer(&otg_workaround);
344 } else { 338 } else {
345 musb->is_active = 0; 339 musb->is_active = 0;
346 MUSB_DEV_MODE(musb); 340 MUSB_DEV_MODE(musb);
347 musb->xceiv.default_a = 0; 341 musb->xceiv->default_a = 0;
348 musb->xceiv.state = OTG_STATE_B_IDLE; 342 musb->xceiv->state = OTG_STATE_B_IDLE;
349 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); 343 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
350 } 344 }
351 345
@@ -367,17 +361,12 @@ static irqreturn_t davinci_interrupt(int irq, void *__hci)
367 361
368 /* poll for ID change */ 362 /* poll for ID change */
369 if (is_otg_enabled(musb) 363 if (is_otg_enabled(musb)
370 && musb->xceiv.state == OTG_STATE_B_IDLE) 364 && musb->xceiv->state == OTG_STATE_B_IDLE)
371 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 365 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
372 366
373 spin_unlock_irqrestore(&musb->lock, flags); 367 spin_unlock_irqrestore(&musb->lock, flags);
374 368
375 /* REVISIT we sometimes get unhandled IRQs 369 return retval;
376 * (e.g. ep0). not clear why...
377 */
378 if (retval != IRQ_HANDLED)
379 DBG(5, "unhandled? %08x\n", tmp);
380 return IRQ_HANDLED;
381} 370}
382 371
383int musb_platform_set_mode(struct musb *musb, u8 mode) 372int musb_platform_set_mode(struct musb *musb, u8 mode)
@@ -391,6 +380,11 @@ int __init musb_platform_init(struct musb *musb)
391 void __iomem *tibase = musb->ctrl_base; 380 void __iomem *tibase = musb->ctrl_base;
392 u32 revision; 381 u32 revision;
393 382
383 usb_nop_xceiv_register();
384 musb->xceiv = otg_get_transceiver();
385 if (!musb->xceiv)
386 return -ENODEV;
387
394 musb->mregs += DAVINCI_BASE_OFFSET; 388 musb->mregs += DAVINCI_BASE_OFFSET;
395 389
396 clk_enable(musb->clock); 390 clk_enable(musb->clock);
@@ -398,7 +392,7 @@ int __init musb_platform_init(struct musb *musb)
398 /* returns zero if e.g. not clocked */ 392 /* returns zero if e.g. not clocked */
399 revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); 393 revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
400 if (revision == 0) 394 if (revision == 0)
401 return -ENODEV; 395 goto fail;
402 396
403 if (is_host_enabled(musb)) 397 if (is_host_enabled(musb))
404 setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); 398 setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
@@ -432,6 +426,10 @@ int __init musb_platform_init(struct musb *musb)
432 426
433 musb->isr = davinci_interrupt; 427 musb->isr = davinci_interrupt;
434 return 0; 428 return 0;
429
430fail:
431 usb_nop_xceiv_unregister();
432 return -ENODEV;
435} 433}
436 434
437int musb_platform_exit(struct musb *musb) 435int musb_platform_exit(struct musb *musb)
@@ -442,7 +440,7 @@ int musb_platform_exit(struct musb *musb)
442 davinci_source_power(musb, 0 /*off*/, 1); 440 davinci_source_power(musb, 0 /*off*/, 1);
443 441
444 /* delay, to avoid problems with module reload */ 442 /* delay, to avoid problems with module reload */
445 if (is_host_enabled(musb) && musb->xceiv.default_a) { 443 if (is_host_enabled(musb) && musb->xceiv->default_a) {
446 int maxdelay = 30; 444 int maxdelay = 30;
447 u8 devctl, warn = 0; 445 u8 devctl, warn = 0;
448 446
@@ -471,5 +469,7 @@ int musb_platform_exit(struct musb *musb)
471 469
472 clk_disable(musb->clock); 470 clk_disable(musb->clock);
473 471
472 usb_nop_xceiv_unregister();
473
474 return 0; 474 return 0;
475} 475}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 4000cf6d1e81..554a414f65d1 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -112,6 +112,7 @@
112#include "davinci.h" 112#include "davinci.h"
113#endif 113#endif
114 114
115#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
115 116
116 117
117unsigned musb_debug; 118unsigned musb_debug;
@@ -267,7 +268,7 @@ void musb_load_testpacket(struct musb *musb)
267 268
268const char *otg_state_string(struct musb *musb) 269const char *otg_state_string(struct musb *musb)
269{ 270{
270 switch (musb->xceiv.state) { 271 switch (musb->xceiv->state) {
271 case OTG_STATE_A_IDLE: return "a_idle"; 272 case OTG_STATE_A_IDLE: return "a_idle";
272 case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise"; 273 case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise";
273 case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon"; 274 case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon";
@@ -288,12 +289,6 @@ const char *otg_state_string(struct musb *musb)
288#ifdef CONFIG_USB_MUSB_OTG 289#ifdef CONFIG_USB_MUSB_OTG
289 290
290/* 291/*
291 * See also USB_OTG_1-3.pdf 6.6.5 Timers
292 * REVISIT: Are the other timers done in the hardware?
293 */
294#define TB_ASE0_BRST 100 /* Min 3.125 ms */
295
296/*
297 * Handles OTG hnp timeouts, such as b_ase0_brst 292 * Handles OTG hnp timeouts, such as b_ase0_brst
298 */ 293 */
299void musb_otg_timer_func(unsigned long data) 294void musb_otg_timer_func(unsigned long data)
@@ -302,16 +297,18 @@ void musb_otg_timer_func(unsigned long data)
302 unsigned long flags; 297 unsigned long flags;
303 298
304 spin_lock_irqsave(&musb->lock, flags); 299 spin_lock_irqsave(&musb->lock, flags);
305 switch (musb->xceiv.state) { 300 switch (musb->xceiv->state) {
306 case OTG_STATE_B_WAIT_ACON: 301 case OTG_STATE_B_WAIT_ACON:
307 DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n"); 302 DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n");
308 musb_g_disconnect(musb); 303 musb_g_disconnect(musb);
309 musb->xceiv.state = OTG_STATE_B_PERIPHERAL; 304 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
310 musb->is_active = 0; 305 musb->is_active = 0;
311 break; 306 break;
307 case OTG_STATE_A_SUSPEND:
312 case OTG_STATE_A_WAIT_BCON: 308 case OTG_STATE_A_WAIT_BCON:
313 DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n"); 309 DBG(1, "HNP: %s timeout\n", otg_state_string(musb));
314 musb_hnp_stop(musb); 310 musb_set_vbus(musb, 0);
311 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
315 break; 312 break;
316 default: 313 default:
317 DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb)); 314 DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb));
@@ -320,10 +317,8 @@ void musb_otg_timer_func(unsigned long data)
320 spin_unlock_irqrestore(&musb->lock, flags); 317 spin_unlock_irqrestore(&musb->lock, flags);
321} 318}
322 319
323static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0);
324
325/* 320/*
326 * Stops the B-device HNP state. Caller must take care of locking. 321 * Stops the HNP transition. Caller must take care of locking.
327 */ 322 */
328void musb_hnp_stop(struct musb *musb) 323void musb_hnp_stop(struct musb *musb)
329{ 324{
@@ -331,20 +326,17 @@ void musb_hnp_stop(struct musb *musb)
331 void __iomem *mbase = musb->mregs; 326 void __iomem *mbase = musb->mregs;
332 u8 reg; 327 u8 reg;
333 328
334 switch (musb->xceiv.state) { 329 DBG(1, "HNP: stop from %s\n", otg_state_string(musb));
330
331 switch (musb->xceiv->state) {
335 case OTG_STATE_A_PERIPHERAL: 332 case OTG_STATE_A_PERIPHERAL:
336 case OTG_STATE_A_WAIT_VFALL:
337 case OTG_STATE_A_WAIT_BCON:
338 DBG(1, "HNP: Switching back to A-host\n");
339 musb_g_disconnect(musb); 333 musb_g_disconnect(musb);
340 musb->xceiv.state = OTG_STATE_A_IDLE; 334 DBG(1, "HNP: back to %s\n", otg_state_string(musb));
341 MUSB_HST_MODE(musb);
342 musb->is_active = 0;
343 break; 335 break;
344 case OTG_STATE_B_HOST: 336 case OTG_STATE_B_HOST:
345 DBG(1, "HNP: Disabling HR\n"); 337 DBG(1, "HNP: Disabling HR\n");
346 hcd->self.is_b_host = 0; 338 hcd->self.is_b_host = 0;
347 musb->xceiv.state = OTG_STATE_B_PERIPHERAL; 339 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
348 MUSB_DEV_MODE(musb); 340 MUSB_DEV_MODE(musb);
349 reg = musb_readb(mbase, MUSB_POWER); 341 reg = musb_readb(mbase, MUSB_POWER);
350 reg |= MUSB_POWER_SUSPENDM; 342 reg |= MUSB_POWER_SUSPENDM;
@@ -402,7 +394,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
402 394
403 if (devctl & MUSB_DEVCTL_HM) { 395 if (devctl & MUSB_DEVCTL_HM) {
404#ifdef CONFIG_USB_MUSB_HDRC_HCD 396#ifdef CONFIG_USB_MUSB_HDRC_HCD
405 switch (musb->xceiv.state) { 397 switch (musb->xceiv->state) {
406 case OTG_STATE_A_SUSPEND: 398 case OTG_STATE_A_SUSPEND:
407 /* remote wakeup? later, GetPortStatus 399 /* remote wakeup? later, GetPortStatus
408 * will stop RESUME signaling 400 * will stop RESUME signaling
@@ -425,12 +417,12 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
425 musb->rh_timer = jiffies 417 musb->rh_timer = jiffies
426 + msecs_to_jiffies(20); 418 + msecs_to_jiffies(20);
427 419
428 musb->xceiv.state = OTG_STATE_A_HOST; 420 musb->xceiv->state = OTG_STATE_A_HOST;
429 musb->is_active = 1; 421 musb->is_active = 1;
430 usb_hcd_resume_root_hub(musb_to_hcd(musb)); 422 usb_hcd_resume_root_hub(musb_to_hcd(musb));
431 break; 423 break;
432 case OTG_STATE_B_WAIT_ACON: 424 case OTG_STATE_B_WAIT_ACON:
433 musb->xceiv.state = OTG_STATE_B_PERIPHERAL; 425 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
434 musb->is_active = 1; 426 musb->is_active = 1;
435 MUSB_DEV_MODE(musb); 427 MUSB_DEV_MODE(musb);
436 break; 428 break;
@@ -441,11 +433,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
441 } 433 }
442#endif 434#endif
443 } else { 435 } else {
444 switch (musb->xceiv.state) { 436 switch (musb->xceiv->state) {
445#ifdef CONFIG_USB_MUSB_HDRC_HCD 437#ifdef CONFIG_USB_MUSB_HDRC_HCD
446 case OTG_STATE_A_SUSPEND: 438 case OTG_STATE_A_SUSPEND:
447 /* possibly DISCONNECT is upcoming */ 439 /* possibly DISCONNECT is upcoming */
448 musb->xceiv.state = OTG_STATE_A_HOST; 440 musb->xceiv->state = OTG_STATE_A_HOST;
449 usb_hcd_resume_root_hub(musb_to_hcd(musb)); 441 usb_hcd_resume_root_hub(musb_to_hcd(musb));
450 break; 442 break;
451#endif 443#endif
@@ -490,7 +482,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
490 */ 482 */
491 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); 483 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
492 musb->ep0_stage = MUSB_EP0_START; 484 musb->ep0_stage = MUSB_EP0_START;
493 musb->xceiv.state = OTG_STATE_A_IDLE; 485 musb->xceiv->state = OTG_STATE_A_IDLE;
494 MUSB_HST_MODE(musb); 486 MUSB_HST_MODE(musb);
495 musb_set_vbus(musb, 1); 487 musb_set_vbus(musb, 1);
496 488
@@ -516,7 +508,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
516 * REVISIT: do delays from lots of DEBUG_KERNEL checks 508 * REVISIT: do delays from lots of DEBUG_KERNEL checks
517 * make trouble here, keeping VBUS < 4.4V ? 509 * make trouble here, keeping VBUS < 4.4V ?
518 */ 510 */
519 switch (musb->xceiv.state) { 511 switch (musb->xceiv->state) {
520 case OTG_STATE_A_HOST: 512 case OTG_STATE_A_HOST:
521 /* recovery is dicey once we've gotten past the 513 /* recovery is dicey once we've gotten past the
522 * initial stages of enumeration, but if VBUS 514 * initial stages of enumeration, but if VBUS
@@ -594,37 +586,40 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
594 if (devctl & MUSB_DEVCTL_LSDEV) 586 if (devctl & MUSB_DEVCTL_LSDEV)
595 musb->port1_status |= USB_PORT_STAT_LOW_SPEED; 587 musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
596 588
597 if (hcd->status_urb)
598 usb_hcd_poll_rh_status(hcd);
599 else
600 usb_hcd_resume_root_hub(hcd);
601
602 MUSB_HST_MODE(musb);
603
604 /* indicate new connection to OTG machine */ 589 /* indicate new connection to OTG machine */
605 switch (musb->xceiv.state) { 590 switch (musb->xceiv->state) {
606 case OTG_STATE_B_PERIPHERAL: 591 case OTG_STATE_B_PERIPHERAL:
607 if (int_usb & MUSB_INTR_SUSPEND) { 592 if (int_usb & MUSB_INTR_SUSPEND) {
608 DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n"); 593 DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
609 musb->xceiv.state = OTG_STATE_B_HOST;
610 hcd->self.is_b_host = 1;
611 int_usb &= ~MUSB_INTR_SUSPEND; 594 int_usb &= ~MUSB_INTR_SUSPEND;
595 goto b_host;
612 } else 596 } else
613 DBG(1, "CONNECT as b_peripheral???\n"); 597 DBG(1, "CONNECT as b_peripheral???\n");
614 break; 598 break;
615 case OTG_STATE_B_WAIT_ACON: 599 case OTG_STATE_B_WAIT_ACON:
616 DBG(1, "HNP: Waiting to switch to b_host state\n"); 600 DBG(1, "HNP: CONNECT, now b_host\n");
617 musb->xceiv.state = OTG_STATE_B_HOST; 601b_host:
602 musb->xceiv->state = OTG_STATE_B_HOST;
618 hcd->self.is_b_host = 1; 603 hcd->self.is_b_host = 1;
604 musb->ignore_disconnect = 0;
605 del_timer(&musb->otg_timer);
619 break; 606 break;
620 default: 607 default:
621 if ((devctl & MUSB_DEVCTL_VBUS) 608 if ((devctl & MUSB_DEVCTL_VBUS)
622 == (3 << MUSB_DEVCTL_VBUS_SHIFT)) { 609 == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
623 musb->xceiv.state = OTG_STATE_A_HOST; 610 musb->xceiv->state = OTG_STATE_A_HOST;
624 hcd->self.is_b_host = 0; 611 hcd->self.is_b_host = 0;
625 } 612 }
626 break; 613 break;
627 } 614 }
615
616 /* poke the root hub */
617 MUSB_HST_MODE(musb);
618 if (hcd->status_urb)
619 usb_hcd_poll_rh_status(hcd);
620 else
621 usb_hcd_resume_root_hub(hcd);
622
628 DBG(1, "CONNECT (%s) devctl %02x\n", 623 DBG(1, "CONNECT (%s) devctl %02x\n",
629 otg_state_string(musb), devctl); 624 otg_state_string(musb), devctl);
630 } 625 }
@@ -650,7 +645,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
650 } 645 }
651 } else if (is_peripheral_capable()) { 646 } else if (is_peripheral_capable()) {
652 DBG(1, "BUS RESET as %s\n", otg_state_string(musb)); 647 DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
653 switch (musb->xceiv.state) { 648 switch (musb->xceiv->state) {
654#ifdef CONFIG_USB_OTG 649#ifdef CONFIG_USB_OTG
655 case OTG_STATE_A_SUSPEND: 650 case OTG_STATE_A_SUSPEND:
656 /* We need to ignore disconnect on suspend 651 /* We need to ignore disconnect on suspend
@@ -661,24 +656,27 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
661 musb_g_reset(musb); 656 musb_g_reset(musb);
662 /* FALLTHROUGH */ 657 /* FALLTHROUGH */
663 case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */ 658 case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
664 DBG(1, "HNP: Setting timer as %s\n", 659 /* never use invalid T(a_wait_bcon) */
665 otg_state_string(musb)); 660 DBG(1, "HNP: in %s, %d msec timeout\n",
666 musb_otg_timer.data = (unsigned long)musb; 661 otg_state_string(musb),
667 mod_timer(&musb_otg_timer, jiffies 662 TA_WAIT_BCON(musb));
668 + msecs_to_jiffies(100)); 663 mod_timer(&musb->otg_timer, jiffies
664 + msecs_to_jiffies(TA_WAIT_BCON(musb)));
669 break; 665 break;
670 case OTG_STATE_A_PERIPHERAL: 666 case OTG_STATE_A_PERIPHERAL:
671 musb_hnp_stop(musb); 667 musb->ignore_disconnect = 0;
668 del_timer(&musb->otg_timer);
669 musb_g_reset(musb);
672 break; 670 break;
673 case OTG_STATE_B_WAIT_ACON: 671 case OTG_STATE_B_WAIT_ACON:
674 DBG(1, "HNP: RESET (%s), to b_peripheral\n", 672 DBG(1, "HNP: RESET (%s), to b_peripheral\n",
675 otg_state_string(musb)); 673 otg_state_string(musb));
676 musb->xceiv.state = OTG_STATE_B_PERIPHERAL; 674 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
677 musb_g_reset(musb); 675 musb_g_reset(musb);
678 break; 676 break;
679#endif 677#endif
680 case OTG_STATE_B_IDLE: 678 case OTG_STATE_B_IDLE:
681 musb->xceiv.state = OTG_STATE_B_PERIPHERAL; 679 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
682 /* FALLTHROUGH */ 680 /* FALLTHROUGH */
683 case OTG_STATE_B_PERIPHERAL: 681 case OTG_STATE_B_PERIPHERAL:
684 musb_g_reset(musb); 682 musb_g_reset(musb);
@@ -763,7 +761,7 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
763 MUSB_MODE(musb), devctl); 761 MUSB_MODE(musb), devctl);
764 handled = IRQ_HANDLED; 762 handled = IRQ_HANDLED;
765 763
766 switch (musb->xceiv.state) { 764 switch (musb->xceiv->state) {
767#ifdef CONFIG_USB_MUSB_HDRC_HCD 765#ifdef CONFIG_USB_MUSB_HDRC_HCD
768 case OTG_STATE_A_HOST: 766 case OTG_STATE_A_HOST:
769 case OTG_STATE_A_SUSPEND: 767 case OTG_STATE_A_SUSPEND:
@@ -776,7 +774,16 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
776#endif /* HOST */ 774#endif /* HOST */
777#ifdef CONFIG_USB_MUSB_OTG 775#ifdef CONFIG_USB_MUSB_OTG
778 case OTG_STATE_B_HOST: 776 case OTG_STATE_B_HOST:
779 musb_hnp_stop(musb); 777 /* REVISIT this behaves for "real disconnect"
778 * cases; make sure the other transitions from
779 * from B_HOST act right too. The B_HOST code
780 * in hnp_stop() is currently not used...
781 */
782 musb_root_disconnect(musb);
783 musb_to_hcd(musb)->self.is_b_host = 0;
784 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
785 MUSB_DEV_MODE(musb);
786 musb_g_disconnect(musb);
780 break; 787 break;
781 case OTG_STATE_A_PERIPHERAL: 788 case OTG_STATE_A_PERIPHERAL:
782 musb_hnp_stop(musb); 789 musb_hnp_stop(musb);
@@ -805,26 +812,35 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
805 otg_state_string(musb), devctl, power); 812 otg_state_string(musb), devctl, power);
806 handled = IRQ_HANDLED; 813 handled = IRQ_HANDLED;
807 814
808 switch (musb->xceiv.state) { 815 switch (musb->xceiv->state) {
809#ifdef CONFIG_USB_MUSB_OTG 816#ifdef CONFIG_USB_MUSB_OTG
810 case OTG_STATE_A_PERIPHERAL: 817 case OTG_STATE_A_PERIPHERAL:
811 /* 818 /* We also come here if the cable is removed, since
812 * We cannot stop HNP here, devctl BDEVICE might be 819 * this silicon doesn't report ID-no-longer-grounded.
813 * still set. 820 *
821 * We depend on T(a_wait_bcon) to shut us down, and
822 * hope users don't do anything dicey during this
823 * undesired detour through A_WAIT_BCON.
814 */ 824 */
825 musb_hnp_stop(musb);
826 usb_hcd_resume_root_hub(musb_to_hcd(musb));
827 musb_root_disconnect(musb);
828 musb_platform_try_idle(musb, jiffies
829 + msecs_to_jiffies(musb->a_wait_bcon
830 ? : OTG_TIME_A_WAIT_BCON));
815 break; 831 break;
816#endif 832#endif
817 case OTG_STATE_B_PERIPHERAL: 833 case OTG_STATE_B_PERIPHERAL:
818 musb_g_suspend(musb); 834 musb_g_suspend(musb);
819 musb->is_active = is_otg_enabled(musb) 835 musb->is_active = is_otg_enabled(musb)
820 && musb->xceiv.gadget->b_hnp_enable; 836 && musb->xceiv->gadget->b_hnp_enable;
821 if (musb->is_active) { 837 if (musb->is_active) {
822#ifdef CONFIG_USB_MUSB_OTG 838#ifdef CONFIG_USB_MUSB_OTG
823 musb->xceiv.state = OTG_STATE_B_WAIT_ACON; 839 musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
824 DBG(1, "HNP: Setting timer for b_ase0_brst\n"); 840 DBG(1, "HNP: Setting timer for b_ase0_brst\n");
825 musb_otg_timer.data = (unsigned long)musb; 841 mod_timer(&musb->otg_timer, jiffies
826 mod_timer(&musb_otg_timer, jiffies 842 + msecs_to_jiffies(
827 + msecs_to_jiffies(TB_ASE0_BRST)); 843 OTG_TIME_B_ASE0_BRST));
828#endif 844#endif
829 } 845 }
830 break; 846 break;
@@ -834,9 +850,9 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
834 + msecs_to_jiffies(musb->a_wait_bcon)); 850 + msecs_to_jiffies(musb->a_wait_bcon));
835 break; 851 break;
836 case OTG_STATE_A_HOST: 852 case OTG_STATE_A_HOST:
837 musb->xceiv.state = OTG_STATE_A_SUSPEND; 853 musb->xceiv->state = OTG_STATE_A_SUSPEND;
838 musb->is_active = is_otg_enabled(musb) 854 musb->is_active = is_otg_enabled(musb)
839 && musb->xceiv.host->b_hnp_enable; 855 && musb->xceiv->host->b_hnp_enable;
840 break; 856 break;
841 case OTG_STATE_B_HOST: 857 case OTG_STATE_B_HOST:
842 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ 858 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
@@ -1068,14 +1084,13 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
1068{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, }, 1084{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
1069{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, }, 1085{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
1070{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, }, 1086{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
1071{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, }, 1087{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 256, },
1072{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, }, 1088{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 64, },
1073{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, }, 1089{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 256, },
1074{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, }, 1090{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 64, },
1075{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, }, 1091{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 256, },
1076{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, }, 1092{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 64, },
1077{ .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, }, 1093{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
1078{ .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, },
1079{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, }, 1094{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1080{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, 1095{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1081}; 1096};
@@ -1335,11 +1350,11 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1335 } 1350 }
1336 if (reg & MUSB_CONFIGDATA_HBRXE) { 1351 if (reg & MUSB_CONFIGDATA_HBRXE) {
1337 strcat(aInfo, ", HB-ISO Rx"); 1352 strcat(aInfo, ", HB-ISO Rx");
1338 strcat(aInfo, " (X)"); /* no driver support */ 1353 musb->hb_iso_rx = true;
1339 } 1354 }
1340 if (reg & MUSB_CONFIGDATA_HBTXE) { 1355 if (reg & MUSB_CONFIGDATA_HBTXE) {
1341 strcat(aInfo, ", HB-ISO Tx"); 1356 strcat(aInfo, ", HB-ISO Tx");
1342 strcat(aInfo, " (X)"); /* no driver support */ 1357 musb->hb_iso_tx = true;
1343 } 1358 }
1344 if (reg & MUSB_CONFIGDATA_SOFTCONE) 1359 if (reg & MUSB_CONFIGDATA_SOFTCONE)
1345 strcat(aInfo, ", SoftConn"); 1360 strcat(aInfo, ", SoftConn");
@@ -1481,13 +1496,7 @@ static irqreturn_t generic_interrupt(int irq, void *__hci)
1481 1496
1482 spin_unlock_irqrestore(&musb->lock, flags); 1497 spin_unlock_irqrestore(&musb->lock, flags);
1483 1498
1484 /* REVISIT we sometimes get spurious IRQs on g_ep0 1499 return retval;
1485 * not clear why...
1486 */
1487 if (retval != IRQ_HANDLED)
1488 DBG(5, "spurious?\n");
1489
1490 return IRQ_HANDLED;
1491} 1500}
1492 1501
1493#else 1502#else
@@ -1687,8 +1696,9 @@ musb_vbus_store(struct device *dev, struct device_attribute *attr,
1687 } 1696 }
1688 1697
1689 spin_lock_irqsave(&musb->lock, flags); 1698 spin_lock_irqsave(&musb->lock, flags);
1690 musb->a_wait_bcon = val; 1699 /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
1691 if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON) 1700 musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ;
1701 if (musb->xceiv->state == OTG_STATE_A_WAIT_BCON)
1692 musb->is_active = 0; 1702 musb->is_active = 0;
1693 musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val)); 1703 musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
1694 spin_unlock_irqrestore(&musb->lock, flags); 1704 spin_unlock_irqrestore(&musb->lock, flags);
@@ -1706,10 +1716,13 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1706 1716
1707 spin_lock_irqsave(&musb->lock, flags); 1717 spin_lock_irqsave(&musb->lock, flags);
1708 val = musb->a_wait_bcon; 1718 val = musb->a_wait_bcon;
1719 /* FIXME get_vbus_status() is normally #defined as false...
1720 * and is effectively TUSB-specific.
1721 */
1709 vbus = musb_platform_get_vbus_status(musb); 1722 vbus = musb_platform_get_vbus_status(musb);
1710 spin_unlock_irqrestore(&musb->lock, flags); 1723 spin_unlock_irqrestore(&musb->lock, flags);
1711 1724
1712 return sprintf(buf, "Vbus %s, timeout %lu\n", 1725 return sprintf(buf, "Vbus %s, timeout %lu msec\n",
1713 vbus ? "on" : "off", val); 1726 vbus ? "on" : "off", val);
1714} 1727}
1715static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store); 1728static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
@@ -1749,8 +1762,8 @@ static void musb_irq_work(struct work_struct *data)
1749 struct musb *musb = container_of(data, struct musb, irq_work); 1762 struct musb *musb = container_of(data, struct musb, irq_work);
1750 static int old_state; 1763 static int old_state;
1751 1764
1752 if (musb->xceiv.state != old_state) { 1765 if (musb->xceiv->state != old_state) {
1753 old_state = musb->xceiv.state; 1766 old_state = musb->xceiv->state;
1754 sysfs_notify(&musb->controller->kobj, NULL, "mode"); 1767 sysfs_notify(&musb->controller->kobj, NULL, "mode");
1755 } 1768 }
1756} 1769}
@@ -1782,6 +1795,7 @@ allocate_instance(struct device *dev,
1782 hcd->uses_new_polling = 1; 1795 hcd->uses_new_polling = 1;
1783 1796
1784 musb->vbuserr_retry = VBUSERR_RETRY_COUNT; 1797 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
1798 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
1785#else 1799#else
1786 musb = kzalloc(sizeof *musb, GFP_KERNEL); 1800 musb = kzalloc(sizeof *musb, GFP_KERNEL);
1787 if (!musb) 1801 if (!musb)
@@ -1847,7 +1861,7 @@ static void musb_free(struct musb *musb)
1847 } 1861 }
1848 1862
1849#ifdef CONFIG_USB_MUSB_OTG 1863#ifdef CONFIG_USB_MUSB_OTG
1850 put_device(musb->xceiv.dev); 1864 put_device(musb->xceiv->dev);
1851#endif 1865#endif
1852 1866
1853#ifdef CONFIG_USB_MUSB_HDRC_HCD 1867#ifdef CONFIG_USB_MUSB_HDRC_HCD
@@ -1928,10 +1942,18 @@ bad_config:
1928 } 1942 }
1929 } 1943 }
1930 1944
1931 /* assume vbus is off */ 1945 /* The musb_platform_init() call:
1932 1946 * - adjusts musb->mregs and musb->isr if needed,
1933 /* platform adjusts musb->mregs and musb->isr if needed, 1947 * - may initialize an integrated tranceiver
1934 * and activates clocks 1948 * - initializes musb->xceiv, usually by otg_get_transceiver()
1949 * - activates clocks.
1950 * - stops powering VBUS
1951 * - assigns musb->board_set_vbus if host mode is enabled
1952 *
1953 * There are various transciever configurations. Blackfin,
1954 * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses
1955 * external/discrete ones in various flavors (twl4030 family,
1956 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
1935 */ 1957 */
1936 musb->isr = generic_interrupt; 1958 musb->isr = generic_interrupt;
1937 status = musb_platform_init(musb); 1959 status = musb_platform_init(musb);
@@ -1968,6 +1990,10 @@ bad_config:
1968 if (status < 0) 1990 if (status < 0)
1969 goto fail2; 1991 goto fail2;
1970 1992
1993#ifdef CONFIG_USB_OTG
1994 setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
1995#endif
1996
1971 /* Init IRQ workqueue before request_irq */ 1997 /* Init IRQ workqueue before request_irq */
1972 INIT_WORK(&musb->irq_work, musb_irq_work); 1998 INIT_WORK(&musb->irq_work, musb_irq_work);
1973 1999
@@ -1999,17 +2025,17 @@ bad_config:
1999 ? "DMA" : "PIO", 2025 ? "DMA" : "PIO",
2000 musb->nIrq); 2026 musb->nIrq);
2001 2027
2002#ifdef CONFIG_USB_MUSB_HDRC_HCD 2028 /* host side needs more setup */
2003 /* host side needs more setup, except for no-host modes */ 2029 if (is_host_enabled(musb)) {
2004 if (musb->board_mode != MUSB_PERIPHERAL) {
2005 struct usb_hcd *hcd = musb_to_hcd(musb); 2030 struct usb_hcd *hcd = musb_to_hcd(musb);
2006 2031
2007 if (musb->board_mode == MUSB_OTG) 2032 otg_set_host(musb->xceiv, &hcd->self);
2033
2034 if (is_otg_enabled(musb))
2008 hcd->self.otg_port = 1; 2035 hcd->self.otg_port = 1;
2009 musb->xceiv.host = &hcd->self; 2036 musb->xceiv->host = &hcd->self;
2010 hcd->power_budget = 2 * (plat->power ? : 250); 2037 hcd->power_budget = 2 * (plat->power ? : 250);
2011 } 2038 }
2012#endif /* CONFIG_USB_MUSB_HDRC_HCD */
2013 2039
2014 /* For the host-only role, we can activate right away. 2040 /* For the host-only role, we can activate right away.
2015 * (We expect the ID pin to be forcibly grounded!!) 2041 * (We expect the ID pin to be forcibly grounded!!)
@@ -2017,8 +2043,8 @@ bad_config:
2017 */ 2043 */
2018 if (!is_otg_enabled(musb) && is_host_enabled(musb)) { 2044 if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
2019 MUSB_HST_MODE(musb); 2045 MUSB_HST_MODE(musb);
2020 musb->xceiv.default_a = 1; 2046 musb->xceiv->default_a = 1;
2021 musb->xceiv.state = OTG_STATE_A_IDLE; 2047 musb->xceiv->state = OTG_STATE_A_IDLE;
2022 2048
2023 status = usb_add_hcd(musb_to_hcd(musb), -1, 0); 2049 status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
2024 if (status) 2050 if (status)
@@ -2033,8 +2059,8 @@ bad_config:
2033 2059
2034 } else /* peripheral is enabled */ { 2060 } else /* peripheral is enabled */ {
2035 MUSB_DEV_MODE(musb); 2061 MUSB_DEV_MODE(musb);
2036 musb->xceiv.default_a = 0; 2062 musb->xceiv->default_a = 0;
2037 musb->xceiv.state = OTG_STATE_B_IDLE; 2063 musb->xceiv->state = OTG_STATE_B_IDLE;
2038 2064
2039 status = musb_gadget_setup(musb); 2065 status = musb_gadget_setup(musb);
2040 if (status) 2066 if (status)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index efb39b5e55b5..f3772ca3b2cf 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -40,6 +40,7 @@
40#include <linux/interrupt.h> 40#include <linux/interrupt.h>
41#include <linux/smp_lock.h> 41#include <linux/smp_lock.h>
42#include <linux/errno.h> 42#include <linux/errno.h>
43#include <linux/timer.h>
43#include <linux/clk.h> 44#include <linux/clk.h>
44#include <linux/device.h> 45#include <linux/device.h>
45#include <linux/usb/ch9.h> 46#include <linux/usb/ch9.h>
@@ -171,7 +172,8 @@ enum musb_h_ep0_state {
171 172
172/* peripheral side ep0 states */ 173/* peripheral side ep0 states */
173enum musb_g_ep0_state { 174enum musb_g_ep0_state {
174 MUSB_EP0_STAGE_SETUP, /* idle, waiting for setup */ 175 MUSB_EP0_STAGE_IDLE, /* idle, waiting for SETUP */
176 MUSB_EP0_STAGE_SETUP, /* received SETUP */
175 MUSB_EP0_STAGE_TX, /* IN data */ 177 MUSB_EP0_STAGE_TX, /* IN data */
176 MUSB_EP0_STAGE_RX, /* OUT data */ 178 MUSB_EP0_STAGE_RX, /* OUT data */
177 MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */ 179 MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */
@@ -179,10 +181,15 @@ enum musb_g_ep0_state {
179 MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */ 181 MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */
180} __attribute__ ((packed)); 182} __attribute__ ((packed));
181 183
182/* OTG protocol constants */ 184/*
185 * OTG protocol constants. See USB OTG 1.3 spec,
186 * sections 5.5 "Device Timings" and 6.6.5 "Timers".
187 */
183#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */ 188#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */
184#define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */ 189#define OTG_TIME_A_WAIT_BCON 1100 /* min 1 second */
185#define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */ 190#define OTG_TIME_A_AIDL_BDIS 200 /* min 200 msec */
191#define OTG_TIME_B_ASE0_BRST 100 /* min 3.125 ms */
192
186 193
187/*************************** REGISTER ACCESS ********************************/ 194/*************************** REGISTER ACCESS ********************************/
188 195
@@ -331,6 +338,8 @@ struct musb {
331 struct list_head control; /* of musb_qh */ 338 struct list_head control; /* of musb_qh */
332 struct list_head in_bulk; /* of musb_qh */ 339 struct list_head in_bulk; /* of musb_qh */
333 struct list_head out_bulk; /* of musb_qh */ 340 struct list_head out_bulk; /* of musb_qh */
341
342 struct timer_list otg_timer;
334#endif 343#endif
335 344
336 /* called with IRQs blocked; ON/nonzero implies starting a session, 345 /* called with IRQs blocked; ON/nonzero implies starting a session,
@@ -355,7 +364,7 @@ struct musb {
355 u16 int_rx; 364 u16 int_rx;
356 u16 int_tx; 365 u16 int_tx;
357 366
358 struct otg_transceiver xceiv; 367 struct otg_transceiver *xceiv;
359 368
360 int nIrq; 369 int nIrq;
361 unsigned irq_wake:1; 370 unsigned irq_wake:1;
@@ -386,6 +395,9 @@ struct musb {
386 unsigned is_multipoint:1; 395 unsigned is_multipoint:1;
387 unsigned ignore_disconnect:1; /* during bus resets */ 396 unsigned ignore_disconnect:1; /* during bus resets */
388 397
398 unsigned hb_iso_rx:1; /* high bandwidth iso rx? */
399 unsigned hb_iso_tx:1; /* high bandwidth iso tx? */
400
389#ifdef C_MP_TX 401#ifdef C_MP_TX
390 unsigned bulk_split:1; 402 unsigned bulk_split:1;
391#define can_bulk_split(musb,type) \ 403#define can_bulk_split(musb,type) \
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f79440cdfe7e..8b3c4e2ed7b8 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -310,7 +310,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
310 /* setup DMA, then program endpoint CSR */ 310 /* setup DMA, then program endpoint CSR */
311 request_size = min(request->length, 311 request_size = min(request->length,
312 musb_ep->dma->max_len); 312 musb_ep->dma->max_len);
313 if (request_size <= musb_ep->packet_sz) 313 if (request_size < musb_ep->packet_sz)
314 musb_ep->dma->desired_mode = 0; 314 musb_ep->dma->desired_mode = 0;
315 else 315 else
316 musb_ep->dma->desired_mode = 1; 316 musb_ep->dma->desired_mode = 1;
@@ -349,7 +349,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
349#elif defined(CONFIG_USB_TI_CPPI_DMA) 349#elif defined(CONFIG_USB_TI_CPPI_DMA)
350 /* program endpoint CSR first, then setup DMA */ 350 /* program endpoint CSR first, then setup DMA */
351 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); 351 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
352 csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB; 352 csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
353 MUSB_TXCSR_MODE;
353 musb_writew(epio, MUSB_TXCSR, 354 musb_writew(epio, MUSB_TXCSR,
354 (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) 355 (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
355 | csr); 356 | csr);
@@ -1405,7 +1406,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
1405 1406
1406 spin_lock_irqsave(&musb->lock, flags); 1407 spin_lock_irqsave(&musb->lock, flags);
1407 1408
1408 switch (musb->xceiv.state) { 1409 switch (musb->xceiv->state) {
1409 case OTG_STATE_B_PERIPHERAL: 1410 case OTG_STATE_B_PERIPHERAL:
1410 /* NOTE: OTG state machine doesn't include B_SUSPENDED; 1411 /* NOTE: OTG state machine doesn't include B_SUSPENDED;
1411 * that's part of the standard usb 1.1 state machine, and 1412 * that's part of the standard usb 1.1 state machine, and
@@ -1507,9 +1508,9 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1507{ 1508{
1508 struct musb *musb = gadget_to_musb(gadget); 1509 struct musb *musb = gadget_to_musb(gadget);
1509 1510
1510 if (!musb->xceiv.set_power) 1511 if (!musb->xceiv->set_power)
1511 return -EOPNOTSUPP; 1512 return -EOPNOTSUPP;
1512 return otg_set_power(&musb->xceiv, mA); 1513 return otg_set_power(musb->xceiv, mA);
1513} 1514}
1514 1515
1515static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) 1516static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
@@ -1732,11 +1733,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1732 1733
1733 spin_lock_irqsave(&musb->lock, flags); 1734 spin_lock_irqsave(&musb->lock, flags);
1734 1735
1735 /* REVISIT always use otg_set_peripheral(), handling 1736 otg_set_peripheral(musb->xceiv, &musb->g);
1736 * issues including the root hub one below ...
1737 */
1738 musb->xceiv.gadget = &musb->g;
1739 musb->xceiv.state = OTG_STATE_B_IDLE;
1740 musb->is_active = 1; 1737 musb->is_active = 1;
1741 1738
1742 /* FIXME this ignores the softconnect flag. Drivers are 1739 /* FIXME this ignores the softconnect flag. Drivers are
@@ -1748,6 +1745,8 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1748 if (!is_otg_enabled(musb)) 1745 if (!is_otg_enabled(musb))
1749 musb_start(musb); 1746 musb_start(musb);
1750 1747
1748 otg_set_peripheral(musb->xceiv, &musb->g);
1749
1751 spin_unlock_irqrestore(&musb->lock, flags); 1750 spin_unlock_irqrestore(&musb->lock, flags);
1752 1751
1753 if (is_otg_enabled(musb)) { 1752 if (is_otg_enabled(musb)) {
@@ -1761,8 +1760,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1761 if (retval < 0) { 1760 if (retval < 0) {
1762 DBG(1, "add_hcd failed, %d\n", retval); 1761 DBG(1, "add_hcd failed, %d\n", retval);
1763 spin_lock_irqsave(&musb->lock, flags); 1762 spin_lock_irqsave(&musb->lock, flags);
1764 musb->xceiv.gadget = NULL; 1763 otg_set_peripheral(musb->xceiv, NULL);
1765 musb->xceiv.state = OTG_STATE_UNDEFINED;
1766 musb->gadget_driver = NULL; 1764 musb->gadget_driver = NULL;
1767 musb->g.dev.driver = NULL; 1765 musb->g.dev.driver = NULL;
1768 spin_unlock_irqrestore(&musb->lock, flags); 1766 spin_unlock_irqrestore(&musb->lock, flags);
@@ -1845,8 +1843,9 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1845 1843
1846 (void) musb_gadget_vbus_draw(&musb->g, 0); 1844 (void) musb_gadget_vbus_draw(&musb->g, 0);
1847 1845
1848 musb->xceiv.state = OTG_STATE_UNDEFINED; 1846 musb->xceiv->state = OTG_STATE_UNDEFINED;
1849 stop_activity(musb, driver); 1847 stop_activity(musb, driver);
1848 otg_set_peripheral(musb->xceiv, NULL);
1850 1849
1851 DBG(3, "unregistering driver %s\n", driver->function); 1850 DBG(3, "unregistering driver %s\n", driver->function);
1852 spin_unlock_irqrestore(&musb->lock, flags); 1851 spin_unlock_irqrestore(&musb->lock, flags);
@@ -1882,7 +1881,7 @@ EXPORT_SYMBOL(usb_gadget_unregister_driver);
1882void musb_g_resume(struct musb *musb) 1881void musb_g_resume(struct musb *musb)
1883{ 1882{
1884 musb->is_suspended = 0; 1883 musb->is_suspended = 0;
1885 switch (musb->xceiv.state) { 1884 switch (musb->xceiv->state) {
1886 case OTG_STATE_B_IDLE: 1885 case OTG_STATE_B_IDLE:
1887 break; 1886 break;
1888 case OTG_STATE_B_WAIT_ACON: 1887 case OTG_STATE_B_WAIT_ACON:
@@ -1908,10 +1907,10 @@ void musb_g_suspend(struct musb *musb)
1908 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 1907 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1909 DBG(3, "devctl %02x\n", devctl); 1908 DBG(3, "devctl %02x\n", devctl);
1910 1909
1911 switch (musb->xceiv.state) { 1910 switch (musb->xceiv->state) {
1912 case OTG_STATE_B_IDLE: 1911 case OTG_STATE_B_IDLE:
1913 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 1912 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
1914 musb->xceiv.state = OTG_STATE_B_PERIPHERAL; 1913 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
1915 break; 1914 break;
1916 case OTG_STATE_B_PERIPHERAL: 1915 case OTG_STATE_B_PERIPHERAL:
1917 musb->is_suspended = 1; 1916 musb->is_suspended = 1;
@@ -1957,22 +1956,24 @@ void musb_g_disconnect(struct musb *musb)
1957 spin_lock(&musb->lock); 1956 spin_lock(&musb->lock);
1958 } 1957 }
1959 1958
1960 switch (musb->xceiv.state) { 1959 switch (musb->xceiv->state) {
1961 default: 1960 default:
1962#ifdef CONFIG_USB_MUSB_OTG 1961#ifdef CONFIG_USB_MUSB_OTG
1963 DBG(2, "Unhandled disconnect %s, setting a_idle\n", 1962 DBG(2, "Unhandled disconnect %s, setting a_idle\n",
1964 otg_state_string(musb)); 1963 otg_state_string(musb));
1965 musb->xceiv.state = OTG_STATE_A_IDLE; 1964 musb->xceiv->state = OTG_STATE_A_IDLE;
1965 MUSB_HST_MODE(musb);
1966 break; 1966 break;
1967 case OTG_STATE_A_PERIPHERAL: 1967 case OTG_STATE_A_PERIPHERAL:
1968 musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; 1968 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
1969 MUSB_HST_MODE(musb);
1969 break; 1970 break;
1970 case OTG_STATE_B_WAIT_ACON: 1971 case OTG_STATE_B_WAIT_ACON:
1971 case OTG_STATE_B_HOST: 1972 case OTG_STATE_B_HOST:
1972#endif 1973#endif
1973 case OTG_STATE_B_PERIPHERAL: 1974 case OTG_STATE_B_PERIPHERAL:
1974 case OTG_STATE_B_IDLE: 1975 case OTG_STATE_B_IDLE:
1975 musb->xceiv.state = OTG_STATE_B_IDLE; 1976 musb->xceiv->state = OTG_STATE_B_IDLE;
1976 break; 1977 break;
1977 case OTG_STATE_B_SRP_INIT: 1978 case OTG_STATE_B_SRP_INIT:
1978 break; 1979 break;
@@ -2028,10 +2029,10 @@ __acquires(musb->lock)
2028 * or else after HNP, as A-Device 2029 * or else after HNP, as A-Device
2029 */ 2030 */
2030 if (devctl & MUSB_DEVCTL_BDEVICE) { 2031 if (devctl & MUSB_DEVCTL_BDEVICE) {
2031 musb->xceiv.state = OTG_STATE_B_PERIPHERAL; 2032 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2032 musb->g.is_a_peripheral = 0; 2033 musb->g.is_a_peripheral = 0;
2033 } else if (is_otg_enabled(musb)) { 2034 } else if (is_otg_enabled(musb)) {
2034 musb->xceiv.state = OTG_STATE_A_PERIPHERAL; 2035 musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
2035 musb->g.is_a_peripheral = 1; 2036 musb->g.is_a_peripheral = 1;
2036 } else 2037 } else
2037 WARN_ON(1); 2038 WARN_ON(1);
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 3f5e30ddfa27..40ed50ecedff 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -4,6 +4,7 @@
4 * Copyright 2005 Mentor Graphics Corporation 4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments 5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation 6 * Copyright (C) 2006-2007 Nokia Corporation
7 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
7 * 8 *
8 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -58,7 +59,8 @@
58static char *decode_ep0stage(u8 stage) 59static char *decode_ep0stage(u8 stage)
59{ 60{
60 switch (stage) { 61 switch (stage) {
61 case MUSB_EP0_STAGE_SETUP: return "idle"; 62 case MUSB_EP0_STAGE_IDLE: return "idle";
63 case MUSB_EP0_STAGE_SETUP: return "setup";
62 case MUSB_EP0_STAGE_TX: return "in"; 64 case MUSB_EP0_STAGE_TX: return "in";
63 case MUSB_EP0_STAGE_RX: return "out"; 65 case MUSB_EP0_STAGE_RX: return "out";
64 case MUSB_EP0_STAGE_ACKWAIT: return "wait"; 66 case MUSB_EP0_STAGE_ACKWAIT: return "wait";
@@ -628,7 +630,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
628 musb_writew(regs, MUSB_CSR0, 630 musb_writew(regs, MUSB_CSR0,
629 csr & ~MUSB_CSR0_P_SENTSTALL); 631 csr & ~MUSB_CSR0_P_SENTSTALL);
630 retval = IRQ_HANDLED; 632 retval = IRQ_HANDLED;
631 musb->ep0_state = MUSB_EP0_STAGE_SETUP; 633 musb->ep0_state = MUSB_EP0_STAGE_IDLE;
632 csr = musb_readw(regs, MUSB_CSR0); 634 csr = musb_readw(regs, MUSB_CSR0);
633 } 635 }
634 636
@@ -636,7 +638,18 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
636 if (csr & MUSB_CSR0_P_SETUPEND) { 638 if (csr & MUSB_CSR0_P_SETUPEND) {
637 musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND); 639 musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
638 retval = IRQ_HANDLED; 640 retval = IRQ_HANDLED;
639 musb->ep0_state = MUSB_EP0_STAGE_SETUP; 641 /* Transition into the early status phase */
642 switch (musb->ep0_state) {
643 case MUSB_EP0_STAGE_TX:
644 musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
645 break;
646 case MUSB_EP0_STAGE_RX:
647 musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
648 break;
649 default:
650 ERR("SetupEnd came in a wrong ep0stage %s",
651 decode_ep0stage(musb->ep0_state));
652 }
640 csr = musb_readw(regs, MUSB_CSR0); 653 csr = musb_readw(regs, MUSB_CSR0);
641 /* NOTE: request may need completion */ 654 /* NOTE: request may need completion */
642 } 655 }
@@ -697,11 +710,31 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
697 if (req) 710 if (req)
698 musb_g_ep0_giveback(musb, req); 711 musb_g_ep0_giveback(musb, req);
699 } 712 }
713
714 /*
715 * In case when several interrupts can get coalesced,
716 * check to see if we've already received a SETUP packet...
717 */
718 if (csr & MUSB_CSR0_RXPKTRDY)
719 goto setup;
720
721 retval = IRQ_HANDLED;
722 musb->ep0_state = MUSB_EP0_STAGE_IDLE;
723 break;
724
725 case MUSB_EP0_STAGE_IDLE:
726 /*
727 * This state is typically (but not always) indiscernible
728 * from the status states since the corresponding interrupts
729 * tend to happen within too little period of time (with only
730 * a zero-length packet in between) and so get coalesced...
731 */
700 retval = IRQ_HANDLED; 732 retval = IRQ_HANDLED;
701 musb->ep0_state = MUSB_EP0_STAGE_SETUP; 733 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
702 /* FALLTHROUGH */ 734 /* FALLTHROUGH */
703 735
704 case MUSB_EP0_STAGE_SETUP: 736 case MUSB_EP0_STAGE_SETUP:
737setup:
705 if (csr & MUSB_CSR0_RXPKTRDY) { 738 if (csr & MUSB_CSR0_RXPKTRDY) {
706 struct usb_ctrlrequest setup; 739 struct usb_ctrlrequest setup;
707 int handled = 0; 740 int handled = 0;
@@ -783,7 +816,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
783stall: 816stall:
784 DBG(3, "stall (%d)\n", handled); 817 DBG(3, "stall (%d)\n", handled);
785 musb->ackpend |= MUSB_CSR0_P_SENDSTALL; 818 musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
786 musb->ep0_state = MUSB_EP0_STAGE_SETUP; 819 musb->ep0_state = MUSB_EP0_STAGE_IDLE;
787finish: 820finish:
788 musb_writew(regs, MUSB_CSR0, 821 musb_writew(regs, MUSB_CSR0,
789 musb->ackpend); 822 musb->ackpend);
@@ -803,7 +836,7 @@ finish:
803 /* "can't happen" */ 836 /* "can't happen" */
804 WARN_ON(1); 837 WARN_ON(1);
805 musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL); 838 musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
806 musb->ep0_state = MUSB_EP0_STAGE_SETUP; 839 musb->ep0_state = MUSB_EP0_STAGE_IDLE;
807 break; 840 break;
808 } 841 }
809 842
@@ -959,7 +992,7 @@ static int musb_g_ep0_halt(struct usb_ep *e, int value)
959 992
960 csr |= MUSB_CSR0_P_SENDSTALL; 993 csr |= MUSB_CSR0_P_SENDSTALL;
961 musb_writew(regs, MUSB_CSR0, csr); 994 musb_writew(regs, MUSB_CSR0, csr);
962 musb->ep0_state = MUSB_EP0_STAGE_SETUP; 995 musb->ep0_state = MUSB_EP0_STAGE_IDLE;
963 musb->ackpend = 0; 996 musb->ackpend = 0;
964 break; 997 break;
965 default: 998 default:
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index db1b57415ec7..94a2a350a414 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -181,6 +181,19 @@ static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
181 musb_writew(ep->regs, MUSB_TXCSR, txcsr); 181 musb_writew(ep->regs, MUSB_TXCSR, txcsr);
182} 182}
183 183
184static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
185{
186 if (is_in != 0 || ep->is_shared_fifo)
187 ep->in_qh = qh;
188 if (is_in == 0 || ep->is_shared_fifo)
189 ep->out_qh = qh;
190}
191
192static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
193{
194 return is_in ? ep->in_qh : ep->out_qh;
195}
196
184/* 197/*
185 * Start the URB at the front of an endpoint's queue 198 * Start the URB at the front of an endpoint's queue
186 * end must be claimed from the caller. 199 * end must be claimed from the caller.
@@ -210,7 +223,6 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
210 case USB_ENDPOINT_XFER_CONTROL: 223 case USB_ENDPOINT_XFER_CONTROL:
211 /* control transfers always start with SETUP */ 224 /* control transfers always start with SETUP */
212 is_in = 0; 225 is_in = 0;
213 hw_ep->out_qh = qh;
214 musb->ep0_stage = MUSB_EP0_START; 226 musb->ep0_stage = MUSB_EP0_START;
215 buf = urb->setup_packet; 227 buf = urb->setup_packet;
216 len = 8; 228 len = 8;
@@ -239,10 +251,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
239 epnum, buf + offset, len); 251 epnum, buf + offset, len);
240 252
241 /* Configure endpoint */ 253 /* Configure endpoint */
242 if (is_in || hw_ep->is_shared_fifo) 254 musb_ep_set_qh(hw_ep, is_in, qh);
243 hw_ep->in_qh = qh;
244 else
245 hw_ep->out_qh = qh;
246 musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len); 255 musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);
247 256
248 /* transmit may have more work: start it when it is time */ 257 /* transmit may have more work: start it when it is time */
@@ -286,9 +295,8 @@ start:
286 } 295 }
287} 296}
288 297
289/* caller owns controller lock, irqs are blocked */ 298/* Context: caller owns controller lock, IRQs are blocked */
290static void 299static void musb_giveback(struct musb *musb, struct urb *urb, int status)
291__musb_giveback(struct musb *musb, struct urb *urb, int status)
292__releases(musb->lock) 300__releases(musb->lock)
293__acquires(musb->lock) 301__acquires(musb->lock)
294{ 302{
@@ -321,60 +329,57 @@ __acquires(musb->lock)
321 spin_lock(&musb->lock); 329 spin_lock(&musb->lock);
322} 330}
323 331
324/* for bulk/interrupt endpoints only */ 332/* For bulk/interrupt endpoints only */
325static inline void 333static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
326musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb) 334 struct urb *urb)
327{ 335{
328 struct usb_device *udev = urb->dev; 336 void __iomem *epio = qh->hw_ep->regs;
329 u16 csr; 337 u16 csr;
330 void __iomem *epio = ep->regs;
331 struct musb_qh *qh;
332 338
333 /* FIXME: the current Mentor DMA code seems to have 339 /*
340 * FIXME: the current Mentor DMA code seems to have
334 * problems getting toggle correct. 341 * problems getting toggle correct.
335 */ 342 */
336 343
337 if (is_in || ep->is_shared_fifo) 344 if (is_in)
338 qh = ep->in_qh; 345 csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
339 else 346 else
340 qh = ep->out_qh; 347 csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
341 348
342 if (!is_in) { 349 usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
343 csr = musb_readw(epio, MUSB_TXCSR);
344 usb_settoggle(udev, qh->epnum, 1,
345 (csr & MUSB_TXCSR_H_DATATOGGLE)
346 ? 1 : 0);
347 } else {
348 csr = musb_readw(epio, MUSB_RXCSR);
349 usb_settoggle(udev, qh->epnum, 0,
350 (csr & MUSB_RXCSR_H_DATATOGGLE)
351 ? 1 : 0);
352 }
353} 350}
354 351
355/* caller owns controller lock, irqs are blocked */ 352/*
356static struct musb_qh * 353 * Advance this hardware endpoint's queue, completing the specified URB and
357musb_giveback(struct musb_qh *qh, struct urb *urb, int status) 354 * advancing to either the next URB queued to that qh, or else invalidating
355 * that qh and advancing to the next qh scheduled after the current one.
356 *
357 * Context: caller owns controller lock, IRQs are blocked
358 */
359static void musb_advance_schedule(struct musb *musb, struct urb *urb,
360 struct musb_hw_ep *hw_ep, int is_in)
358{ 361{
362 struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
359 struct musb_hw_ep *ep = qh->hw_ep; 363 struct musb_hw_ep *ep = qh->hw_ep;
360 struct musb *musb = ep->musb;
361 int is_in = usb_pipein(urb->pipe);
362 int ready = qh->is_ready; 364 int ready = qh->is_ready;
365 int status;
366
367 status = (urb->status == -EINPROGRESS) ? 0 : urb->status;
363 368
364 /* save toggle eagerly, for paranoia */ 369 /* save toggle eagerly, for paranoia */
365 switch (qh->type) { 370 switch (qh->type) {
366 case USB_ENDPOINT_XFER_BULK: 371 case USB_ENDPOINT_XFER_BULK:
367 case USB_ENDPOINT_XFER_INT: 372 case USB_ENDPOINT_XFER_INT:
368 musb_save_toggle(ep, is_in, urb); 373 musb_save_toggle(qh, is_in, urb);
369 break; 374 break;
370 case USB_ENDPOINT_XFER_ISOC: 375 case USB_ENDPOINT_XFER_ISOC:
371 if (status == 0 && urb->error_count) 376 if (urb->error_count)
372 status = -EXDEV; 377 status = -EXDEV;
373 break; 378 break;
374 } 379 }
375 380
376 qh->is_ready = 0; 381 qh->is_ready = 0;
377 __musb_giveback(musb, urb, status); 382 musb_giveback(musb, urb, status);
378 qh->is_ready = ready; 383 qh->is_ready = ready;
379 384
380 /* reclaim resources (and bandwidth) ASAP; deschedule it, and 385 /* reclaim resources (and bandwidth) ASAP; deschedule it, and
@@ -388,11 +393,8 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
388 else 393 else
389 ep->tx_reinit = 1; 394 ep->tx_reinit = 1;
390 395
391 /* clobber old pointers to this qh */ 396 /* Clobber old pointers to this qh */
392 if (is_in || ep->is_shared_fifo) 397 musb_ep_set_qh(ep, is_in, NULL);
393 ep->in_qh = NULL;
394 else
395 ep->out_qh = NULL;
396 qh->hep->hcpriv = NULL; 398 qh->hep->hcpriv = NULL;
397 399
398 switch (qh->type) { 400 switch (qh->type) {
@@ -421,36 +423,10 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
421 break; 423 break;
422 } 424 }
423 } 425 }
424 return qh;
425}
426
427/*
428 * Advance this hardware endpoint's queue, completing the specified urb and
429 * advancing to either the next urb queued to that qh, or else invalidating
430 * that qh and advancing to the next qh scheduled after the current one.
431 *
432 * Context: caller owns controller lock, irqs are blocked
433 */
434static void
435musb_advance_schedule(struct musb *musb, struct urb *urb,
436 struct musb_hw_ep *hw_ep, int is_in)
437{
438 struct musb_qh *qh;
439
440 if (is_in || hw_ep->is_shared_fifo)
441 qh = hw_ep->in_qh;
442 else
443 qh = hw_ep->out_qh;
444
445 if (urb->status == -EINPROGRESS)
446 qh = musb_giveback(qh, urb, 0);
447 else
448 qh = musb_giveback(qh, urb, urb->status);
449 426
450 if (qh != NULL && qh->is_ready) { 427 if (qh != NULL && qh->is_ready) {
451 DBG(4, "... next ep%d %cX urb %p\n", 428 DBG(4, "... next ep%d %cX urb %p\n",
452 hw_ep->epnum, is_in ? 'R' : 'T', 429 hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
453 next_urb(qh));
454 musb_start_urb(musb, is_in, qh); 430 musb_start_urb(musb, is_in, qh);
455 } 431 }
456} 432}
@@ -629,7 +605,8 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
629 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); 605 musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
630 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); 606 musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
631 /* NOTE: bulk combining rewrites high bits of maxpacket */ 607 /* NOTE: bulk combining rewrites high bits of maxpacket */
632 musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket); 608 musb_writew(ep->regs, MUSB_RXMAXP,
609 qh->maxpacket | ((qh->hb_mult - 1) << 11));
633 610
634 ep->rx_reinit = 0; 611 ep->rx_reinit = 0;
635} 612}
@@ -651,9 +628,10 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
651 csr = musb_readw(epio, MUSB_TXCSR); 628 csr = musb_readw(epio, MUSB_TXCSR);
652 if (length > pkt_size) { 629 if (length > pkt_size) {
653 mode = 1; 630 mode = 1;
654 csr |= MUSB_TXCSR_AUTOSET 631 csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
655 | MUSB_TXCSR_DMAMODE 632 /* autoset shouldn't be set in high bandwidth */
656 | MUSB_TXCSR_DMAENAB; 633 if (qh->hb_mult == 1)
634 csr |= MUSB_TXCSR_AUTOSET;
657 } else { 635 } else {
658 mode = 0; 636 mode = 0;
659 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE); 637 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
@@ -703,15 +681,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
703 void __iomem *mbase = musb->mregs; 681 void __iomem *mbase = musb->mregs;
704 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 682 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
705 void __iomem *epio = hw_ep->regs; 683 void __iomem *epio = hw_ep->regs;
706 struct musb_qh *qh; 684 struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
707 u16 packet_sz; 685 u16 packet_sz = qh->maxpacket;
708
709 if (!is_out || hw_ep->is_shared_fifo)
710 qh = hw_ep->in_qh;
711 else
712 qh = hw_ep->out_qh;
713
714 packet_sz = qh->maxpacket;
715 686
716 DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s " 687 DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
717 "h_addr%02x h_port%02x bytes %d\n", 688 "h_addr%02x h_port%02x bytes %d\n",
@@ -1129,17 +1100,14 @@ void musb_host_tx(struct musb *musb, u8 epnum)
1129 u16 tx_csr; 1100 u16 tx_csr;
1130 size_t length = 0; 1101 size_t length = 0;
1131 size_t offset = 0; 1102 size_t offset = 0;
1132 struct urb *urb;
1133 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 1103 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1134 void __iomem *epio = hw_ep->regs; 1104 void __iomem *epio = hw_ep->regs;
1135 struct musb_qh *qh = hw_ep->is_shared_fifo ? hw_ep->in_qh 1105 struct musb_qh *qh = hw_ep->out_qh;
1136 : hw_ep->out_qh; 1106 struct urb *urb = next_urb(qh);
1137 u32 status = 0; 1107 u32 status = 0;
1138 void __iomem *mbase = musb->mregs; 1108 void __iomem *mbase = musb->mregs;
1139 struct dma_channel *dma; 1109 struct dma_channel *dma;
1140 1110
1141 urb = next_urb(qh);
1142
1143 musb_ep_select(mbase, epnum); 1111 musb_ep_select(mbase, epnum);
1144 tx_csr = musb_readw(epio, MUSB_TXCSR); 1112 tx_csr = musb_readw(epio, MUSB_TXCSR);
1145 1113
@@ -1427,7 +1395,7 @@ static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
1427 urb->actual_length += dma->actual_len; 1395 urb->actual_length += dma->actual_len;
1428 dma->actual_len = 0L; 1396 dma->actual_len = 0L;
1429 } 1397 }
1430 musb_save_toggle(ep, 1, urb); 1398 musb_save_toggle(cur_qh, 1, urb);
1431 1399
1432 /* move cur_qh to end of queue */ 1400 /* move cur_qh to end of queue */
1433 list_move_tail(&cur_qh->ring, &musb->in_bulk); 1401 list_move_tail(&cur_qh->ring, &musb->in_bulk);
@@ -1531,6 +1499,10 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1531 /* packet error reported later */ 1499 /* packet error reported later */
1532 iso_err = true; 1500 iso_err = true;
1533 } 1501 }
1502 } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
1503 DBG(3, "end %d high bandwidth incomplete ISO packet RX\n",
1504 epnum);
1505 status = -EPROTO;
1534 } 1506 }
1535 1507
1536 /* faults abort the transfer */ 1508 /* faults abort the transfer */
@@ -1738,7 +1710,11 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1738 val &= ~MUSB_RXCSR_H_AUTOREQ; 1710 val &= ~MUSB_RXCSR_H_AUTOREQ;
1739 else 1711 else
1740 val |= MUSB_RXCSR_H_AUTOREQ; 1712 val |= MUSB_RXCSR_H_AUTOREQ;
1741 val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB; 1713 val |= MUSB_RXCSR_DMAENAB;
1714
1715 /* autoclear shouldn't be set in high bandwidth */
1716 if (qh->hb_mult == 1)
1717 val |= MUSB_RXCSR_AUTOCLEAR;
1742 1718
1743 musb_writew(epio, MUSB_RXCSR, 1719 musb_writew(epio, MUSB_RXCSR,
1744 MUSB_RXCSR_H_WZC_BITS | val); 1720 MUSB_RXCSR_H_WZC_BITS | val);
@@ -1817,19 +1793,17 @@ static int musb_schedule(
1817 epnum++, hw_ep++) { 1793 epnum++, hw_ep++) {
1818 int diff; 1794 int diff;
1819 1795
1820 if (is_in || hw_ep->is_shared_fifo) { 1796 if (musb_ep_get_qh(hw_ep, is_in) != NULL)
1821 if (hw_ep->in_qh != NULL)
1822 continue;
1823 } else if (hw_ep->out_qh != NULL)
1824 continue; 1797 continue;
1825 1798
1826 if (hw_ep == musb->bulk_ep) 1799 if (hw_ep == musb->bulk_ep)
1827 continue; 1800 continue;
1828 1801
1829 if (is_in) 1802 if (is_in)
1830 diff = hw_ep->max_packet_sz_rx - qh->maxpacket; 1803 diff = hw_ep->max_packet_sz_rx;
1831 else 1804 else
1832 diff = hw_ep->max_packet_sz_tx - qh->maxpacket; 1805 diff = hw_ep->max_packet_sz_tx;
1806 diff -= (qh->maxpacket * qh->hb_mult);
1833 1807
1834 if (diff >= 0 && best_diff > diff) { 1808 if (diff >= 0 && best_diff > diff) {
1835 best_diff = diff; 1809 best_diff = diff;
@@ -1932,15 +1906,27 @@ static int musb_urb_enqueue(
1932 qh->is_ready = 1; 1906 qh->is_ready = 1;
1933 1907
1934 qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize); 1908 qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
1909 qh->type = usb_endpoint_type(epd);
1935 1910
1936 /* no high bandwidth support yet */ 1911 /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
1937 if (qh->maxpacket & ~0x7ff) { 1912 * Some musb cores don't support high bandwidth ISO transfers; and
1938 ret = -EMSGSIZE; 1913 * we don't (yet!) support high bandwidth interrupt transfers.
1939 goto done; 1914 */
1915 qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
1916 if (qh->hb_mult > 1) {
1917 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
1918
1919 if (ok)
1920 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
1921 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
1922 if (!ok) {
1923 ret = -EMSGSIZE;
1924 goto done;
1925 }
1926 qh->maxpacket &= 0x7ff;
1940 } 1927 }
1941 1928
1942 qh->epnum = usb_endpoint_num(epd); 1929 qh->epnum = usb_endpoint_num(epd);
1943 qh->type = usb_endpoint_type(epd);
1944 1930
1945 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ 1931 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
1946 qh->addr_reg = (u8) usb_pipedevice(urb->pipe); 1932 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
@@ -2052,14 +2038,15 @@ done:
2052 * called with controller locked, irqs blocked 2038 * called with controller locked, irqs blocked
2053 * that hardware queue advances to the next transfer, unless prevented 2039 * that hardware queue advances to the next transfer, unless prevented
2054 */ 2040 */
2055static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in) 2041static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2056{ 2042{
2057 struct musb_hw_ep *ep = qh->hw_ep; 2043 struct musb_hw_ep *ep = qh->hw_ep;
2058 void __iomem *epio = ep->regs; 2044 void __iomem *epio = ep->regs;
2059 unsigned hw_end = ep->epnum; 2045 unsigned hw_end = ep->epnum;
2060 void __iomem *regs = ep->musb->mregs; 2046 void __iomem *regs = ep->musb->mregs;
2061 u16 csr; 2047 int is_in = usb_pipein(urb->pipe);
2062 int status = 0; 2048 int status = 0;
2049 u16 csr;
2063 2050
2064 musb_ep_select(regs, hw_end); 2051 musb_ep_select(regs, hw_end);
2065 2052
@@ -2112,14 +2099,14 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2112{ 2099{
2113 struct musb *musb = hcd_to_musb(hcd); 2100 struct musb *musb = hcd_to_musb(hcd);
2114 struct musb_qh *qh; 2101 struct musb_qh *qh;
2115 struct list_head *sched;
2116 unsigned long flags; 2102 unsigned long flags;
2103 int is_in = usb_pipein(urb->pipe);
2117 int ret; 2104 int ret;
2118 2105
2119 DBG(4, "urb=%p, dev%d ep%d%s\n", urb, 2106 DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
2120 usb_pipedevice(urb->pipe), 2107 usb_pipedevice(urb->pipe),
2121 usb_pipeendpoint(urb->pipe), 2108 usb_pipeendpoint(urb->pipe),
2122 usb_pipein(urb->pipe) ? "in" : "out"); 2109 is_in ? "in" : "out");
2123 2110
2124 spin_lock_irqsave(&musb->lock, flags); 2111 spin_lock_irqsave(&musb->lock, flags);
2125 ret = usb_hcd_check_unlink_urb(hcd, urb, status); 2112 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
@@ -2130,47 +2117,25 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2130 if (!qh) 2117 if (!qh)
2131 goto done; 2118 goto done;
2132 2119
2133 /* Any URB not actively programmed into endpoint hardware can be 2120 /*
2121 * Any URB not actively programmed into endpoint hardware can be
2134 * immediately given back; that's any URB not at the head of an 2122 * immediately given back; that's any URB not at the head of an
2135 * endpoint queue, unless someday we get real DMA queues. And even 2123 * endpoint queue, unless someday we get real DMA queues. And even
2136 * if it's at the head, it might not be known to the hardware... 2124 * if it's at the head, it might not be known to the hardware...
2137 * 2125 *
2138 * Otherwise abort current transfer, pending dma, etc.; urb->status 2126 * Otherwise abort current transfer, pending DMA, etc.; urb->status
2139 * has already been updated. This is a synchronous abort; it'd be 2127 * has already been updated. This is a synchronous abort; it'd be
2140 * OK to hold off until after some IRQ, though. 2128 * OK to hold off until after some IRQ, though.
2129 *
2130 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
2141 */ 2131 */
2142 if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list) 2132 if (!qh->is_ready
2143 ret = -EINPROGRESS; 2133 || urb->urb_list.prev != &qh->hep->urb_list
2144 else { 2134 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2145 switch (qh->type) {
2146 case USB_ENDPOINT_XFER_CONTROL:
2147 sched = &musb->control;
2148 break;
2149 case USB_ENDPOINT_XFER_BULK:
2150 if (qh->mux == 1) {
2151 if (usb_pipein(urb->pipe))
2152 sched = &musb->in_bulk;
2153 else
2154 sched = &musb->out_bulk;
2155 break;
2156 }
2157 default:
2158 /* REVISIT when we get a schedule tree, periodic
2159 * transfers won't always be at the head of a
2160 * singleton queue...
2161 */
2162 sched = NULL;
2163 break;
2164 }
2165 }
2166
2167 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2168 if (ret < 0 || (sched && qh != first_qh(sched))) {
2169 int ready = qh->is_ready; 2135 int ready = qh->is_ready;
2170 2136
2171 ret = 0;
2172 qh->is_ready = 0; 2137 qh->is_ready = 0;
2173 __musb_giveback(musb, urb, 0); 2138 musb_giveback(musb, urb, 0);
2174 qh->is_ready = ready; 2139 qh->is_ready = ready;
2175 2140
2176 /* If nothing else (usually musb_giveback) is using it 2141 /* If nothing else (usually musb_giveback) is using it
@@ -2182,7 +2147,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2182 kfree(qh); 2147 kfree(qh);
2183 } 2148 }
2184 } else 2149 } else
2185 ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); 2150 ret = musb_cleanup_urb(urb, qh);
2186done: 2151done:
2187 spin_unlock_irqrestore(&musb->lock, flags); 2152 spin_unlock_irqrestore(&musb->lock, flags);
2188 return ret; 2153 return ret;
@@ -2192,13 +2157,11 @@ done:
2192static void 2157static void
2193musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) 2158musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2194{ 2159{
2195 u8 epnum = hep->desc.bEndpointAddress; 2160 u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
2196 unsigned long flags; 2161 unsigned long flags;
2197 struct musb *musb = hcd_to_musb(hcd); 2162 struct musb *musb = hcd_to_musb(hcd);
2198 u8 is_in = epnum & USB_DIR_IN;
2199 struct musb_qh *qh; 2163 struct musb_qh *qh;
2200 struct urb *urb; 2164 struct urb *urb;
2201 struct list_head *sched;
2202 2165
2203 spin_lock_irqsave(&musb->lock, flags); 2166 spin_lock_irqsave(&musb->lock, flags);
2204 2167
@@ -2206,31 +2169,11 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2206 if (qh == NULL) 2169 if (qh == NULL)
2207 goto exit; 2170 goto exit;
2208 2171
2209 switch (qh->type) { 2172 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2210 case USB_ENDPOINT_XFER_CONTROL:
2211 sched = &musb->control;
2212 break;
2213 case USB_ENDPOINT_XFER_BULK:
2214 if (qh->mux == 1) {
2215 if (is_in)
2216 sched = &musb->in_bulk;
2217 else
2218 sched = &musb->out_bulk;
2219 break;
2220 }
2221 default:
2222 /* REVISIT when we get a schedule tree, periodic transfers
2223 * won't always be at the head of a singleton queue...
2224 */
2225 sched = NULL;
2226 break;
2227 }
2228
2229 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2230 2173
2231 /* kick first urb off the hardware, if needed */ 2174 /* Kick the first URB off the hardware, if needed */
2232 qh->is_ready = 0; 2175 qh->is_ready = 0;
2233 if (!sched || qh == first_qh(sched)) { 2176 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2234 urb = next_urb(qh); 2177 urb = next_urb(qh);
2235 2178
2236 /* make software (then hardware) stop ASAP */ 2179 /* make software (then hardware) stop ASAP */
@@ -2238,7 +2181,7 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2238 urb->status = -ESHUTDOWN; 2181 urb->status = -ESHUTDOWN;
2239 2182
2240 /* cleanup */ 2183 /* cleanup */
2241 musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN); 2184 musb_cleanup_urb(urb, qh);
2242 2185
2243 /* Then nuke all the others ... and advance the 2186 /* Then nuke all the others ... and advance the
2244 * queue on hw_ep (e.g. bulk ring) when we're done. 2187 * queue on hw_ep (e.g. bulk ring) when we're done.
@@ -2254,7 +2197,7 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2254 * will activate any of these as it advances. 2197 * will activate any of these as it advances.
2255 */ 2198 */
2256 while (!list_empty(&hep->urb_list)) 2199 while (!list_empty(&hep->urb_list))
2257 __musb_giveback(musb, next_urb(qh), -ESHUTDOWN); 2200 musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2258 2201
2259 hep->hcpriv = NULL; 2202 hep->hcpriv = NULL;
2260 list_del(&qh->ring); 2203 list_del(&qh->ring);
@@ -2293,7 +2236,7 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
2293{ 2236{
2294 struct musb *musb = hcd_to_musb(hcd); 2237 struct musb *musb = hcd_to_musb(hcd);
2295 2238
2296 if (musb->xceiv.state == OTG_STATE_A_SUSPEND) 2239 if (musb->xceiv->state == OTG_STATE_A_SUSPEND)
2297 return 0; 2240 return 0;
2298 2241
2299 if (is_host_active(musb) && musb->is_active) { 2242 if (is_host_active(musb) && musb->is_active) {
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 0b7fbcd21963..14b00776638d 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -67,6 +67,7 @@ struct musb_qh {
67 u8 is_ready; /* safe to modify hw_ep */ 67 u8 is_ready; /* safe to modify hw_ep */
68 u8 type; /* XFERTYPE_* */ 68 u8 type; /* XFERTYPE_* */
69 u8 epnum; 69 u8 epnum;
70 u8 hb_mult; /* high bandwidth pkts per uf */
70 u16 maxpacket; 71 u16 maxpacket;
71 u16 frame; /* for periodic schedule */ 72 u16 frame; /* for periodic schedule */
72 unsigned iso_idx; /* in urb->iso_frame_desc[] */ 73 unsigned iso_idx; /* in urb->iso_frame_desc[] */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index bf677acc83db..bfe5fe4ebfee 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -78,18 +78,22 @@ static void musb_port_suspend(struct musb *musb, bool do_suspend)
78 DBG(3, "Root port suspended, power %02x\n", power); 78 DBG(3, "Root port suspended, power %02x\n", power);
79 79
80 musb->port1_status |= USB_PORT_STAT_SUSPEND; 80 musb->port1_status |= USB_PORT_STAT_SUSPEND;
81 switch (musb->xceiv.state) { 81 switch (musb->xceiv->state) {
82 case OTG_STATE_A_HOST: 82 case OTG_STATE_A_HOST:
83 musb->xceiv.state = OTG_STATE_A_SUSPEND; 83 musb->xceiv->state = OTG_STATE_A_SUSPEND;
84 musb->is_active = is_otg_enabled(musb) 84 musb->is_active = is_otg_enabled(musb)
85 && musb->xceiv.host->b_hnp_enable; 85 && musb->xceiv->host->b_hnp_enable;
86 if (musb->is_active)
87 mod_timer(&musb->otg_timer, jiffies
88 + msecs_to_jiffies(
89 OTG_TIME_A_AIDL_BDIS));
86 musb_platform_try_idle(musb, 0); 90 musb_platform_try_idle(musb, 0);
87 break; 91 break;
88#ifdef CONFIG_USB_MUSB_OTG 92#ifdef CONFIG_USB_MUSB_OTG
89 case OTG_STATE_B_HOST: 93 case OTG_STATE_B_HOST:
90 musb->xceiv.state = OTG_STATE_B_WAIT_ACON; 94 musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
91 musb->is_active = is_otg_enabled(musb) 95 musb->is_active = is_otg_enabled(musb)
92 && musb->xceiv.host->b_hnp_enable; 96 && musb->xceiv->host->b_hnp_enable;
93 musb_platform_try_idle(musb, 0); 97 musb_platform_try_idle(musb, 0);
94 break; 98 break;
95#endif 99#endif
@@ -116,7 +120,7 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
116 void __iomem *mbase = musb->mregs; 120 void __iomem *mbase = musb->mregs;
117 121
118#ifdef CONFIG_USB_MUSB_OTG 122#ifdef CONFIG_USB_MUSB_OTG
119 if (musb->xceiv.state == OTG_STATE_B_IDLE) { 123 if (musb->xceiv->state == OTG_STATE_B_IDLE) {
120 DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n"); 124 DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
121 musb->port1_status &= ~USB_PORT_STAT_RESET; 125 musb->port1_status &= ~USB_PORT_STAT_RESET;
122 return; 126 return;
@@ -186,14 +190,23 @@ void musb_root_disconnect(struct musb *musb)
186 usb_hcd_poll_rh_status(musb_to_hcd(musb)); 190 usb_hcd_poll_rh_status(musb_to_hcd(musb));
187 musb->is_active = 0; 191 musb->is_active = 0;
188 192
189 switch (musb->xceiv.state) { 193 switch (musb->xceiv->state) {
190 case OTG_STATE_A_HOST:
191 case OTG_STATE_A_SUSPEND: 194 case OTG_STATE_A_SUSPEND:
192 musb->xceiv.state = OTG_STATE_A_WAIT_BCON; 195#ifdef CONFIG_USB_MUSB_OTG
196 if (is_otg_enabled(musb)
197 && musb->xceiv->host->b_hnp_enable) {
198 musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
199 musb->g.is_a_peripheral = 1;
200 break;
201 }
202#endif
203 /* FALLTHROUGH */
204 case OTG_STATE_A_HOST:
205 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
193 musb->is_active = 0; 206 musb->is_active = 0;
194 break; 207 break;
195 case OTG_STATE_A_WAIT_VFALL: 208 case OTG_STATE_A_WAIT_VFALL:
196 musb->xceiv.state = OTG_STATE_B_IDLE; 209 musb->xceiv->state = OTG_STATE_B_IDLE;
197 break; 210 break;
198 default: 211 default:
199 DBG(1, "host disconnect (%s)\n", otg_state_string(musb)); 212 DBG(1, "host disconnect (%s)\n", otg_state_string(musb));
@@ -332,7 +345,7 @@ int musb_hub_control(
332 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; 345 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
333 usb_hcd_poll_rh_status(musb_to_hcd(musb)); 346 usb_hcd_poll_rh_status(musb_to_hcd(musb));
334 /* NOTE: it might really be A_WAIT_BCON ... */ 347 /* NOTE: it might really be A_WAIT_BCON ... */
335 musb->xceiv.state = OTG_STATE_A_HOST; 348 musb->xceiv->state = OTG_STATE_A_HOST;
336 } 349 }
337 350
338 put_unaligned(cpu_to_le32(musb->port1_status 351 put_unaligned(cpu_to_le32(musb->port1_status
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 60924ce08493..34875201ee04 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -44,7 +44,6 @@
44#define get_cpu_rev() 2 44#define get_cpu_rev() 2
45#endif 45#endif
46 46
47#define MUSB_TIMEOUT_A_WAIT_BCON 1100
48 47
49static struct timer_list musb_idle_timer; 48static struct timer_list musb_idle_timer;
50 49
@@ -61,17 +60,17 @@ static void musb_do_idle(unsigned long _musb)
61 60
62 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 61 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
63 62
64 switch (musb->xceiv.state) { 63 switch (musb->xceiv->state) {
65 case OTG_STATE_A_WAIT_BCON: 64 case OTG_STATE_A_WAIT_BCON:
66 devctl &= ~MUSB_DEVCTL_SESSION; 65 devctl &= ~MUSB_DEVCTL_SESSION;
67 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); 66 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
68 67
69 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 68 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
70 if (devctl & MUSB_DEVCTL_BDEVICE) { 69 if (devctl & MUSB_DEVCTL_BDEVICE) {
71 musb->xceiv.state = OTG_STATE_B_IDLE; 70 musb->xceiv->state = OTG_STATE_B_IDLE;
72 MUSB_DEV_MODE(musb); 71 MUSB_DEV_MODE(musb);
73 } else { 72 } else {
74 musb->xceiv.state = OTG_STATE_A_IDLE; 73 musb->xceiv->state = OTG_STATE_A_IDLE;
75 MUSB_HST_MODE(musb); 74 MUSB_HST_MODE(musb);
76 } 75 }
77 break; 76 break;
@@ -89,7 +88,7 @@ static void musb_do_idle(unsigned long _musb)
89 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; 88 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
90 usb_hcd_poll_rh_status(musb_to_hcd(musb)); 89 usb_hcd_poll_rh_status(musb_to_hcd(musb));
91 /* NOTE: it might really be A_WAIT_BCON ... */ 90 /* NOTE: it might really be A_WAIT_BCON ... */
92 musb->xceiv.state = OTG_STATE_A_HOST; 91 musb->xceiv->state = OTG_STATE_A_HOST;
93 } 92 }
94 break; 93 break;
95#endif 94#endif
@@ -97,9 +96,9 @@ static void musb_do_idle(unsigned long _musb)
97 case OTG_STATE_A_HOST: 96 case OTG_STATE_A_HOST:
98 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 97 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
99 if (devctl & MUSB_DEVCTL_BDEVICE) 98 if (devctl & MUSB_DEVCTL_BDEVICE)
100 musb->xceiv.state = OTG_STATE_B_IDLE; 99 musb->xceiv->state = OTG_STATE_B_IDLE;
101 else 100 else
102 musb->xceiv.state = OTG_STATE_A_WAIT_BCON; 101 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
103#endif 102#endif
104 default: 103 default:
105 break; 104 break;
@@ -118,7 +117,7 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
118 117
119 /* Never idle if active, or when VBUS timeout is not set as host */ 118 /* Never idle if active, or when VBUS timeout is not set as host */
120 if (musb->is_active || ((musb->a_wait_bcon == 0) 119 if (musb->is_active || ((musb->a_wait_bcon == 0)
121 && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) { 120 && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
122 DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); 121 DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
123 del_timer(&musb_idle_timer); 122 del_timer(&musb_idle_timer);
124 last_timer = jiffies; 123 last_timer = jiffies;
@@ -163,8 +162,8 @@ static void omap_set_vbus(struct musb *musb, int is_on)
163 162
164 if (is_on) { 163 if (is_on) {
165 musb->is_active = 1; 164 musb->is_active = 1;
166 musb->xceiv.default_a = 1; 165 musb->xceiv->default_a = 1;
167 musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; 166 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
168 devctl |= MUSB_DEVCTL_SESSION; 167 devctl |= MUSB_DEVCTL_SESSION;
169 168
170 MUSB_HST_MODE(musb); 169 MUSB_HST_MODE(musb);
@@ -175,8 +174,8 @@ static void omap_set_vbus(struct musb *musb, int is_on)
175 * jumping right to B_IDLE... 174 * jumping right to B_IDLE...
176 */ 175 */
177 176
178 musb->xceiv.default_a = 0; 177 musb->xceiv->default_a = 0;
179 musb->xceiv.state = OTG_STATE_B_IDLE; 178 musb->xceiv->state = OTG_STATE_B_IDLE;
180 devctl &= ~MUSB_DEVCTL_SESSION; 179 devctl &= ~MUSB_DEVCTL_SESSION;
181 180
182 MUSB_DEV_MODE(musb); 181 MUSB_DEV_MODE(musb);
@@ -188,10 +187,6 @@ static void omap_set_vbus(struct musb *musb, int is_on)
188 otg_state_string(musb), 187 otg_state_string(musb),
189 musb_readb(musb->mregs, MUSB_DEVCTL)); 188 musb_readb(musb->mregs, MUSB_DEVCTL));
190} 189}
191static int omap_set_power(struct otg_transceiver *x, unsigned mA)
192{
193 return 0;
194}
195 190
196static int musb_platform_resume(struct musb *musb); 191static int musb_platform_resume(struct musb *musb);
197 192
@@ -202,24 +197,6 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
202 devctl |= MUSB_DEVCTL_SESSION; 197 devctl |= MUSB_DEVCTL_SESSION;
203 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); 198 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
204 199
205 switch (musb_mode) {
206#ifdef CONFIG_USB_MUSB_HDRC_HCD
207 case MUSB_HOST:
208 otg_set_host(&musb->xceiv, musb->xceiv.host);
209 break;
210#endif
211#ifdef CONFIG_USB_GADGET_MUSB_HDRC
212 case MUSB_PERIPHERAL:
213 otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
214 break;
215#endif
216#ifdef CONFIG_USB_MUSB_OTG
217 case MUSB_OTG:
218 break;
219#endif
220 default:
221 return -EINVAL;
222 }
223 return 0; 200 return 0;
224} 201}
225 202
@@ -231,6 +208,16 @@ int __init musb_platform_init(struct musb *musb)
231 omap_cfg_reg(AE5_2430_USB0HS_STP); 208 omap_cfg_reg(AE5_2430_USB0HS_STP);
232#endif 209#endif
233 210
211 /* We require some kind of external transceiver, hooked
212 * up through ULPI. TWL4030-family PMICs include one,
213 * which needs a driver, drivers aren't always needed.
214 */
215 musb->xceiv = otg_get_transceiver();
216 if (!musb->xceiv) {
217 pr_err("HS USB OTG: no transceiver configured\n");
218 return -ENODEV;
219 }
220
234 musb_platform_resume(musb); 221 musb_platform_resume(musb);
235 222
236 l = omap_readl(OTG_SYSCONFIG); 223 l = omap_readl(OTG_SYSCONFIG);
@@ -240,7 +227,12 @@ int __init musb_platform_init(struct musb *musb)
240 l &= ~AUTOIDLE; /* disable auto idle */ 227 l &= ~AUTOIDLE; /* disable auto idle */
241 l &= ~NOIDLE; /* remove possible noidle */ 228 l &= ~NOIDLE; /* remove possible noidle */
242 l |= SMARTIDLE; /* enable smart idle */ 229 l |= SMARTIDLE; /* enable smart idle */
243 l |= AUTOIDLE; /* enable auto idle */ 230 /*
231 * MUSB AUTOIDLE don't work in 3430.
232 * Workaround by Richard Woodruff/TI
233 */
234 if (!cpu_is_omap3430())
235 l |= AUTOIDLE; /* enable auto idle */
244 omap_writel(l, OTG_SYSCONFIG); 236 omap_writel(l, OTG_SYSCONFIG);
245 237
246 l = omap_readl(OTG_INTERFSEL); 238 l = omap_readl(OTG_INTERFSEL);
@@ -257,9 +249,6 @@ int __init musb_platform_init(struct musb *musb)
257 249
258 if (is_host_enabled(musb)) 250 if (is_host_enabled(musb))
259 musb->board_set_vbus = omap_set_vbus; 251 musb->board_set_vbus = omap_set_vbus;
260 if (is_peripheral_enabled(musb))
261 musb->xceiv.set_power = omap_set_power;
262 musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;
263 252
264 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); 253 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
265 254
@@ -282,8 +271,7 @@ int musb_platform_suspend(struct musb *musb)
282 l |= ENABLEWAKEUP; /* enable wakeup */ 271 l |= ENABLEWAKEUP; /* enable wakeup */
283 omap_writel(l, OTG_SYSCONFIG); 272 omap_writel(l, OTG_SYSCONFIG);
284 273
285 if (musb->xceiv.set_suspend) 274 otg_set_suspend(musb->xceiv, 1);
286 musb->xceiv.set_suspend(&musb->xceiv, 1);
287 275
288 if (musb->set_clock) 276 if (musb->set_clock)
289 musb->set_clock(musb->clock, 0); 277 musb->set_clock(musb->clock, 0);
@@ -300,8 +288,7 @@ static int musb_platform_resume(struct musb *musb)
300 if (!musb->clock) 288 if (!musb->clock)
301 return 0; 289 return 0;
302 290
303 if (musb->xceiv.set_suspend) 291 otg_set_suspend(musb->xceiv, 0);
304 musb->xceiv.set_suspend(&musb->xceiv, 0);
305 292
306 if (musb->set_clock) 293 if (musb->set_clock)
307 musb->set_clock(musb->clock, 1); 294 musb->set_clock(musb->clock, 1);
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 4ac1477d3569..88b587c703e9 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -259,6 +259,8 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
259 tusb_fifo_read_unaligned(fifo, buf, len); 259 tusb_fifo_read_unaligned(fifo, buf, len);
260} 260}
261 261
262static struct musb *the_musb;
263
262#ifdef CONFIG_USB_GADGET_MUSB_HDRC 264#ifdef CONFIG_USB_GADGET_MUSB_HDRC
263 265
264/* This is used by gadget drivers, and OTG transceiver logic, allowing 266/* This is used by gadget drivers, and OTG transceiver logic, allowing
@@ -269,7 +271,7 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
269 */ 271 */
270static int tusb_draw_power(struct otg_transceiver *x, unsigned mA) 272static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
271{ 273{
272 struct musb *musb = container_of(x, struct musb, xceiv); 274 struct musb *musb = the_musb;
273 void __iomem *tbase = musb->ctrl_base; 275 void __iomem *tbase = musb->ctrl_base;
274 u32 reg; 276 u32 reg;
275 277
@@ -419,7 +421,7 @@ static void musb_do_idle(unsigned long _musb)
419 421
420 spin_lock_irqsave(&musb->lock, flags); 422 spin_lock_irqsave(&musb->lock, flags);
421 423
422 switch (musb->xceiv.state) { 424 switch (musb->xceiv->state) {
423 case OTG_STATE_A_WAIT_BCON: 425 case OTG_STATE_A_WAIT_BCON:
424 if ((musb->a_wait_bcon != 0) 426 if ((musb->a_wait_bcon != 0)
425 && (musb->idle_timeout == 0 427 && (musb->idle_timeout == 0
@@ -483,7 +485,7 @@ void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
483 485
484 /* Never idle if active, or when VBUS timeout is not set as host */ 486 /* Never idle if active, or when VBUS timeout is not set as host */
485 if (musb->is_active || ((musb->a_wait_bcon == 0) 487 if (musb->is_active || ((musb->a_wait_bcon == 0)
486 && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) { 488 && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
487 DBG(4, "%s active, deleting timer\n", otg_state_string(musb)); 489 DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
488 del_timer(&musb_idle_timer); 490 del_timer(&musb_idle_timer);
489 last_timer = jiffies; 491 last_timer = jiffies;
@@ -532,8 +534,8 @@ static void tusb_source_power(struct musb *musb, int is_on)
532 if (musb->set_clock) 534 if (musb->set_clock)
533 musb->set_clock(musb->clock, 1); 535 musb->set_clock(musb->clock, 1);
534 timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE); 536 timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
535 musb->xceiv.default_a = 1; 537 musb->xceiv->default_a = 1;
536 musb->xceiv.state = OTG_STATE_A_WAIT_VRISE; 538 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
537 devctl |= MUSB_DEVCTL_SESSION; 539 devctl |= MUSB_DEVCTL_SESSION;
538 540
539 conf |= TUSB_DEV_CONF_USB_HOST_MODE; 541 conf |= TUSB_DEV_CONF_USB_HOST_MODE;
@@ -546,24 +548,24 @@ static void tusb_source_power(struct musb *musb, int is_on)
546 /* If ID pin is grounded, we want to be a_idle */ 548 /* If ID pin is grounded, we want to be a_idle */
547 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); 549 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
548 if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) { 550 if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
549 switch (musb->xceiv.state) { 551 switch (musb->xceiv->state) {
550 case OTG_STATE_A_WAIT_VRISE: 552 case OTG_STATE_A_WAIT_VRISE:
551 case OTG_STATE_A_WAIT_BCON: 553 case OTG_STATE_A_WAIT_BCON:
552 musb->xceiv.state = OTG_STATE_A_WAIT_VFALL; 554 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
553 break; 555 break;
554 case OTG_STATE_A_WAIT_VFALL: 556 case OTG_STATE_A_WAIT_VFALL:
555 musb->xceiv.state = OTG_STATE_A_IDLE; 557 musb->xceiv->state = OTG_STATE_A_IDLE;
556 break; 558 break;
557 default: 559 default:
558 musb->xceiv.state = OTG_STATE_A_IDLE; 560 musb->xceiv->state = OTG_STATE_A_IDLE;
559 } 561 }
560 musb->is_active = 0; 562 musb->is_active = 0;
561 musb->xceiv.default_a = 1; 563 musb->xceiv->default_a = 1;
562 MUSB_HST_MODE(musb); 564 MUSB_HST_MODE(musb);
563 } else { 565 } else {
564 musb->is_active = 0; 566 musb->is_active = 0;
565 musb->xceiv.default_a = 0; 567 musb->xceiv->default_a = 0;
566 musb->xceiv.state = OTG_STATE_B_IDLE; 568 musb->xceiv->state = OTG_STATE_B_IDLE;
567 MUSB_DEV_MODE(musb); 569 MUSB_DEV_MODE(musb);
568 } 570 }
569 571
@@ -674,7 +676,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
674 else 676 else
675 default_a = is_host_enabled(musb); 677 default_a = is_host_enabled(musb);
676 DBG(2, "Default-%c\n", default_a ? 'A' : 'B'); 678 DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
677 musb->xceiv.default_a = default_a; 679 musb->xceiv->default_a = default_a;
678 tusb_source_power(musb, default_a); 680 tusb_source_power(musb, default_a);
679 681
680 /* Don't allow idling immediately */ 682 /* Don't allow idling immediately */
@@ -686,7 +688,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
686 if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) { 688 if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
687 689
688 /* B-dev state machine: no vbus ~= disconnect */ 690 /* B-dev state machine: no vbus ~= disconnect */
689 if ((is_otg_enabled(musb) && !musb->xceiv.default_a) 691 if ((is_otg_enabled(musb) && !musb->xceiv->default_a)
690 || !is_host_enabled(musb)) { 692 || !is_host_enabled(musb)) {
691#ifdef CONFIG_USB_MUSB_HDRC_HCD 693#ifdef CONFIG_USB_MUSB_HDRC_HCD
692 /* ? musb_root_disconnect(musb); */ 694 /* ? musb_root_disconnect(musb); */
@@ -701,9 +703,9 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
701 703
702 if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) { 704 if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
703 DBG(1, "Forcing disconnect (no interrupt)\n"); 705 DBG(1, "Forcing disconnect (no interrupt)\n");
704 if (musb->xceiv.state != OTG_STATE_B_IDLE) { 706 if (musb->xceiv->state != OTG_STATE_B_IDLE) {
705 /* INTR_DISCONNECT can hide... */ 707 /* INTR_DISCONNECT can hide... */
706 musb->xceiv.state = OTG_STATE_B_IDLE; 708 musb->xceiv->state = OTG_STATE_B_IDLE;
707 musb->int_usb |= MUSB_INTR_DISCONNECT; 709 musb->int_usb |= MUSB_INTR_DISCONNECT;
708 } 710 }
709 musb->is_active = 0; 711 musb->is_active = 0;
@@ -717,7 +719,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
717 DBG(2, "vbus change, %s, otg %03x\n", 719 DBG(2, "vbus change, %s, otg %03x\n",
718 otg_state_string(musb), otg_stat); 720 otg_state_string(musb), otg_stat);
719 721
720 switch (musb->xceiv.state) { 722 switch (musb->xceiv->state) {
721 case OTG_STATE_A_IDLE: 723 case OTG_STATE_A_IDLE:
722 DBG(2, "Got SRP, turning on VBUS\n"); 724 DBG(2, "Got SRP, turning on VBUS\n");
723 musb_set_vbus(musb, 1); 725 musb_set_vbus(musb, 1);
@@ -765,7 +767,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
765 767
766 DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat); 768 DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat);
767 769
768 switch (musb->xceiv.state) { 770 switch (musb->xceiv->state) {
769 case OTG_STATE_A_WAIT_VRISE: 771 case OTG_STATE_A_WAIT_VRISE:
770 /* VBUS has probably been valid for a while now, 772 /* VBUS has probably been valid for a while now,
771 * but may well have bounced out of range a bit 773 * but may well have bounced out of range a bit
@@ -777,7 +779,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
777 DBG(2, "devctl %02x\n", devctl); 779 DBG(2, "devctl %02x\n", devctl);
778 break; 780 break;
779 } 781 }
780 musb->xceiv.state = OTG_STATE_A_WAIT_BCON; 782 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
781 musb->is_active = 0; 783 musb->is_active = 0;
782 idle_timeout = jiffies 784 idle_timeout = jiffies
783 + msecs_to_jiffies(musb->a_wait_bcon); 785 + msecs_to_jiffies(musb->a_wait_bcon);
@@ -1093,9 +1095,14 @@ int __init musb_platform_init(struct musb *musb)
1093{ 1095{
1094 struct platform_device *pdev; 1096 struct platform_device *pdev;
1095 struct resource *mem; 1097 struct resource *mem;
1096 void __iomem *sync; 1098 void __iomem *sync = NULL;
1097 int ret; 1099 int ret;
1098 1100
1101 usb_nop_xceiv_register();
1102 musb->xceiv = otg_get_transceiver();
1103 if (!musb->xceiv)
1104 return -ENODEV;
1105
1099 pdev = to_platform_device(musb->controller); 1106 pdev = to_platform_device(musb->controller);
1100 1107
1101 /* dma address for async dma */ 1108 /* dma address for async dma */
@@ -1106,14 +1113,16 @@ int __init musb_platform_init(struct musb *musb)
1106 mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1113 mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1107 if (!mem) { 1114 if (!mem) {
1108 pr_debug("no sync dma resource?\n"); 1115 pr_debug("no sync dma resource?\n");
1109 return -ENODEV; 1116 ret = -ENODEV;
1117 goto done;
1110 } 1118 }
1111 musb->sync = mem->start; 1119 musb->sync = mem->start;
1112 1120
1113 sync = ioremap(mem->start, mem->end - mem->start + 1); 1121 sync = ioremap(mem->start, mem->end - mem->start + 1);
1114 if (!sync) { 1122 if (!sync) {
1115 pr_debug("ioremap for sync failed\n"); 1123 pr_debug("ioremap for sync failed\n");
1116 return -ENOMEM; 1124 ret = -ENOMEM;
1125 goto done;
1117 } 1126 }
1118 musb->sync_va = sync; 1127 musb->sync_va = sync;
1119 1128
@@ -1126,28 +1135,37 @@ int __init musb_platform_init(struct musb *musb)
1126 if (ret) { 1135 if (ret) {
1127 printk(KERN_ERR "Could not start tusb6010 (%d)\n", 1136 printk(KERN_ERR "Could not start tusb6010 (%d)\n",
1128 ret); 1137 ret);
1129 return -ENODEV; 1138 goto done;
1130 } 1139 }
1131 musb->isr = tusb_interrupt; 1140 musb->isr = tusb_interrupt;
1132 1141
1133 if (is_host_enabled(musb)) 1142 if (is_host_enabled(musb))
1134 musb->board_set_vbus = tusb_source_power; 1143 musb->board_set_vbus = tusb_source_power;
1135 if (is_peripheral_enabled(musb)) 1144 if (is_peripheral_enabled(musb)) {
1136 musb->xceiv.set_power = tusb_draw_power; 1145 musb->xceiv->set_power = tusb_draw_power;
1146 the_musb = musb;
1147 }
1137 1148
1138 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); 1149 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
1139 1150
1151done:
1152 if (ret < 0) {
1153 if (sync)
1154 iounmap(sync);
1155 usb_nop_xceiv_unregister();
1156 }
1140 return ret; 1157 return ret;
1141} 1158}
1142 1159
1143int musb_platform_exit(struct musb *musb) 1160int musb_platform_exit(struct musb *musb)
1144{ 1161{
1145 del_timer_sync(&musb_idle_timer); 1162 del_timer_sync(&musb_idle_timer);
1163 the_musb = NULL;
1146 1164
1147 if (musb->board_set_power) 1165 if (musb->board_set_power)
1148 musb->board_set_power(0); 1166 musb->board_set_power(0);
1149 1167
1150 iounmap(musb->sync_va); 1168 iounmap(musb->sync_va);
1151 1169 usb_nop_xceiv_unregister();
1152 return 0; 1170 return 0;
1153} 1171}
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index aa884d072f0b..69feeec1628c 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -59,4 +59,18 @@ config NOP_USB_XCEIV
59 built-in with usb ip or which are autonomous and doesn't require any 59 built-in with usb ip or which are autonomous and doesn't require any
60 phy programming such as ISP1x04 etc. 60 phy programming such as ISP1x04 etc.
61 61
62config USB_LANGWELL_OTG
63 tristate "Intel Langwell USB OTG dual-role support"
64 depends on USB && MRST
65 select USB_OTG
66 select USB_OTG_UTILS
67 help
68 Say Y here if you want to build Intel Langwell USB OTG
69 transciever driver in kernel. This driver implements role
70 switch between EHCI host driver and Langwell USB OTG
71 client driver.
72
73 To compile this driver as a module, choose M here: the
74 module will be called langwell_otg.
75
62endif # USB || OTG 76endif # USB || OTG
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 208167856529..6d1abdd3c0ac 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_OTG_UTILS) += otg.o
9obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o 9obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
10obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o 10obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
11obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o 11obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
12obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o
12obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o 13obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
13 14
14ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG 15ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG
diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c
new file mode 100644
index 000000000000..6f628d0e9f39
--- /dev/null
+++ b/drivers/usb/otg/langwell_otg.c
@@ -0,0 +1,1915 @@
1/*
2 * Intel Langwell USB OTG transceiver driver
3 * Copyright (C) 2008 - 2009, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 */
19/* This driver helps to switch Langwell OTG controller function between host
20 * and peripheral. It works with EHCI driver and Langwell client controller
21 * driver together.
22 */
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/pci.h>
26#include <linux/errno.h>
27#include <linux/interrupt.h>
28#include <linux/kernel.h>
29#include <linux/device.h>
30#include <linux/moduleparam.h>
31#include <linux/usb/ch9.h>
32#include <linux/usb/gadget.h>
33#include <linux/usb.h>
34#include <linux/usb/otg.h>
35#include <linux/notifier.h>
36#include <asm/ipc_defs.h>
37#include <linux/delay.h>
38#include "../core/hcd.h"
39
40#include <linux/usb/langwell_otg.h>
41
42#define DRIVER_DESC "Intel Langwell USB OTG transceiver driver"
43#define DRIVER_VERSION "3.0.0.32L.0002"
44
45MODULE_DESCRIPTION(DRIVER_DESC);
46MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
47MODULE_VERSION(DRIVER_VERSION);
48MODULE_LICENSE("GPL");
49
50static const char driver_name[] = "langwell_otg";
51
52static int langwell_otg_probe(struct pci_dev *pdev,
53 const struct pci_device_id *id);
54static void langwell_otg_remove(struct pci_dev *pdev);
55static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message);
56static int langwell_otg_resume(struct pci_dev *pdev);
57
58static int langwell_otg_set_host(struct otg_transceiver *otg,
59 struct usb_bus *host);
60static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
61 struct usb_gadget *gadget);
62static int langwell_otg_start_srp(struct otg_transceiver *otg);
63
64static const struct pci_device_id pci_ids[] = {{
65 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
66 .class_mask = ~0,
67 .vendor = 0x8086,
68 .device = 0x0811,
69 .subvendor = PCI_ANY_ID,
70 .subdevice = PCI_ANY_ID,
71}, { /* end: all zeroes */ }
72};
73
74static struct pci_driver otg_pci_driver = {
75 .name = (char *) driver_name,
76 .id_table = pci_ids,
77
78 .probe = langwell_otg_probe,
79 .remove = langwell_otg_remove,
80
81 .suspend = langwell_otg_suspend,
82 .resume = langwell_otg_resume,
83};
84
85static const char *state_string(enum usb_otg_state state)
86{
87 switch (state) {
88 case OTG_STATE_A_IDLE:
89 return "a_idle";
90 case OTG_STATE_A_WAIT_VRISE:
91 return "a_wait_vrise";
92 case OTG_STATE_A_WAIT_BCON:
93 return "a_wait_bcon";
94 case OTG_STATE_A_HOST:
95 return "a_host";
96 case OTG_STATE_A_SUSPEND:
97 return "a_suspend";
98 case OTG_STATE_A_PERIPHERAL:
99 return "a_peripheral";
100 case OTG_STATE_A_WAIT_VFALL:
101 return "a_wait_vfall";
102 case OTG_STATE_A_VBUS_ERR:
103 return "a_vbus_err";
104 case OTG_STATE_B_IDLE:
105 return "b_idle";
106 case OTG_STATE_B_SRP_INIT:
107 return "b_srp_init";
108 case OTG_STATE_B_PERIPHERAL:
109 return "b_peripheral";
110 case OTG_STATE_B_WAIT_ACON:
111 return "b_wait_acon";
112 case OTG_STATE_B_HOST:
113 return "b_host";
114 default:
115 return "UNDEFINED";
116 }
117}
118
119/* HSM timers */
120static inline struct langwell_otg_timer *otg_timer_initializer
121(void (*function)(unsigned long), unsigned long expires, unsigned long data)
122{
123 struct langwell_otg_timer *timer;
124 timer = kmalloc(sizeof(struct langwell_otg_timer), GFP_KERNEL);
125 timer->function = function;
126 timer->expires = expires;
127 timer->data = data;
128 return timer;
129}
130
131static struct langwell_otg_timer *a_wait_vrise_tmr, *a_wait_bcon_tmr,
132 *a_aidl_bdis_tmr, *b_ase0_brst_tmr, *b_se0_srp_tmr, *b_srp_res_tmr,
133 *b_bus_suspend_tmr;
134
135static struct list_head active_timers;
136
137static struct langwell_otg *the_transceiver;
138
139/* host/client notify transceiver when event affects HNP state */
140void langwell_update_transceiver()
141{
142 otg_dbg("transceiver driver is notified\n");
143 queue_work(the_transceiver->qwork, &the_transceiver->work);
144}
145EXPORT_SYMBOL(langwell_update_transceiver);
146
147static int langwell_otg_set_host(struct otg_transceiver *otg,
148 struct usb_bus *host)
149{
150 otg->host = host;
151
152 return 0;
153}
154
155static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
156 struct usb_gadget *gadget)
157{
158 otg->gadget = gadget;
159
160 return 0;
161}
162
163static int langwell_otg_set_power(struct otg_transceiver *otg,
164 unsigned mA)
165{
166 return 0;
167}
168
169/* A-device drives vbus, controlled through PMIC CHRGCNTL register*/
170static void langwell_otg_drv_vbus(int on)
171{
172 struct ipc_pmic_reg_data pmic_data = {0};
173 struct ipc_pmic_reg_data battery_data;
174
175 /* Check if battery is attached or not */
176 battery_data.pmic_reg_data[0].register_address = 0xd2;
177 battery_data.ioc = 0;
178 battery_data.num_entries = 1;
179 if (ipc_pmic_register_read(&battery_data)) {
180 otg_dbg("Failed to read PMIC register 0xd2.\n");
181 return;
182 }
183
184 if ((battery_data.pmic_reg_data[0].value & 0x20) == 0) {
185 otg_dbg("no battery attached\n");
186 return;
187 }
188
189 /* Workaround for battery attachment issue */
190 if (battery_data.pmic_reg_data[0].value == 0x34) {
191 otg_dbg("battery \n");
192 return;
193 }
194
195 otg_dbg("battery attached\n");
196
197 pmic_data.ioc = 0;
198 pmic_data.pmic_reg_data[0].register_address = 0xD4;
199 pmic_data.num_entries = 1;
200 if (on)
201 pmic_data.pmic_reg_data[0].value = 0x20;
202 else
203 pmic_data.pmic_reg_data[0].value = 0xc0;
204
205 if (ipc_pmic_register_write(&pmic_data, TRUE))
206 otg_dbg("Failed to write PMIC.\n");
207
208}
209
210/* charge vbus or discharge vbus through a resistor to ground */
211static void langwell_otg_chrg_vbus(int on)
212{
213
214 u32 val;
215
216 val = readl(the_transceiver->regs + CI_OTGSC);
217
218 if (on)
219 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VC,
220 the_transceiver->regs + CI_OTGSC);
221 else
222 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VD,
223 the_transceiver->regs + CI_OTGSC);
224
225}
226
227/* Start SRP */
228static int langwell_otg_start_srp(struct otg_transceiver *otg)
229{
230 u32 val;
231
232 otg_dbg("Start SRP ->\n");
233
234 val = readl(the_transceiver->regs + CI_OTGSC);
235
236 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
237 the_transceiver->regs + CI_OTGSC);
238
239 /* Check if the data plus is finished or not */
240 msleep(8);
241 val = readl(the_transceiver->regs + CI_OTGSC);
242 if (val & (OTGSC_HADP | OTGSC_DP))
243 otg_dbg("DataLine SRP Error\n");
244
245 /* FIXME: VBus SRP */
246
247 return 0;
248}
249
250
251/* stop SOF via bus_suspend */
252static void langwell_otg_loc_sof(int on)
253{
254 struct usb_hcd *hcd;
255 int err;
256
257 otg_dbg("loc_sof -> %d\n", on);
258
259 hcd = bus_to_hcd(the_transceiver->otg.host);
260 if (on)
261 err = hcd->driver->bus_resume(hcd);
262 else
263 err = hcd->driver->bus_suspend(hcd);
264
265 if (err)
266 otg_dbg("Failed to resume/suspend bus - %d\n", err);
267}
268
269static void langwell_otg_phy_low_power(int on)
270{
271 u32 val;
272
273 otg_dbg("phy low power mode-> %d\n", on);
274
275 val = readl(the_transceiver->regs + CI_HOSTPC1);
276 if (on)
277 writel(val | HOSTPC1_PHCD, the_transceiver->regs + CI_HOSTPC1);
278 else
279 writel(val & ~HOSTPC1_PHCD, the_transceiver->regs + CI_HOSTPC1);
280}
281
282/* Enable/Disable OTG interrupt */
283static void langwell_otg_intr(int on)
284{
285 u32 val;
286
287 otg_dbg("interrupt -> %d\n", on);
288
289 val = readl(the_transceiver->regs + CI_OTGSC);
290 if (on) {
291 val = val | (OTGSC_INTEN_MASK | OTGSC_IDPU);
292 writel(val, the_transceiver->regs + CI_OTGSC);
293 } else {
294 val = val & ~(OTGSC_INTEN_MASK | OTGSC_IDPU);
295 writel(val, the_transceiver->regs + CI_OTGSC);
296 }
297}
298
299/* set HAAR: Hardware Assist Auto-Reset */
300static void langwell_otg_HAAR(int on)
301{
302 u32 val;
303
304 otg_dbg("HAAR -> %d\n", on);
305
306 val = readl(the_transceiver->regs + CI_OTGSC);
307 if (on)
308 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
309 the_transceiver->regs + CI_OTGSC);
310 else
311 writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
312 the_transceiver->regs + CI_OTGSC);
313}
314
315/* set HABA: Hardware Assist B-Disconnect to A-Connect */
316static void langwell_otg_HABA(int on)
317{
318 u32 val;
319
320 otg_dbg("HABA -> %d\n", on);
321
322 val = readl(the_transceiver->regs + CI_OTGSC);
323 if (on)
324 writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
325 the_transceiver->regs + CI_OTGSC);
326 else
327 writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
328 the_transceiver->regs + CI_OTGSC);
329}
330
331static int langwell_otg_check_se0_srp(int on)
332{
333 u32 val;
334
335 int delay_time = TB_SE0_SRP * 10; /* step is 100us */
336
337 otg_dbg("check_se0_srp -> \n");
338
339 do {
340 udelay(100);
341 if (!delay_time--)
342 break;
343 val = readl(the_transceiver->regs + CI_PORTSC1);
344 val &= PORTSC_LS;
345 } while (!val);
346
347 otg_dbg("check_se0_srp <- \n");
348 return val;
349}
350
351/* The timeout callback function to set time out bit */
352static void set_tmout(unsigned long indicator)
353{
354 *(int *)indicator = 1;
355}
356
357void langwell_otg_nsf_msg(unsigned long indicator)
358{
359 switch (indicator) {
360 case 2:
361 case 4:
362 case 6:
363 case 7:
364 printk(KERN_ERR "OTG:NSF-%lu - deivce not responding\n",
365 indicator);
366 break;
367 case 3:
368 printk(KERN_ERR "OTG:NSF-%lu - deivce not supported\n",
369 indicator);
370 break;
371 default:
372 printk(KERN_ERR "Do not have this kind of NSF\n");
373 break;
374 }
375}
376
377/* Initialize timers */
378static void langwell_otg_init_timers(struct otg_hsm *hsm)
379{
380 /* HSM used timers */
381 a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE,
382 (unsigned long)&hsm->a_wait_vrise_tmout);
383 a_wait_bcon_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_BCON,
384 (unsigned long)&hsm->a_wait_bcon_tmout);
385 a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS,
386 (unsigned long)&hsm->a_aidl_bdis_tmout);
387 b_ase0_brst_tmr = otg_timer_initializer(&set_tmout, TB_ASE0_BRST,
388 (unsigned long)&hsm->b_ase0_brst_tmout);
389 b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP,
390 (unsigned long)&hsm->b_se0_srp);
391 b_srp_res_tmr = otg_timer_initializer(&set_tmout, TB_SRP_RES,
392 (unsigned long)&hsm->b_srp_res_tmout);
393 b_bus_suspend_tmr = otg_timer_initializer(&set_tmout, TB_BUS_SUSPEND,
394 (unsigned long)&hsm->b_bus_suspend_tmout);
395}
396
397/* Free timers */
398static void langwell_otg_free_timers(void)
399{
400 kfree(a_wait_vrise_tmr);
401 kfree(a_wait_bcon_tmr);
402 kfree(a_aidl_bdis_tmr);
403 kfree(b_ase0_brst_tmr);
404 kfree(b_se0_srp_tmr);
405 kfree(b_srp_res_tmr);
406 kfree(b_bus_suspend_tmr);
407}
408
409/* Add timer to timer list */
410static void langwell_otg_add_timer(void *gtimer)
411{
412 struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
413 struct langwell_otg_timer *tmp_timer;
414 u32 val32;
415
416 /* Check if the timer is already in the active list,
417 * if so update timer count
418 */
419 list_for_each_entry(tmp_timer, &active_timers, list)
420 if (tmp_timer == timer) {
421 timer->count = timer->expires;
422 return;
423 }
424 timer->count = timer->expires;
425
426 if (list_empty(&active_timers)) {
427 val32 = readl(the_transceiver->regs + CI_OTGSC);
428 writel(val32 | OTGSC_1MSE, the_transceiver->regs + CI_OTGSC);
429 }
430
431 list_add_tail(&timer->list, &active_timers);
432}
433
434/* Remove timer from the timer list; clear timeout status */
435static void langwell_otg_del_timer(void *gtimer)
436{
437 struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
438 struct langwell_otg_timer *tmp_timer, *del_tmp;
439 u32 val32;
440
441 list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list)
442 if (tmp_timer == timer)
443 list_del(&timer->list);
444
445 if (list_empty(&active_timers)) {
446 val32 = readl(the_transceiver->regs + CI_OTGSC);
447 writel(val32 & ~OTGSC_1MSE, the_transceiver->regs + CI_OTGSC);
448 }
449}
450
451/* Reduce timer count by 1, and find timeout conditions.*/
452static int langwell_otg_tick_timer(u32 *int_sts)
453{
454 struct langwell_otg_timer *tmp_timer, *del_tmp;
455 int expired = 0;
456
457 list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) {
458 tmp_timer->count--;
459 /* check if timer expires */
460 if (!tmp_timer->count) {
461 list_del(&tmp_timer->list);
462 tmp_timer->function(tmp_timer->data);
463 expired = 1;
464 }
465 }
466
467 if (list_empty(&active_timers)) {
468 otg_dbg("tick timer: disable 1ms int\n");
469 *int_sts = *int_sts & ~OTGSC_1MSE;
470 }
471 return expired;
472}
473
474static void reset_otg(void)
475{
476 u32 val;
477 int delay_time = 1000;
478
479 otg_dbg("reseting OTG controller ...\n");
480 val = readl(the_transceiver->regs + CI_USBCMD);
481 writel(val | USBCMD_RST, the_transceiver->regs + CI_USBCMD);
482 do {
483 udelay(100);
484 if (!delay_time--)
485 otg_dbg("reset timeout\n");
486 val = readl(the_transceiver->regs + CI_USBCMD);
487 val &= USBCMD_RST;
488 } while (val != 0);
489 otg_dbg("reset done.\n");
490}
491
492static void set_host_mode(void)
493{
494 u32 val;
495
496 reset_otg();
497 val = readl(the_transceiver->regs + CI_USBMODE);
498 val = (val & (~USBMODE_CM)) | USBMODE_HOST;
499 writel(val, the_transceiver->regs + CI_USBMODE);
500}
501
502static void set_client_mode(void)
503{
504 u32 val;
505
506 reset_otg();
507 val = readl(the_transceiver->regs + CI_USBMODE);
508 val = (val & (~USBMODE_CM)) | USBMODE_DEVICE;
509 writel(val, the_transceiver->regs + CI_USBMODE);
510}
511
512static void init_hsm(void)
513{
514 struct langwell_otg *langwell = the_transceiver;
515 u32 val32;
516
517 /* read OTGSC after reset */
518 val32 = readl(langwell->regs + CI_OTGSC);
519 otg_dbg("%s: OTGSC init value = 0x%x\n", __func__, val32);
520
521 /* set init state */
522 if (val32 & OTGSC_ID) {
523 langwell->hsm.id = 1;
524 langwell->otg.default_a = 0;
525 set_client_mode();
526 langwell->otg.state = OTG_STATE_B_IDLE;
527 langwell_otg_drv_vbus(0);
528 } else {
529 langwell->hsm.id = 0;
530 langwell->otg.default_a = 1;
531 set_host_mode();
532 langwell->otg.state = OTG_STATE_A_IDLE;
533 }
534
535 /* set session indicator */
536 if (val32 & OTGSC_BSE)
537 langwell->hsm.b_sess_end = 1;
538 if (val32 & OTGSC_BSV)
539 langwell->hsm.b_sess_vld = 1;
540 if (val32 & OTGSC_ASV)
541 langwell->hsm.a_sess_vld = 1;
542 if (val32 & OTGSC_AVV)
543 langwell->hsm.a_vbus_vld = 1;
544
545 /* defautly power the bus */
546 langwell->hsm.a_bus_req = 1;
547 langwell->hsm.a_bus_drop = 0;
548 /* defautly don't request bus as B device */
549 langwell->hsm.b_bus_req = 0;
550 /* no system error */
551 langwell->hsm.a_clr_err = 0;
552}
553
554static irqreturn_t otg_dummy_irq(int irq, void *_dev)
555{
556 void __iomem *reg_base = _dev;
557 u32 val;
558 u32 int_mask = 0;
559
560 val = readl(reg_base + CI_USBMODE);
561 if ((val & USBMODE_CM) != USBMODE_DEVICE)
562 return IRQ_NONE;
563
564 val = readl(reg_base + CI_USBSTS);
565 int_mask = val & INTR_DUMMY_MASK;
566
567 if (int_mask == 0)
568 return IRQ_NONE;
569
570 /* clear hsm.b_conn here since host driver can't detect it
571 * otg_dummy_irq called means B-disconnect happened.
572 */
573 if (the_transceiver->hsm.b_conn) {
574 the_transceiver->hsm.b_conn = 0;
575 if (spin_trylock(&the_transceiver->wq_lock)) {
576 queue_work(the_transceiver->qwork,
577 &the_transceiver->work);
578 spin_unlock(&the_transceiver->wq_lock);
579 }
580 }
581 /* Clear interrupts */
582 writel(int_mask, reg_base + CI_USBSTS);
583 return IRQ_HANDLED;
584}
585
586static irqreturn_t otg_irq(int irq, void *_dev)
587{
588 struct langwell_otg *langwell = _dev;
589 u32 int_sts, int_en;
590 u32 int_mask = 0;
591 int flag = 0;
592
593 int_sts = readl(langwell->regs + CI_OTGSC);
594 int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
595 int_mask = int_sts & int_en;
596 if (int_mask == 0)
597 return IRQ_NONE;
598
599 if (int_mask & OTGSC_IDIS) {
600 otg_dbg("%s: id change int\n", __func__);
601 langwell->hsm.id = (int_sts & OTGSC_ID) ? 1 : 0;
602 flag = 1;
603 }
604 if (int_mask & OTGSC_DPIS) {
605 otg_dbg("%s: data pulse int\n", __func__);
606 langwell->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
607 flag = 1;
608 }
609 if (int_mask & OTGSC_BSEIS) {
610 otg_dbg("%s: b session end int\n", __func__);
611 langwell->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
612 flag = 1;
613 }
614 if (int_mask & OTGSC_BSVIS) {
615 otg_dbg("%s: b session valid int\n", __func__);
616 langwell->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
617 flag = 1;
618 }
619 if (int_mask & OTGSC_ASVIS) {
620 otg_dbg("%s: a session valid int\n", __func__);
621 langwell->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
622 flag = 1;
623 }
624 if (int_mask & OTGSC_AVVIS) {
625 otg_dbg("%s: a vbus valid int\n", __func__);
626 langwell->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
627 flag = 1;
628 }
629
630 if (int_mask & OTGSC_1MSS) {
631 /* need to schedule otg_work if any timer is expired */
632 if (langwell_otg_tick_timer(&int_sts))
633 flag = 1;
634 }
635
636 writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
637 langwell->regs + CI_OTGSC);
638 if (flag)
639 queue_work(langwell->qwork, &langwell->work);
640
641 return IRQ_HANDLED;
642}
643
644static void langwell_otg_work(struct work_struct *work)
645{
646 struct langwell_otg *langwell = container_of(work,
647 struct langwell_otg, work);
648 int retval;
649
650 otg_dbg("%s: old state = %s\n", __func__,
651 state_string(langwell->otg.state));
652
653 switch (langwell->otg.state) {
654 case OTG_STATE_UNDEFINED:
655 case OTG_STATE_B_IDLE:
656 if (!langwell->hsm.id) {
657 langwell_otg_del_timer(b_srp_res_tmr);
658 langwell->otg.default_a = 1;
659 langwell->hsm.a_srp_det = 0;
660
661 langwell_otg_chrg_vbus(0);
662 langwell_otg_drv_vbus(0);
663
664 set_host_mode();
665 langwell->otg.state = OTG_STATE_A_IDLE;
666 queue_work(langwell->qwork, &langwell->work);
667 } else if (langwell->hsm.b_srp_res_tmout) {
668 langwell->hsm.b_srp_res_tmout = 0;
669 langwell->hsm.b_bus_req = 0;
670 langwell_otg_nsf_msg(6);
671 } else if (langwell->hsm.b_sess_vld) {
672 langwell_otg_del_timer(b_srp_res_tmr);
673 langwell->hsm.b_sess_end = 0;
674 langwell->hsm.a_bus_suspend = 0;
675
676 langwell_otg_chrg_vbus(0);
677 if (langwell->client_ops) {
678 langwell->client_ops->resume(langwell->pdev);
679 langwell->otg.state = OTG_STATE_B_PERIPHERAL;
680 } else
681 otg_dbg("client driver not loaded.\n");
682
683 } else if (langwell->hsm.b_bus_req &&
684 (langwell->hsm.b_sess_end)) {
685 /* workaround for b_se0_srp detection */
686 retval = langwell_otg_check_se0_srp(0);
687 if (retval) {
688 langwell->hsm.b_bus_req = 0;
689 otg_dbg("LS is not SE0, try again later\n");
690 } else {
691 /* Start SRP */
692 langwell_otg_start_srp(&langwell->otg);
693 langwell_otg_add_timer(b_srp_res_tmr);
694 }
695 }
696 break;
697 case OTG_STATE_B_SRP_INIT:
698 if (!langwell->hsm.id) {
699 langwell->otg.default_a = 1;
700 langwell->hsm.a_srp_det = 0;
701
702 langwell_otg_drv_vbus(0);
703 langwell_otg_chrg_vbus(0);
704
705 langwell->otg.state = OTG_STATE_A_IDLE;
706 queue_work(langwell->qwork, &langwell->work);
707 } else if (langwell->hsm.b_sess_vld) {
708 langwell_otg_chrg_vbus(0);
709 if (langwell->client_ops) {
710 langwell->client_ops->resume(langwell->pdev);
711 langwell->otg.state = OTG_STATE_B_PERIPHERAL;
712 } else
713 otg_dbg("client driver not loaded.\n");
714 }
715 break;
716 case OTG_STATE_B_PERIPHERAL:
717 if (!langwell->hsm.id) {
718 langwell->otg.default_a = 1;
719 langwell->hsm.a_srp_det = 0;
720
721 langwell_otg_drv_vbus(0);
722 langwell_otg_chrg_vbus(0);
723 set_host_mode();
724
725 if (langwell->client_ops) {
726 langwell->client_ops->suspend(langwell->pdev,
727 PMSG_FREEZE);
728 } else
729 otg_dbg("client driver has been removed.\n");
730
731 langwell->otg.state = OTG_STATE_A_IDLE;
732 queue_work(langwell->qwork, &langwell->work);
733 } else if (!langwell->hsm.b_sess_vld) {
734 langwell->hsm.b_hnp_enable = 0;
735
736 if (langwell->client_ops) {
737 langwell->client_ops->suspend(langwell->pdev,
738 PMSG_FREEZE);
739 } else
740 otg_dbg("client driver has been removed.\n");
741
742 langwell->otg.state = OTG_STATE_B_IDLE;
743 } else if (langwell->hsm.b_bus_req && langwell->hsm.b_hnp_enable
744 && langwell->hsm.a_bus_suspend) {
745
746 if (langwell->client_ops) {
747 langwell->client_ops->suspend(langwell->pdev,
748 PMSG_FREEZE);
749 } else
750 otg_dbg("client driver has been removed.\n");
751
752 langwell_otg_HAAR(1);
753 langwell->hsm.a_conn = 0;
754
755 if (langwell->host_ops) {
756 langwell->host_ops->probe(langwell->pdev,
757 langwell->host_ops->id_table);
758 langwell->otg.state = OTG_STATE_B_WAIT_ACON;
759 } else
760 otg_dbg("host driver not loaded.\n");
761
762 langwell->hsm.a_bus_resume = 0;
763 langwell->hsm.b_ase0_brst_tmout = 0;
764 langwell_otg_add_timer(b_ase0_brst_tmr);
765 }
766 break;
767
768 case OTG_STATE_B_WAIT_ACON:
769 if (!langwell->hsm.id) {
770 langwell_otg_del_timer(b_ase0_brst_tmr);
771 langwell->otg.default_a = 1;
772 langwell->hsm.a_srp_det = 0;
773
774 langwell_otg_drv_vbus(0);
775 langwell_otg_chrg_vbus(0);
776 set_host_mode();
777
778 langwell_otg_HAAR(0);
779 if (langwell->host_ops)
780 langwell->host_ops->remove(langwell->pdev);
781 else
782 otg_dbg("host driver has been removed.\n");
783 langwell->otg.state = OTG_STATE_A_IDLE;
784 queue_work(langwell->qwork, &langwell->work);
785 } else if (!langwell->hsm.b_sess_vld) {
786 langwell_otg_del_timer(b_ase0_brst_tmr);
787 langwell->hsm.b_hnp_enable = 0;
788 langwell->hsm.b_bus_req = 0;
789 langwell_otg_chrg_vbus(0);
790 langwell_otg_HAAR(0);
791
792 if (langwell->host_ops)
793 langwell->host_ops->remove(langwell->pdev);
794 else
795 otg_dbg("host driver has been removed.\n");
796 langwell->otg.state = OTG_STATE_B_IDLE;
797 } else if (langwell->hsm.a_conn) {
798 langwell_otg_del_timer(b_ase0_brst_tmr);
799 langwell_otg_HAAR(0);
800 langwell->otg.state = OTG_STATE_B_HOST;
801 queue_work(langwell->qwork, &langwell->work);
802 } else if (langwell->hsm.a_bus_resume ||
803 langwell->hsm.b_ase0_brst_tmout) {
804 langwell_otg_del_timer(b_ase0_brst_tmr);
805 langwell_otg_HAAR(0);
806 langwell_otg_nsf_msg(7);
807
808 if (langwell->host_ops)
809 langwell->host_ops->remove(langwell->pdev);
810 else
811 otg_dbg("host driver has been removed.\n");
812
813 langwell->hsm.a_bus_suspend = 0;
814 langwell->hsm.b_bus_req = 0;
815
816 if (langwell->client_ops)
817 langwell->client_ops->resume(langwell->pdev);
818 else
819 otg_dbg("client driver not loaded.\n");
820
821 langwell->otg.state = OTG_STATE_B_PERIPHERAL;
822 }
823 break;
824
825 case OTG_STATE_B_HOST:
826 if (!langwell->hsm.id) {
827 langwell->otg.default_a = 1;
828 langwell->hsm.a_srp_det = 0;
829
830 langwell_otg_drv_vbus(0);
831 langwell_otg_chrg_vbus(0);
832 set_host_mode();
833 if (langwell->host_ops)
834 langwell->host_ops->remove(langwell->pdev);
835 else
836 otg_dbg("host driver has been removed.\n");
837 langwell->otg.state = OTG_STATE_A_IDLE;
838 queue_work(langwell->qwork, &langwell->work);
839 } else if (!langwell->hsm.b_sess_vld) {
840 langwell->hsm.b_hnp_enable = 0;
841 langwell->hsm.b_bus_req = 0;
842 langwell_otg_chrg_vbus(0);
843 if (langwell->host_ops)
844 langwell->host_ops->remove(langwell->pdev);
845 else
846 otg_dbg("host driver has been removed.\n");
847 langwell->otg.state = OTG_STATE_B_IDLE;
848 } else if ((!langwell->hsm.b_bus_req) ||
849 (!langwell->hsm.a_conn)) {
850 langwell->hsm.b_bus_req = 0;
851 langwell_otg_loc_sof(0);
852 if (langwell->host_ops)
853 langwell->host_ops->remove(langwell->pdev);
854 else
855 otg_dbg("host driver has been removed.\n");
856
857 langwell->hsm.a_bus_suspend = 0;
858
859 if (langwell->client_ops)
860 langwell->client_ops->resume(langwell->pdev);
861 else
862 otg_dbg("client driver not loaded.\n");
863
864 langwell->otg.state = OTG_STATE_B_PERIPHERAL;
865 }
866 break;
867
868 case OTG_STATE_A_IDLE:
869 langwell->otg.default_a = 1;
870 if (langwell->hsm.id) {
871 langwell->otg.default_a = 0;
872 langwell->hsm.b_bus_req = 0;
873 langwell_otg_drv_vbus(0);
874 langwell_otg_chrg_vbus(0);
875
876 langwell->otg.state = OTG_STATE_B_IDLE;
877 queue_work(langwell->qwork, &langwell->work);
878 } else if (langwell->hsm.a_sess_vld) {
879 langwell_otg_drv_vbus(1);
880 langwell->hsm.a_srp_det = 1;
881 langwell->hsm.a_wait_vrise_tmout = 0;
882 langwell_otg_add_timer(a_wait_vrise_tmr);
883 langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
884 queue_work(langwell->qwork, &langwell->work);
885 } else if (!langwell->hsm.a_bus_drop &&
886 (langwell->hsm.a_srp_det || langwell->hsm.a_bus_req)) {
887 langwell_otg_drv_vbus(1);
888 langwell->hsm.a_wait_vrise_tmout = 0;
889 langwell_otg_add_timer(a_wait_vrise_tmr);
890 langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
891 queue_work(langwell->qwork, &langwell->work);
892 }
893 break;
894 case OTG_STATE_A_WAIT_VRISE:
895 if (langwell->hsm.id) {
896 langwell_otg_del_timer(a_wait_vrise_tmr);
897 langwell->hsm.b_bus_req = 0;
898 langwell->otg.default_a = 0;
899 langwell_otg_drv_vbus(0);
900 langwell->otg.state = OTG_STATE_B_IDLE;
901 } else if (langwell->hsm.a_vbus_vld) {
902 langwell_otg_del_timer(a_wait_vrise_tmr);
903 if (langwell->host_ops)
904 langwell->host_ops->probe(langwell->pdev,
905 langwell->host_ops->id_table);
906 else
907 otg_dbg("host driver not loaded.\n");
908 langwell->hsm.b_conn = 0;
909 langwell->hsm.a_set_b_hnp_en = 0;
910 langwell->hsm.a_wait_bcon_tmout = 0;
911 langwell_otg_add_timer(a_wait_bcon_tmr);
912 langwell->otg.state = OTG_STATE_A_WAIT_BCON;
913 } else if (langwell->hsm.a_wait_vrise_tmout) {
914 if (langwell->hsm.a_vbus_vld) {
915 if (langwell->host_ops)
916 langwell->host_ops->probe(
917 langwell->pdev,
918 langwell->host_ops->id_table);
919 else
920 otg_dbg("host driver not loaded.\n");
921 langwell->hsm.b_conn = 0;
922 langwell->hsm.a_set_b_hnp_en = 0;
923 langwell->hsm.a_wait_bcon_tmout = 0;
924 langwell_otg_add_timer(a_wait_bcon_tmr);
925 langwell->otg.state = OTG_STATE_A_WAIT_BCON;
926 } else {
927 langwell_otg_drv_vbus(0);
928 langwell->otg.state = OTG_STATE_A_VBUS_ERR;
929 }
930 }
931 break;
932 case OTG_STATE_A_WAIT_BCON:
933 if (langwell->hsm.id) {
934 langwell_otg_del_timer(a_wait_bcon_tmr);
935
936 langwell->otg.default_a = 0;
937 langwell->hsm.b_bus_req = 0;
938 if (langwell->host_ops)
939 langwell->host_ops->remove(langwell->pdev);
940 else
941 otg_dbg("host driver has been removed.\n");
942 langwell_otg_drv_vbus(0);
943 langwell->otg.state = OTG_STATE_B_IDLE;
944 queue_work(langwell->qwork, &langwell->work);
945 } else if (!langwell->hsm.a_vbus_vld) {
946 langwell_otg_del_timer(a_wait_bcon_tmr);
947
948 if (langwell->host_ops)
949 langwell->host_ops->remove(langwell->pdev);
950 else
951 otg_dbg("host driver has been removed.\n");
952 langwell_otg_drv_vbus(0);
953 langwell->otg.state = OTG_STATE_A_VBUS_ERR;
954 } else if (langwell->hsm.a_bus_drop ||
955 (langwell->hsm.a_wait_bcon_tmout &&
956 !langwell->hsm.a_bus_req)) {
957 langwell_otg_del_timer(a_wait_bcon_tmr);
958
959 if (langwell->host_ops)
960 langwell->host_ops->remove(langwell->pdev);
961 else
962 otg_dbg("host driver has been removed.\n");
963 langwell_otg_drv_vbus(0);
964 langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
965 } else if (langwell->hsm.b_conn) {
966 langwell_otg_del_timer(a_wait_bcon_tmr);
967
968 langwell->hsm.a_suspend_req = 0;
969 langwell->otg.state = OTG_STATE_A_HOST;
970 if (!langwell->hsm.a_bus_req &&
971 langwell->hsm.a_set_b_hnp_en) {
972 /* It is not safe enough to do a fast
973 * transistion from A_WAIT_BCON to
974 * A_SUSPEND */
975 msleep(10000);
976 if (langwell->hsm.a_bus_req)
977 break;
978
979 if (request_irq(langwell->pdev->irq,
980 otg_dummy_irq, IRQF_SHARED,
981 driver_name, langwell->regs) != 0) {
982 otg_dbg("request interrupt %d fail\n",
983 langwell->pdev->irq);
984 }
985
986 langwell_otg_HABA(1);
987 langwell->hsm.b_bus_resume = 0;
988 langwell->hsm.a_aidl_bdis_tmout = 0;
989 langwell_otg_add_timer(a_aidl_bdis_tmr);
990
991 langwell_otg_loc_sof(0);
992 langwell->otg.state = OTG_STATE_A_SUSPEND;
993 } else if (!langwell->hsm.a_bus_req &&
994 !langwell->hsm.a_set_b_hnp_en) {
995 struct pci_dev *pdev = langwell->pdev;
996 if (langwell->host_ops)
997 langwell->host_ops->remove(pdev);
998 else
999 otg_dbg("host driver removed.\n");
1000 langwell_otg_drv_vbus(0);
1001 langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
1002 }
1003 }
1004 break;
1005 case OTG_STATE_A_HOST:
1006 if (langwell->hsm.id) {
1007 langwell->otg.default_a = 0;
1008 langwell->hsm.b_bus_req = 0;
1009 if (langwell->host_ops)
1010 langwell->host_ops->remove(langwell->pdev);
1011 else
1012 otg_dbg("host driver has been removed.\n");
1013 langwell_otg_drv_vbus(0);
1014 langwell->otg.state = OTG_STATE_B_IDLE;
1015 queue_work(langwell->qwork, &langwell->work);
1016 } else if (langwell->hsm.a_bus_drop ||
1017 (!langwell->hsm.a_set_b_hnp_en && !langwell->hsm.a_bus_req)) {
1018 if (langwell->host_ops)
1019 langwell->host_ops->remove(langwell->pdev);
1020 else
1021 otg_dbg("host driver has been removed.\n");
1022 langwell_otg_drv_vbus(0);
1023 langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
1024 } else if (!langwell->hsm.a_vbus_vld) {
1025 if (langwell->host_ops)
1026 langwell->host_ops->remove(langwell->pdev);
1027 else
1028 otg_dbg("host driver has been removed.\n");
1029 langwell_otg_drv_vbus(0);
1030 langwell->otg.state = OTG_STATE_A_VBUS_ERR;
1031 } else if (langwell->hsm.a_set_b_hnp_en
1032 && !langwell->hsm.a_bus_req) {
1033 /* Set HABA to enable hardware assistance to signal
1034 * A-connect after receiver B-disconnect. Hardware
1035 * will then set client mode and enable URE, SLE and
1036 * PCE after the assistance. otg_dummy_irq is used to
1037 * clean these ints when client driver is not resumed.
1038 */
1039 if (request_irq(langwell->pdev->irq,
1040 otg_dummy_irq, IRQF_SHARED, driver_name,
1041 langwell->regs) != 0) {
1042 otg_dbg("request interrupt %d failed\n",
1043 langwell->pdev->irq);
1044 }
1045
1046 /* set HABA */
1047 langwell_otg_HABA(1);
1048 langwell->hsm.b_bus_resume = 0;
1049 langwell->hsm.a_aidl_bdis_tmout = 0;
1050 langwell_otg_add_timer(a_aidl_bdis_tmr);
1051 langwell_otg_loc_sof(0);
1052 langwell->otg.state = OTG_STATE_A_SUSPEND;
1053 } else if (!langwell->hsm.b_conn || !langwell->hsm.a_bus_req) {
1054 langwell->hsm.a_wait_bcon_tmout = 0;
1055 langwell->hsm.a_set_b_hnp_en = 0;
1056 langwell_otg_add_timer(a_wait_bcon_tmr);
1057 langwell->otg.state = OTG_STATE_A_WAIT_BCON;
1058 }
1059 break;
1060 case OTG_STATE_A_SUSPEND:
1061 if (langwell->hsm.id) {
1062 langwell_otg_del_timer(a_aidl_bdis_tmr);
1063 langwell_otg_HABA(0);
1064 free_irq(langwell->pdev->irq, langwell->regs);
1065 langwell->otg.default_a = 0;
1066 langwell->hsm.b_bus_req = 0;
1067 if (langwell->host_ops)
1068 langwell->host_ops->remove(langwell->pdev);
1069 else
1070 otg_dbg("host driver has been removed.\n");
1071 langwell_otg_drv_vbus(0);
1072 langwell->otg.state = OTG_STATE_B_IDLE;
1073 queue_work(langwell->qwork, &langwell->work);
1074 } else if (langwell->hsm.a_bus_req ||
1075 langwell->hsm.b_bus_resume) {
1076 langwell_otg_del_timer(a_aidl_bdis_tmr);
1077 langwell_otg_HABA(0);
1078 free_irq(langwell->pdev->irq, langwell->regs);
1079 langwell->hsm.a_suspend_req = 0;
1080 langwell_otg_loc_sof(1);
1081 langwell->otg.state = OTG_STATE_A_HOST;
1082 } else if (langwell->hsm.a_aidl_bdis_tmout ||
1083 langwell->hsm.a_bus_drop) {
1084 langwell_otg_del_timer(a_aidl_bdis_tmr);
1085 langwell_otg_HABA(0);
1086 free_irq(langwell->pdev->irq, langwell->regs);
1087 if (langwell->host_ops)
1088 langwell->host_ops->remove(langwell->pdev);
1089 else
1090 otg_dbg("host driver has been removed.\n");
1091 langwell_otg_drv_vbus(0);
1092 langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
1093 } else if (!langwell->hsm.b_conn &&
1094 langwell->hsm.a_set_b_hnp_en) {
1095 langwell_otg_del_timer(a_aidl_bdis_tmr);
1096 langwell_otg_HABA(0);
1097 free_irq(langwell->pdev->irq, langwell->regs);
1098
1099 if (langwell->host_ops)
1100 langwell->host_ops->remove(langwell->pdev);
1101 else
1102 otg_dbg("host driver has been removed.\n");
1103
1104 langwell->hsm.b_bus_suspend = 0;
1105 langwell->hsm.b_bus_suspend_vld = 0;
1106 langwell->hsm.b_bus_suspend_tmout = 0;
1107
1108 /* msleep(200); */
1109 if (langwell->client_ops)
1110 langwell->client_ops->resume(langwell->pdev);
1111 else
1112 otg_dbg("client driver not loaded.\n");
1113
1114 langwell_otg_add_timer(b_bus_suspend_tmr);
1115 langwell->otg.state = OTG_STATE_A_PERIPHERAL;
1116 break;
1117 } else if (!langwell->hsm.a_vbus_vld) {
1118 langwell_otg_del_timer(a_aidl_bdis_tmr);
1119 langwell_otg_HABA(0);
1120 free_irq(langwell->pdev->irq, langwell->regs);
1121 if (langwell->host_ops)
1122 langwell->host_ops->remove(langwell->pdev);
1123 else
1124 otg_dbg("host driver has been removed.\n");
1125 langwell_otg_drv_vbus(0);
1126 langwell->otg.state = OTG_STATE_A_VBUS_ERR;
1127 }
1128 break;
1129 case OTG_STATE_A_PERIPHERAL:
1130 if (langwell->hsm.id) {
1131 langwell_otg_del_timer(b_bus_suspend_tmr);
1132 langwell->otg.default_a = 0;
1133 langwell->hsm.b_bus_req = 0;
1134 if (langwell->client_ops)
1135 langwell->client_ops->suspend(langwell->pdev,
1136 PMSG_FREEZE);
1137 else
1138 otg_dbg("client driver has been removed.\n");
1139 langwell_otg_drv_vbus(0);
1140 langwell->otg.state = OTG_STATE_B_IDLE;
1141 queue_work(langwell->qwork, &langwell->work);
1142 } else if (!langwell->hsm.a_vbus_vld) {
1143 langwell_otg_del_timer(b_bus_suspend_tmr);
1144 if (langwell->client_ops)
1145 langwell->client_ops->suspend(langwell->pdev,
1146 PMSG_FREEZE);
1147 else
1148 otg_dbg("client driver has been removed.\n");
1149 langwell_otg_drv_vbus(0);
1150 langwell->otg.state = OTG_STATE_A_VBUS_ERR;
1151 } else if (langwell->hsm.a_bus_drop) {
1152 langwell_otg_del_timer(b_bus_suspend_tmr);
1153 if (langwell->client_ops)
1154 langwell->client_ops->suspend(langwell->pdev,
1155 PMSG_FREEZE);
1156 else
1157 otg_dbg("client driver has been removed.\n");
1158 langwell_otg_drv_vbus(0);
1159 langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
1160 } else if (langwell->hsm.b_bus_suspend) {
1161 langwell_otg_del_timer(b_bus_suspend_tmr);
1162 if (langwell->client_ops)
1163 langwell->client_ops->suspend(langwell->pdev,
1164 PMSG_FREEZE);
1165 else
1166 otg_dbg("client driver has been removed.\n");
1167
1168 if (langwell->host_ops)
1169 langwell->host_ops->probe(langwell->pdev,
1170 langwell->host_ops->id_table);
1171 else
1172 otg_dbg("host driver not loaded.\n");
1173 langwell->hsm.a_set_b_hnp_en = 0;
1174 langwell->hsm.a_wait_bcon_tmout = 0;
1175 langwell_otg_add_timer(a_wait_bcon_tmr);
1176 langwell->otg.state = OTG_STATE_A_WAIT_BCON;
1177 } else if (langwell->hsm.b_bus_suspend_tmout) {
1178 u32 val;
1179 val = readl(langwell->regs + CI_PORTSC1);
1180 if (!(val & PORTSC_SUSP))
1181 break;
1182 if (langwell->client_ops)
1183 langwell->client_ops->suspend(langwell->pdev,
1184 PMSG_FREEZE);
1185 else
1186 otg_dbg("client driver has been removed.\n");
1187 if (langwell->host_ops)
1188 langwell->host_ops->probe(langwell->pdev,
1189 langwell->host_ops->id_table);
1190 else
1191 otg_dbg("host driver not loaded.\n");
1192 langwell->hsm.a_set_b_hnp_en = 0;
1193 langwell->hsm.a_wait_bcon_tmout = 0;
1194 langwell_otg_add_timer(a_wait_bcon_tmr);
1195 langwell->otg.state = OTG_STATE_A_WAIT_BCON;
1196 }
1197 break;
1198 case OTG_STATE_A_VBUS_ERR:
1199 if (langwell->hsm.id) {
1200 langwell->otg.default_a = 0;
1201 langwell->hsm.a_clr_err = 0;
1202 langwell->hsm.a_srp_det = 0;
1203 langwell->otg.state = OTG_STATE_B_IDLE;
1204 queue_work(langwell->qwork, &langwell->work);
1205 } else if (langwell->hsm.a_clr_err) {
1206 langwell->hsm.a_clr_err = 0;
1207 langwell->hsm.a_srp_det = 0;
1208 reset_otg();
1209 init_hsm();
1210 if (langwell->otg.state == OTG_STATE_A_IDLE)
1211 queue_work(langwell->qwork, &langwell->work);
1212 }
1213 break;
1214 case OTG_STATE_A_WAIT_VFALL:
1215 if (langwell->hsm.id) {
1216 langwell->otg.default_a = 0;
1217 langwell->otg.state = OTG_STATE_B_IDLE;
1218 queue_work(langwell->qwork, &langwell->work);
1219 } else if (langwell->hsm.a_bus_req) {
1220 langwell_otg_drv_vbus(1);
1221 langwell->hsm.a_wait_vrise_tmout = 0;
1222 langwell_otg_add_timer(a_wait_vrise_tmr);
1223 langwell->otg.state = OTG_STATE_A_WAIT_VRISE;
1224 } else if (!langwell->hsm.a_sess_vld) {
1225 langwell->hsm.a_srp_det = 0;
1226 langwell_otg_drv_vbus(0);
1227 set_host_mode();
1228 langwell->otg.state = OTG_STATE_A_IDLE;
1229 }
1230 break;
1231 default:
1232 ;
1233 }
1234
1235 otg_dbg("%s: new state = %s\n", __func__,
1236 state_string(langwell->otg.state));
1237}
1238
1239 static ssize_t
1240show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
1241{
1242 struct langwell_otg *langwell;
1243 char *next;
1244 unsigned size;
1245 unsigned t;
1246
1247 langwell = the_transceiver;
1248 next = buf;
1249 size = PAGE_SIZE;
1250
1251 t = scnprintf(next, size,
1252 "\n"
1253 "USBCMD = 0x%08x \n"
1254 "USBSTS = 0x%08x \n"
1255 "USBINTR = 0x%08x \n"
1256 "ASYNCLISTADDR = 0x%08x \n"
1257 "PORTSC1 = 0x%08x \n"
1258 "HOSTPC1 = 0x%08x \n"
1259 "OTGSC = 0x%08x \n"
1260 "USBMODE = 0x%08x \n",
1261 readl(langwell->regs + 0x30),
1262 readl(langwell->regs + 0x34),
1263 readl(langwell->regs + 0x38),
1264 readl(langwell->regs + 0x48),
1265 readl(langwell->regs + 0x74),
1266 readl(langwell->regs + 0xb4),
1267 readl(langwell->regs + 0xf4),
1268 readl(langwell->regs + 0xf8)
1269 );
1270 size -= t;
1271 next += t;
1272
1273 return PAGE_SIZE - size;
1274}
1275static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
1276
/* sysfs "hsm" read: dump the current OTG state followed by every
 * input/output variable of the Host State Machine, one per line. */
static ssize_t
show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct langwell_otg *langwell;
	char *next;
	unsigned size;
	unsigned t;

	langwell = the_transceiver;
	next = buf;
	size = PAGE_SIZE;

	/* NOTE: the format specifiers and the argument list below are
	 * positional pairs -- keep them in lock-step if a field is ever
	 * added, removed or reordered. */
	t = scnprintf(next, size,
		"\n"
		"current state = %s\n"
		"a_bus_resume = \t%d\n"
		"a_bus_suspend = \t%d\n"
		"a_conn = \t%d\n"
		"a_sess_vld = \t%d\n"
		"a_srp_det = \t%d\n"
		"a_vbus_vld = \t%d\n"
		"b_bus_resume = \t%d\n"
		"b_bus_suspend = \t%d\n"
		"b_conn = \t%d\n"
		"b_se0_srp = \t%d\n"
		"b_sess_end = \t%d\n"
		"b_sess_vld = \t%d\n"
		"id = \t%d\n"
		"a_set_b_hnp_en = \t%d\n"
		"b_srp_done = \t%d\n"
		"b_hnp_enable = \t%d\n"
		"a_wait_vrise_tmout = \t%d\n"
		"a_wait_bcon_tmout = \t%d\n"
		"a_aidl_bdis_tmout = \t%d\n"
		"b_ase0_brst_tmout = \t%d\n"
		"a_bus_drop = \t%d\n"
		"a_bus_req = \t%d\n"
		"a_clr_err = \t%d\n"
		"a_suspend_req = \t%d\n"
		"b_bus_req = \t%d\n"
		"b_bus_suspend_tmout = \t%d\n"
		"b_bus_suspend_vld = \t%d\n",
		state_string(langwell->otg.state),
		langwell->hsm.a_bus_resume,
		langwell->hsm.a_bus_suspend,
		langwell->hsm.a_conn,
		langwell->hsm.a_sess_vld,
		langwell->hsm.a_srp_det,
		langwell->hsm.a_vbus_vld,
		langwell->hsm.b_bus_resume,
		langwell->hsm.b_bus_suspend,
		langwell->hsm.b_conn,
		langwell->hsm.b_se0_srp,
		langwell->hsm.b_sess_end,
		langwell->hsm.b_sess_vld,
		langwell->hsm.id,
		langwell->hsm.a_set_b_hnp_en,
		langwell->hsm.b_srp_done,
		langwell->hsm.b_hnp_enable,
		langwell->hsm.a_wait_vrise_tmout,
		langwell->hsm.a_wait_bcon_tmout,
		langwell->hsm.a_aidl_bdis_tmout,
		langwell->hsm.b_ase0_brst_tmout,
		langwell->hsm.a_bus_drop,
		langwell->hsm.a_bus_req,
		langwell->hsm.a_clr_err,
		langwell->hsm.a_suspend_req,
		langwell->hsm.b_bus_req,
		langwell->hsm.b_bus_suspend_tmout,
		langwell->hsm.b_bus_suspend_vld
		);
	size -= t;
	next += t;

	return PAGE_SIZE - size;
}
1353static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
1354
1355static ssize_t
1356get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
1357{
1358 struct langwell_otg *langwell;
1359 char *next;
1360 unsigned size;
1361 unsigned t;
1362
1363 langwell = the_transceiver;
1364 next = buf;
1365 size = PAGE_SIZE;
1366
1367 t = scnprintf(next, size, "%d", langwell->hsm.a_bus_req);
1368 size -= t;
1369 next += t;
1370
1371 return PAGE_SIZE - size;
1372}
1373
1374static ssize_t
1375set_a_bus_req(struct device *dev, struct device_attribute *attr,
1376 const char *buf, size_t count)
1377{
1378 struct langwell_otg *langwell;
1379 langwell = the_transceiver;
1380 if (!langwell->otg.default_a)
1381 return -1;
1382 if (count > 2)
1383 return -1;
1384
1385 if (buf[0] == '0') {
1386 langwell->hsm.a_bus_req = 0;
1387 otg_dbg("a_bus_req = 0\n");
1388 } else if (buf[0] == '1') {
1389 /* If a_bus_drop is TRUE, a_bus_req can't be set */
1390 if (langwell->hsm.a_bus_drop)
1391 return -1;
1392 langwell->hsm.a_bus_req = 1;
1393 otg_dbg("a_bus_req = 1\n");
1394 }
1395 if (spin_trylock(&langwell->wq_lock)) {
1396 queue_work(langwell->qwork, &langwell->work);
1397 spin_unlock(&langwell->wq_lock);
1398 }
1399 return count;
1400}
1401static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req);
1402
1403static ssize_t
1404get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
1405{
1406 struct langwell_otg *langwell;
1407 char *next;
1408 unsigned size;
1409 unsigned t;
1410
1411 langwell = the_transceiver;
1412 next = buf;
1413 size = PAGE_SIZE;
1414
1415 t = scnprintf(next, size, "%d", langwell->hsm.a_bus_drop);
1416 size -= t;
1417 next += t;
1418
1419 return PAGE_SIZE - size;
1420}
1421
1422static ssize_t
1423set_a_bus_drop(struct device *dev, struct device_attribute *attr,
1424 const char *buf, size_t count)
1425{
1426 struct langwell_otg *langwell;
1427 langwell = the_transceiver;
1428 if (!langwell->otg.default_a)
1429 return -1;
1430 if (count > 2)
1431 return -1;
1432
1433 if (buf[0] == '0') {
1434 langwell->hsm.a_bus_drop = 0;
1435 otg_dbg("a_bus_drop = 0\n");
1436 } else if (buf[0] == '1') {
1437 langwell->hsm.a_bus_drop = 1;
1438 langwell->hsm.a_bus_req = 0;
1439 otg_dbg("a_bus_drop = 1, then a_bus_req = 0\n");
1440 }
1441 if (spin_trylock(&langwell->wq_lock)) {
1442 queue_work(langwell->qwork, &langwell->work);
1443 spin_unlock(&langwell->wq_lock);
1444 }
1445 return count;
1446}
1447static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO,
1448 get_a_bus_drop, set_a_bus_drop);
1449
1450static ssize_t
1451get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
1452{
1453 struct langwell_otg *langwell;
1454 char *next;
1455 unsigned size;
1456 unsigned t;
1457
1458 langwell = the_transceiver;
1459 next = buf;
1460 size = PAGE_SIZE;
1461
1462 t = scnprintf(next, size, "%d", langwell->hsm.b_bus_req);
1463 size -= t;
1464 next += t;
1465
1466 return PAGE_SIZE - size;
1467}
1468
1469static ssize_t
1470set_b_bus_req(struct device *dev, struct device_attribute *attr,
1471 const char *buf, size_t count)
1472{
1473 struct langwell_otg *langwell;
1474 langwell = the_transceiver;
1475
1476 if (langwell->otg.default_a)
1477 return -1;
1478
1479 if (count > 2)
1480 return -1;
1481
1482 if (buf[0] == '0') {
1483 langwell->hsm.b_bus_req = 0;
1484 otg_dbg("b_bus_req = 0\n");
1485 } else if (buf[0] == '1') {
1486 langwell->hsm.b_bus_req = 1;
1487 otg_dbg("b_bus_req = 1\n");
1488 }
1489 if (spin_trylock(&langwell->wq_lock)) {
1490 queue_work(langwell->qwork, &langwell->work);
1491 spin_unlock(&langwell->wq_lock);
1492 }
1493 return count;
1494}
1495static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req);
1496
1497static ssize_t
1498set_a_clr_err(struct device *dev, struct device_attribute *attr,
1499 const char *buf, size_t count)
1500{
1501 struct langwell_otg *langwell;
1502 langwell = the_transceiver;
1503
1504 if (!langwell->otg.default_a)
1505 return -1;
1506 if (count > 2)
1507 return -1;
1508
1509 if (buf[0] == '1') {
1510 langwell->hsm.a_clr_err = 1;
1511 otg_dbg("a_clr_err = 1\n");
1512 }
1513 if (spin_trylock(&langwell->wq_lock)) {
1514 queue_work(langwell->qwork, &langwell->work);
1515 spin_unlock(&langwell->wq_lock);
1516 }
1517 return count;
1518}
1519static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err);
1520
/* Debug inputs exposed to userspace under the "inputs" sysfs group
 * (see debug_dev_attr_group below); NULL-terminated per sysfs API. */
static struct attribute *inputs_attrs[] = {
	&dev_attr_a_bus_req.attr,
	&dev_attr_a_bus_drop.attr,
	&dev_attr_b_bus_req.attr,
	&dev_attr_a_clr_err.attr,
	NULL,
};
1528
/* Groups the state-machine input files under <device>/inputs/ in sysfs;
 * registered in langwell_otg_probe(). */
static struct attribute_group debug_dev_attr_group = {
	.name = "inputs",
	.attrs = inputs_attrs,
};
1533
1534int langwell_register_host(struct pci_driver *host_driver)
1535{
1536 int ret = 0;
1537
1538 the_transceiver->host_ops = host_driver;
1539 queue_work(the_transceiver->qwork, &the_transceiver->work);
1540 otg_dbg("host controller driver is registered\n");
1541
1542 return ret;
1543}
1544EXPORT_SYMBOL(langwell_register_host);
1545
1546void langwell_unregister_host(struct pci_driver *host_driver)
1547{
1548 if (the_transceiver->host_ops)
1549 the_transceiver->host_ops->remove(the_transceiver->pdev);
1550 the_transceiver->host_ops = NULL;
1551 the_transceiver->hsm.a_bus_drop = 1;
1552 queue_work(the_transceiver->qwork, &the_transceiver->work);
1553 otg_dbg("host controller driver is unregistered\n");
1554}
1555EXPORT_SYMBOL(langwell_unregister_host);
1556
1557int langwell_register_peripheral(struct pci_driver *client_driver)
1558{
1559 int ret = 0;
1560
1561 if (client_driver)
1562 ret = client_driver->probe(the_transceiver->pdev,
1563 client_driver->id_table);
1564 if (!ret) {
1565 the_transceiver->client_ops = client_driver;
1566 queue_work(the_transceiver->qwork, &the_transceiver->work);
1567 otg_dbg("client controller driver is registered\n");
1568 }
1569
1570 return ret;
1571}
1572EXPORT_SYMBOL(langwell_register_peripheral);
1573
1574void langwell_unregister_peripheral(struct pci_driver *client_driver)
1575{
1576 if (the_transceiver->client_ops)
1577 the_transceiver->client_ops->remove(the_transceiver->pdev);
1578 the_transceiver->client_ops = NULL;
1579 the_transceiver->hsm.b_bus_req = 0;
1580 queue_work(the_transceiver->qwork, &the_transceiver->work);
1581 otg_dbg("client controller driver is unregistered\n");
1582}
1583EXPORT_SYMBOL(langwell_unregister_peripheral);
1584
1585static int langwell_otg_probe(struct pci_dev *pdev,
1586 const struct pci_device_id *id)
1587{
1588 unsigned long resource, len;
1589 void __iomem *base = NULL;
1590 int retval;
1591 u32 val32;
1592 struct langwell_otg *langwell;
1593 char qname[] = "langwell_otg_queue";
1594
1595 retval = 0;
1596 otg_dbg("\notg controller is detected.\n");
1597 if (pci_enable_device(pdev) < 0) {
1598 retval = -ENODEV;
1599 goto done;
1600 }
1601
1602 langwell = kzalloc(sizeof *langwell, GFP_KERNEL);
1603 if (langwell == NULL) {
1604 retval = -ENOMEM;
1605 goto done;
1606 }
1607 the_transceiver = langwell;
1608
1609 /* control register: BAR 0 */
1610 resource = pci_resource_start(pdev, 0);
1611 len = pci_resource_len(pdev, 0);
1612 if (!request_mem_region(resource, len, driver_name)) {
1613 retval = -EBUSY;
1614 goto err;
1615 }
1616 langwell->region = 1;
1617
1618 base = ioremap_nocache(resource, len);
1619 if (base == NULL) {
1620 retval = -EFAULT;
1621 goto err;
1622 }
1623 langwell->regs = base;
1624
1625 if (!pdev->irq) {
1626 otg_dbg("No IRQ.\n");
1627 retval = -ENODEV;
1628 goto err;
1629 }
1630
1631 langwell->qwork = create_workqueue(qname);
1632 if (!langwell->qwork) {
1633 otg_dbg("cannot create workqueue %s\n", qname);
1634 retval = -ENOMEM;
1635 goto err;
1636 }
1637 INIT_WORK(&langwell->work, langwell_otg_work);
1638
1639 /* OTG common part */
1640 langwell->pdev = pdev;
1641 langwell->otg.dev = &pdev->dev;
1642 langwell->otg.label = driver_name;
1643 langwell->otg.set_host = langwell_otg_set_host;
1644 langwell->otg.set_peripheral = langwell_otg_set_peripheral;
1645 langwell->otg.set_power = langwell_otg_set_power;
1646 langwell->otg.start_srp = langwell_otg_start_srp;
1647 langwell->otg.state = OTG_STATE_UNDEFINED;
1648 if (otg_set_transceiver(&langwell->otg)) {
1649 otg_dbg("can't set transceiver\n");
1650 retval = -EBUSY;
1651 goto err;
1652 }
1653
1654 reset_otg();
1655 init_hsm();
1656
1657 spin_lock_init(&langwell->lock);
1658 spin_lock_init(&langwell->wq_lock);
1659 INIT_LIST_HEAD(&active_timers);
1660 langwell_otg_init_timers(&langwell->hsm);
1661
1662 if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
1663 driver_name, langwell) != 0) {
1664 otg_dbg("request interrupt %d failed\n", pdev->irq);
1665 retval = -EBUSY;
1666 goto err;
1667 }
1668
1669 /* enable OTGSC int */
1670 val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
1671 OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
1672 writel(val32, langwell->regs + CI_OTGSC);
1673
1674 retval = device_create_file(&pdev->dev, &dev_attr_registers);
1675 if (retval < 0) {
1676 otg_dbg("Can't register sysfs attribute: %d\n", retval);
1677 goto err;
1678 }
1679
1680 retval = device_create_file(&pdev->dev, &dev_attr_hsm);
1681 if (retval < 0) {
1682 otg_dbg("Can't hsm sysfs attribute: %d\n", retval);
1683 goto err;
1684 }
1685
1686 retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
1687 if (retval < 0) {
1688 otg_dbg("Can't register sysfs attr group: %d\n", retval);
1689 goto err;
1690 }
1691
1692 if (langwell->otg.state == OTG_STATE_A_IDLE)
1693 queue_work(langwell->qwork, &langwell->work);
1694
1695 return 0;
1696
1697err:
1698 if (the_transceiver)
1699 langwell_otg_remove(pdev);
1700done:
1701 return retval;
1702}
1703
1704static void langwell_otg_remove(struct pci_dev *pdev)
1705{
1706 struct langwell_otg *langwell;
1707
1708 langwell = the_transceiver;
1709
1710 if (langwell->qwork) {
1711 flush_workqueue(langwell->qwork);
1712 destroy_workqueue(langwell->qwork);
1713 }
1714 langwell_otg_free_timers();
1715
1716 /* disable OTGSC interrupt as OTGSC doesn't change in reset */
1717 writel(0, langwell->regs + CI_OTGSC);
1718
1719 if (pdev->irq)
1720 free_irq(pdev->irq, langwell);
1721 if (langwell->regs)
1722 iounmap(langwell->regs);
1723 if (langwell->region)
1724 release_mem_region(pci_resource_start(pdev, 0),
1725 pci_resource_len(pdev, 0));
1726
1727 otg_set_transceiver(NULL);
1728 pci_disable_device(pdev);
1729 sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
1730 device_remove_file(&pdev->dev, &dev_attr_hsm);
1731 device_remove_file(&pdev->dev, &dev_attr_registers);
1732 kfree(langwell);
1733 langwell = NULL;
1734}
1735
/* Park the controller for system suspend: save PCI config state, enter
 * D3hot, then put the PHY into low-power mode. transceiver_resume()
 * performs the mirror-image sequence. */
static void transceiver_suspend(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	langwell_otg_phy_low_power(1);
}
1742
/* PCI suspend hook: quiesce the OTG controller. Interrupts are masked,
 * the IRQ released, and the work queue flushed first so no state
 * transition can race with the per-state suspend actions below. Active
 * host/client sessions are wound back to a quiescent state before the
 * transceiver itself is parked. */
static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message)
{
	int ret = 0;
	struct langwell_otg *langwell;

	langwell = the_transceiver;

	/* Disable OTG interrupts */
	langwell_otg_intr(0);

	if (pdev->irq)
		free_irq(pdev->irq, langwell);

	/* Prevent more otg_work */
	flush_workqueue(langwell->qwork);
	spin_lock(&langwell->wq_lock);

	/* start actions */
	switch (langwell->otg.state) {
	case OTG_STATE_A_IDLE:
	case OTG_STATE_B_IDLE:
	case OTG_STATE_A_WAIT_VFALL:
	case OTG_STATE_A_VBUS_ERR:
		/* already quiescent -- just park the transceiver */
		transceiver_suspend(pdev);
		break;
	case OTG_STATE_A_WAIT_VRISE:
		/* abort the VBUS rise and fall back to A_IDLE */
		langwell_otg_del_timer(a_wait_vrise_tmr);
		langwell->hsm.a_srp_det = 0;
		langwell_otg_drv_vbus(0);
		langwell->otg.state = OTG_STATE_A_IDLE;
		transceiver_suspend(pdev);
		break;
	case OTG_STATE_A_WAIT_BCON:
		langwell_otg_del_timer(a_wait_bcon_tmr);
		if (langwell->host_ops)
			ret = langwell->host_ops->suspend(pdev, message);
		langwell_otg_drv_vbus(0);
		break;
	case OTG_STATE_A_HOST:
		if (langwell->host_ops)
			ret = langwell->host_ops->suspend(pdev, message);
		langwell_otg_drv_vbus(0);
		langwell_otg_phy_low_power(1);
		break;
	case OTG_STATE_A_SUSPEND:
		/* undo the HNP hardware assistance before removing host */
		langwell_otg_del_timer(a_aidl_bdis_tmr);
		langwell_otg_HABA(0);
		if (langwell->host_ops)
			langwell->host_ops->remove(pdev);
		else
			otg_dbg("host driver has been removed.\n");
		langwell_otg_drv_vbus(0);
		transceiver_suspend(pdev);
		langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
		break;
	case OTG_STATE_A_PERIPHERAL:
		if (langwell->client_ops)
			ret = langwell->client_ops->suspend(pdev, message);
		else
			otg_dbg("client driver has been removed.\n");
		langwell_otg_drv_vbus(0);
		transceiver_suspend(pdev);
		langwell->otg.state = OTG_STATE_A_WAIT_VFALL;
		break;
	case OTG_STATE_B_HOST:
		if (langwell->host_ops)
			langwell->host_ops->remove(pdev);
		else
			otg_dbg("host driver has been removed.\n");
		langwell->hsm.b_bus_req = 0;
		transceiver_suspend(pdev);
		langwell->otg.state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_PERIPHERAL:
		if (langwell->client_ops)
			ret = langwell->client_ops->suspend(pdev, message);
		else
			otg_dbg("client driver has been removed.\n");
		break;
	case OTG_STATE_B_WAIT_ACON:
		/* undo the HAAR assistance before removing the host */
		langwell_otg_del_timer(b_ase0_brst_tmr);
		langwell_otg_HAAR(0);
		if (langwell->host_ops)
			langwell->host_ops->remove(pdev);
		else
			otg_dbg("host driver has been removed.\n");
		langwell->hsm.b_bus_req = 0;
		langwell->otg.state = OTG_STATE_B_IDLE;
		transceiver_suspend(pdev);
		break;
	default:
		otg_dbg("error state before suspend\n ");
		break;
	}
	spin_unlock(&langwell->wq_lock);

	return ret;
}
1841
/* Wake the controller from system suspend: restore PCI config state,
 * return to D0, then bring the PHY out of low-power mode. Mirror of
 * transceiver_suspend(). */
static void transceiver_resume(struct pci_dev *pdev)
{
	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);
	langwell_otg_phy_low_power(0);
}
1848
1849static int langwell_otg_resume(struct pci_dev *pdev)
1850{
1851 int ret = 0;
1852 struct langwell_otg *langwell;
1853
1854 langwell = the_transceiver;
1855
1856 spin_lock(&langwell->wq_lock);
1857
1858 switch (langwell->otg.state) {
1859 case OTG_STATE_A_IDLE:
1860 case OTG_STATE_B_IDLE:
1861 case OTG_STATE_A_WAIT_VFALL:
1862 case OTG_STATE_A_VBUS_ERR:
1863 transceiver_resume(pdev);
1864 break;
1865 case OTG_STATE_A_WAIT_BCON:
1866 langwell_otg_add_timer(a_wait_bcon_tmr);
1867 langwell_otg_drv_vbus(1);
1868 if (langwell->host_ops)
1869 ret = langwell->host_ops->resume(pdev);
1870 break;
1871 case OTG_STATE_A_HOST:
1872 langwell_otg_drv_vbus(1);
1873 langwell_otg_phy_low_power(0);
1874 if (langwell->host_ops)
1875 ret = langwell->host_ops->resume(pdev);
1876 break;
1877 case OTG_STATE_B_PERIPHERAL:
1878 if (langwell->client_ops)
1879 ret = langwell->client_ops->resume(pdev);
1880 else
1881 otg_dbg("client driver not loaded.\n");
1882 break;
1883 default:
1884 otg_dbg("error state before suspend\n ");
1885 break;
1886 }
1887
1888 if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
1889 driver_name, the_transceiver) != 0) {
1890 otg_dbg("request interrupt %d failed\n", pdev->irq);
1891 ret = -EBUSY;
1892 }
1893
1894 /* enable OTG interrupts */
1895 langwell_otg_intr(1);
1896
1897 spin_unlock(&langwell->wq_lock);
1898
1899 queue_work(langwell->qwork, &langwell->work);
1900
1901
1902 return ret;
1903}
1904
/* Module entry point: register the OTG controller PCI driver. */
static int __init langwell_otg_init(void)
{
	return pci_register_driver(&otg_pci_driver);
}
module_init(langwell_otg_init);
1910
/* Module exit point: unregister the OTG controller PCI driver; the PCI
 * core then invokes langwell_otg_remove() for the bound device. */
static void __exit langwell_otg_cleanup(void)
{
	pci_unregister_driver(&otg_pci_driver);
}
module_exit(langwell_otg_cleanup);
diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c
index c567168f89af..9ed5ea568679 100644
--- a/drivers/usb/otg/nop-usb-xceiv.c
+++ b/drivers/usb/otg/nop-usb-xceiv.c
@@ -22,8 +22,8 @@
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 * 23 *
24 * Current status: 24 * Current status:
25 * this is to add "nop" transceiver for all those phy which is 25 * This provides a "nop" transceiver for PHYs which are
26 * autonomous such as isp1504 etc. 26 * autonomous such as isp1504, isp1707, etc.
27 */ 27 */
28 28
29#include <linux/module.h> 29#include <linux/module.h>
@@ -36,30 +36,25 @@ struct nop_usb_xceiv {
36 struct device *dev; 36 struct device *dev;
37}; 37};
38 38
39static u64 nop_xceiv_dmamask = DMA_BIT_MASK(32); 39static struct platform_device *pd;
40
41static struct platform_device nop_xceiv_device = {
42 .name = "nop_usb_xceiv",
43 .id = -1,
44 .dev = {
45 .dma_mask = &nop_xceiv_dmamask,
46 .coherent_dma_mask = DMA_BIT_MASK(32),
47 .platform_data = NULL,
48 },
49};
50 40
51void usb_nop_xceiv_register(void) 41void usb_nop_xceiv_register(void)
52{ 42{
53 if (platform_device_register(&nop_xceiv_device) < 0) { 43 if (pd)
44 return;
45 pd = platform_device_register_simple("nop_usb_xceiv", -1, NULL, 0);
46 if (!pd) {
54 printk(KERN_ERR "Unable to register usb nop transceiver\n"); 47 printk(KERN_ERR "Unable to register usb nop transceiver\n");
55 return; 48 return;
56 } 49 }
57} 50}
51EXPORT_SYMBOL(usb_nop_xceiv_register);
58 52
59void usb_nop_xceiv_unregister(void) 53void usb_nop_xceiv_unregister(void)
60{ 54{
61 platform_device_unregister(&nop_xceiv_device); 55 platform_device_unregister(pd);
62} 56}
57EXPORT_SYMBOL(usb_nop_xceiv_unregister);
63 58
64static inline struct nop_usb_xceiv *xceiv_to_nop(struct otg_transceiver *x) 59static inline struct nop_usb_xceiv *xceiv_to_nop(struct otg_transceiver *x)
65{ 60{
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index d9478d0e1c8b..9e3e7a5c258b 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -217,6 +217,7 @@
217 217
218/* In module TWL4030_MODULE_PM_MASTER */ 218/* In module TWL4030_MODULE_PM_MASTER */
219#define PROTECT_KEY 0x0E 219#define PROTECT_KEY 0x0E
220#define STS_HW_CONDITIONS 0x0F
220 221
221/* In module TWL4030_MODULE_PM_RECEIVER */ 222/* In module TWL4030_MODULE_PM_RECEIVER */
222#define VUSB_DEDICATED1 0x7D 223#define VUSB_DEDICATED1 0x7D
@@ -351,15 +352,26 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
351 int status; 352 int status;
352 int linkstat = USB_LINK_UNKNOWN; 353 int linkstat = USB_LINK_UNKNOWN;
353 354
354 /* STS_HW_CONDITIONS */ 355 /*
355 status = twl4030_readb(twl, TWL4030_MODULE_PM_MASTER, 0x0f); 356 * For ID/VBUS sensing, see manual section 15.4.8 ...
357 * except when using only battery backup power, two
358 * comparators produce VBUS_PRES and ID_PRES signals,
359 * which don't match docs elsewhere. But ... BIT(7)
360 * and BIT(2) of STS_HW_CONDITIONS, respectively, do
361 * seem to match up. If either is true the USB_PRES
362 * signal is active, the OTG module is activated, and
363 * its interrupt may be raised (may wake the system).
364 */
365 status = twl4030_readb(twl, TWL4030_MODULE_PM_MASTER,
366 STS_HW_CONDITIONS);
356 if (status < 0) 367 if (status < 0)
357 dev_err(twl->dev, "USB link status err %d\n", status); 368 dev_err(twl->dev, "USB link status err %d\n", status);
358 else if (status & BIT(7)) 369 else if (status & (BIT(7) | BIT(2))) {
359 linkstat = USB_LINK_VBUS; 370 if (status & BIT(2))
360 else if (status & BIT(2)) 371 linkstat = USB_LINK_ID;
361 linkstat = USB_LINK_ID; 372 else
362 else 373 linkstat = USB_LINK_VBUS;
374 } else
363 linkstat = USB_LINK_NONE; 375 linkstat = USB_LINK_NONE;
364 376
365 dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n", 377 dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
@@ -641,7 +653,7 @@ static int twl4030_set_host(struct otg_transceiver *x, struct usb_bus *host)
641 return 0; 653 return 0;
642} 654}
643 655
644static int __init twl4030_usb_probe(struct platform_device *pdev) 656static int __devinit twl4030_usb_probe(struct platform_device *pdev)
645{ 657{
646 struct twl4030_usb_data *pdata = pdev->dev.platform_data; 658 struct twl4030_usb_data *pdata = pdev->dev.platform_data;
647 struct twl4030_usb *twl; 659 struct twl4030_usb *twl;
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 6d106e74265e..2cbfab3716e5 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -364,7 +364,7 @@ static int aircable_attach(struct usb_serial *serial)
364 return 0; 364 return 0;
365} 365}
366 366
367static void aircable_shutdown(struct usb_serial *serial) 367static void aircable_release(struct usb_serial *serial)
368{ 368{
369 369
370 struct usb_serial_port *port = serial->port[0]; 370 struct usb_serial_port *port = serial->port[0];
@@ -375,7 +375,6 @@ static void aircable_shutdown(struct usb_serial *serial)
375 if (priv) { 375 if (priv) {
376 serial_buf_free(priv->tx_buf); 376 serial_buf_free(priv->tx_buf);
377 serial_buf_free(priv->rx_buf); 377 serial_buf_free(priv->rx_buf);
378 usb_set_serial_port_data(port, NULL);
379 kfree(priv); 378 kfree(priv);
380 } 379 }
381} 380}
@@ -601,7 +600,7 @@ static struct usb_serial_driver aircable_device = {
601 .num_ports = 1, 600 .num_ports = 1,
602 .attach = aircable_attach, 601 .attach = aircable_attach,
603 .probe = aircable_probe, 602 .probe = aircable_probe,
604 .shutdown = aircable_shutdown, 603 .release = aircable_release,
605 .write = aircable_write, 604 .write = aircable_write,
606 .write_room = aircable_write_room, 605 .write_room = aircable_write_room,
607 .write_bulk_callback = aircable_write_bulk_callback, 606 .write_bulk_callback = aircable_write_bulk_callback,
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 2bfd6dd85b5a..7033b031b443 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -90,7 +90,7 @@ static int debug;
90 90
91/* function prototypes for a Belkin USB Serial Adapter F5U103 */ 91/* function prototypes for a Belkin USB Serial Adapter F5U103 */
92static int belkin_sa_startup(struct usb_serial *serial); 92static int belkin_sa_startup(struct usb_serial *serial);
93static void belkin_sa_shutdown(struct usb_serial *serial); 93static void belkin_sa_release(struct usb_serial *serial);
94static int belkin_sa_open(struct tty_struct *tty, 94static int belkin_sa_open(struct tty_struct *tty,
95 struct usb_serial_port *port, struct file *filp); 95 struct usb_serial_port *port, struct file *filp);
96static void belkin_sa_close(struct usb_serial_port *port); 96static void belkin_sa_close(struct usb_serial_port *port);
@@ -142,7 +142,7 @@ static struct usb_serial_driver belkin_device = {
142 .tiocmget = belkin_sa_tiocmget, 142 .tiocmget = belkin_sa_tiocmget,
143 .tiocmset = belkin_sa_tiocmset, 143 .tiocmset = belkin_sa_tiocmset,
144 .attach = belkin_sa_startup, 144 .attach = belkin_sa_startup,
145 .shutdown = belkin_sa_shutdown, 145 .release = belkin_sa_release,
146}; 146};
147 147
148 148
@@ -197,14 +197,13 @@ static int belkin_sa_startup(struct usb_serial *serial)
197} 197}
198 198
199 199
200static void belkin_sa_shutdown(struct usb_serial *serial) 200static void belkin_sa_release(struct usb_serial *serial)
201{ 201{
202 struct belkin_sa_private *priv; 202 struct belkin_sa_private *priv;
203 int i; 203 int i;
204 204
205 dbg("%s", __func__); 205 dbg("%s", __func__);
206 206
207 /* stop reads and writes on all ports */
208 for (i = 0; i < serial->num_ports; ++i) { 207 for (i = 0; i < serial->num_ports; ++i) {
209 /* My special items, the standard routines free my urbs */ 208 /* My special items, the standard routines free my urbs */
210 priv = usb_get_serial_port_data(serial->port[i]); 209 priv = usb_get_serial_port_data(serial->port[i]);
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 83bbb5bca2ef..ba555c528cc6 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -59,23 +59,22 @@ static int usb_serial_device_probe(struct device *dev)
59 retval = -ENODEV; 59 retval = -ENODEV;
60 goto exit; 60 goto exit;
61 } 61 }
62 if (port->dev_state != PORT_REGISTERING)
63 goto exit;
62 64
63 driver = port->serial->type; 65 driver = port->serial->type;
64 if (driver->port_probe) { 66 if (driver->port_probe) {
65 if (!try_module_get(driver->driver.owner)) {
66 dev_err(dev, "module get failed, exiting\n");
67 retval = -EIO;
68 goto exit;
69 }
70 retval = driver->port_probe(port); 67 retval = driver->port_probe(port);
71 module_put(driver->driver.owner);
72 if (retval) 68 if (retval)
73 goto exit; 69 goto exit;
74 } 70 }
75 71
76 retval = device_create_file(dev, &dev_attr_port_number); 72 retval = device_create_file(dev, &dev_attr_port_number);
77 if (retval) 73 if (retval) {
74 if (driver->port_remove)
75 retval = driver->port_remove(port);
78 goto exit; 76 goto exit;
77 }
79 78
80 minor = port->number; 79 minor = port->number;
81 tty_register_device(usb_serial_tty_driver, minor, dev); 80 tty_register_device(usb_serial_tty_driver, minor, dev);
@@ -98,19 +97,15 @@ static int usb_serial_device_remove(struct device *dev)
98 if (!port) 97 if (!port)
99 return -ENODEV; 98 return -ENODEV;
100 99
100 if (port->dev_state != PORT_UNREGISTERING)
101 return retval;
102
101 device_remove_file(&port->dev, &dev_attr_port_number); 103 device_remove_file(&port->dev, &dev_attr_port_number);
102 104
103 driver = port->serial->type; 105 driver = port->serial->type;
104 if (driver->port_remove) { 106 if (driver->port_remove)
105 if (!try_module_get(driver->driver.owner)) {
106 dev_err(dev, "module get failed, exiting\n");
107 retval = -EIO;
108 goto exit;
109 }
110 retval = driver->port_remove(port); 107 retval = driver->port_remove(port);
111 module_put(driver->driver.owner); 108
112 }
113exit:
114 minor = port->number; 109 minor = port->number;
115 tty_unregister_device(usb_serial_tty_driver, minor); 110 tty_unregister_device(usb_serial_tty_driver, minor);
116 dev_info(dev, "%s converter now disconnected from ttyUSB%d\n", 111 dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 16a154d3b2fe..2b9eeda62bfe 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -50,7 +50,7 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
50 unsigned int, unsigned int); 50 unsigned int, unsigned int);
51static void cp210x_break_ctl(struct tty_struct *, int); 51static void cp210x_break_ctl(struct tty_struct *, int);
52static int cp210x_startup(struct usb_serial *); 52static int cp210x_startup(struct usb_serial *);
53static void cp210x_shutdown(struct usb_serial *); 53static void cp210x_disconnect(struct usb_serial *);
54 54
55static int debug; 55static int debug;
56 56
@@ -137,7 +137,7 @@ static struct usb_serial_driver cp210x_device = {
137 .tiocmget = cp210x_tiocmget, 137 .tiocmget = cp210x_tiocmget,
138 .tiocmset = cp210x_tiocmset, 138 .tiocmset = cp210x_tiocmset,
139 .attach = cp210x_startup, 139 .attach = cp210x_startup,
140 .shutdown = cp210x_shutdown, 140 .disconnect = cp210x_disconnect,
141}; 141};
142 142
143/* Config request types */ 143/* Config request types */
@@ -792,7 +792,7 @@ static int cp210x_startup(struct usb_serial *serial)
792 return 0; 792 return 0;
793} 793}
794 794
795static void cp210x_shutdown(struct usb_serial *serial) 795static void cp210x_disconnect(struct usb_serial *serial)
796{ 796{
797 int i; 797 int i;
798 798
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 933ba913e66c..336523fd7366 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -58,7 +58,8 @@ static int debug;
58 58
59/* Function prototypes */ 59/* Function prototypes */
60static int cyberjack_startup(struct usb_serial *serial); 60static int cyberjack_startup(struct usb_serial *serial);
61static void cyberjack_shutdown(struct usb_serial *serial); 61static void cyberjack_disconnect(struct usb_serial *serial);
62static void cyberjack_release(struct usb_serial *serial);
62static int cyberjack_open(struct tty_struct *tty, 63static int cyberjack_open(struct tty_struct *tty,
63 struct usb_serial_port *port, struct file *filp); 64 struct usb_serial_port *port, struct file *filp);
64static void cyberjack_close(struct usb_serial_port *port); 65static void cyberjack_close(struct usb_serial_port *port);
@@ -94,7 +95,8 @@ static struct usb_serial_driver cyberjack_device = {
94 .id_table = id_table, 95 .id_table = id_table,
95 .num_ports = 1, 96 .num_ports = 1,
96 .attach = cyberjack_startup, 97 .attach = cyberjack_startup,
97 .shutdown = cyberjack_shutdown, 98 .disconnect = cyberjack_disconnect,
99 .release = cyberjack_release,
98 .open = cyberjack_open, 100 .open = cyberjack_open,
99 .close = cyberjack_close, 101 .close = cyberjack_close,
100 .write = cyberjack_write, 102 .write = cyberjack_write,
@@ -148,17 +150,25 @@ static int cyberjack_startup(struct usb_serial *serial)
148 return 0; 150 return 0;
149} 151}
150 152
151static void cyberjack_shutdown(struct usb_serial *serial) 153static void cyberjack_disconnect(struct usb_serial *serial)
152{ 154{
153 int i; 155 int i;
154 156
155 dbg("%s", __func__); 157 dbg("%s", __func__);
156 158
157 for (i = 0; i < serial->num_ports; ++i) { 159 for (i = 0; i < serial->num_ports; ++i)
158 usb_kill_urb(serial->port[i]->interrupt_in_urb); 160 usb_kill_urb(serial->port[i]->interrupt_in_urb);
161}
162
163static void cyberjack_release(struct usb_serial *serial)
164{
165 int i;
166
167 dbg("%s", __func__);
168
169 for (i = 0; i < serial->num_ports; ++i) {
159 /* My special items, the standard routines free my urbs */ 170 /* My special items, the standard routines free my urbs */
160 kfree(usb_get_serial_port_data(serial->port[i])); 171 kfree(usb_get_serial_port_data(serial->port[i]));
161 usb_set_serial_port_data(serial->port[i], NULL);
162 } 172 }
163} 173}
164 174
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 669f93848539..9734085fd2fe 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -171,7 +171,7 @@ struct cypress_buf {
171static int cypress_earthmate_startup(struct usb_serial *serial); 171static int cypress_earthmate_startup(struct usb_serial *serial);
172static int cypress_hidcom_startup(struct usb_serial *serial); 172static int cypress_hidcom_startup(struct usb_serial *serial);
173static int cypress_ca42v2_startup(struct usb_serial *serial); 173static int cypress_ca42v2_startup(struct usb_serial *serial);
174static void cypress_shutdown(struct usb_serial *serial); 174static void cypress_release(struct usb_serial *serial);
175static int cypress_open(struct tty_struct *tty, 175static int cypress_open(struct tty_struct *tty,
176 struct usb_serial_port *port, struct file *filp); 176 struct usb_serial_port *port, struct file *filp);
177static void cypress_close(struct usb_serial_port *port); 177static void cypress_close(struct usb_serial_port *port);
@@ -215,7 +215,7 @@ static struct usb_serial_driver cypress_earthmate_device = {
215 .id_table = id_table_earthmate, 215 .id_table = id_table_earthmate,
216 .num_ports = 1, 216 .num_ports = 1,
217 .attach = cypress_earthmate_startup, 217 .attach = cypress_earthmate_startup,
218 .shutdown = cypress_shutdown, 218 .release = cypress_release,
219 .open = cypress_open, 219 .open = cypress_open,
220 .close = cypress_close, 220 .close = cypress_close,
221 .dtr_rts = cypress_dtr_rts, 221 .dtr_rts = cypress_dtr_rts,
@@ -242,7 +242,7 @@ static struct usb_serial_driver cypress_hidcom_device = {
242 .id_table = id_table_cyphidcomrs232, 242 .id_table = id_table_cyphidcomrs232,
243 .num_ports = 1, 243 .num_ports = 1,
244 .attach = cypress_hidcom_startup, 244 .attach = cypress_hidcom_startup,
245 .shutdown = cypress_shutdown, 245 .release = cypress_release,
246 .open = cypress_open, 246 .open = cypress_open,
247 .close = cypress_close, 247 .close = cypress_close,
248 .dtr_rts = cypress_dtr_rts, 248 .dtr_rts = cypress_dtr_rts,
@@ -269,7 +269,7 @@ static struct usb_serial_driver cypress_ca42v2_device = {
269 .id_table = id_table_nokiaca42v2, 269 .id_table = id_table_nokiaca42v2,
270 .num_ports = 1, 270 .num_ports = 1,
271 .attach = cypress_ca42v2_startup, 271 .attach = cypress_ca42v2_startup,
272 .shutdown = cypress_shutdown, 272 .release = cypress_release,
273 .open = cypress_open, 273 .open = cypress_open,
274 .close = cypress_close, 274 .close = cypress_close,
275 .dtr_rts = cypress_dtr_rts, 275 .dtr_rts = cypress_dtr_rts,
@@ -616,7 +616,7 @@ static int cypress_ca42v2_startup(struct usb_serial *serial)
616} /* cypress_ca42v2_startup */ 616} /* cypress_ca42v2_startup */
617 617
618 618
619static void cypress_shutdown(struct usb_serial *serial) 619static void cypress_release(struct usb_serial *serial)
620{ 620{
621 struct cypress_private *priv; 621 struct cypress_private *priv;
622 622
@@ -629,7 +629,6 @@ static void cypress_shutdown(struct usb_serial *serial)
629 if (priv) { 629 if (priv) {
630 cypress_buf_free(priv->buf); 630 cypress_buf_free(priv->buf);
631 kfree(priv); 631 kfree(priv);
632 usb_set_serial_port_data(serial->port[0], NULL);
633 } 632 }
634} 633}
635 634
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 30f5140eff03..f4808091c47c 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -460,7 +460,8 @@ static int digi_carrier_raised(struct usb_serial_port *port);
460static void digi_dtr_rts(struct usb_serial_port *port, int on); 460static void digi_dtr_rts(struct usb_serial_port *port, int on);
461static int digi_startup_device(struct usb_serial *serial); 461static int digi_startup_device(struct usb_serial *serial);
462static int digi_startup(struct usb_serial *serial); 462static int digi_startup(struct usb_serial *serial);
463static void digi_shutdown(struct usb_serial *serial); 463static void digi_disconnect(struct usb_serial *serial);
464static void digi_release(struct usb_serial *serial);
464static void digi_read_bulk_callback(struct urb *urb); 465static void digi_read_bulk_callback(struct urb *urb);
465static int digi_read_inb_callback(struct urb *urb); 466static int digi_read_inb_callback(struct urb *urb);
466static int digi_read_oob_callback(struct urb *urb); 467static int digi_read_oob_callback(struct urb *urb);
@@ -524,7 +525,8 @@ static struct usb_serial_driver digi_acceleport_2_device = {
524 .tiocmget = digi_tiocmget, 525 .tiocmget = digi_tiocmget,
525 .tiocmset = digi_tiocmset, 526 .tiocmset = digi_tiocmset,
526 .attach = digi_startup, 527 .attach = digi_startup,
527 .shutdown = digi_shutdown, 528 .disconnect = digi_disconnect,
529 .release = digi_release,
528}; 530};
529 531
530static struct usb_serial_driver digi_acceleport_4_device = { 532static struct usb_serial_driver digi_acceleport_4_device = {
@@ -550,7 +552,8 @@ static struct usb_serial_driver digi_acceleport_4_device = {
550 .tiocmget = digi_tiocmget, 552 .tiocmget = digi_tiocmget,
551 .tiocmset = digi_tiocmset, 553 .tiocmset = digi_tiocmset,
552 .attach = digi_startup, 554 .attach = digi_startup,
553 .shutdown = digi_shutdown, 555 .disconnect = digi_disconnect,
556 .release = digi_release,
554}; 557};
555 558
556 559
@@ -1556,16 +1559,23 @@ static int digi_startup(struct usb_serial *serial)
1556} 1559}
1557 1560
1558 1561
1559static void digi_shutdown(struct usb_serial *serial) 1562static void digi_disconnect(struct usb_serial *serial)
1560{ 1563{
1561 int i; 1564 int i;
1562 dbg("digi_shutdown: TOP, in_interrupt()=%ld", in_interrupt()); 1565 dbg("digi_disconnect: TOP, in_interrupt()=%ld", in_interrupt());
1563 1566
1564 /* stop reads and writes on all ports */ 1567 /* stop reads and writes on all ports */
1565 for (i = 0; i < serial->type->num_ports + 1; i++) { 1568 for (i = 0; i < serial->type->num_ports + 1; i++) {
1566 usb_kill_urb(serial->port[i]->read_urb); 1569 usb_kill_urb(serial->port[i]->read_urb);
1567 usb_kill_urb(serial->port[i]->write_urb); 1570 usb_kill_urb(serial->port[i]->write_urb);
1568 } 1571 }
1572}
1573
1574
1575static void digi_release(struct usb_serial *serial)
1576{
1577 int i;
1578 dbg("digi_release: TOP, in_interrupt()=%ld", in_interrupt());
1569 1579
1570 /* free the private data structures for all ports */ 1580 /* free the private data structures for all ports */
1571 /* number of regular ports + 1 for the out-of-band port */ 1581 /* number of regular ports + 1 for the out-of-band port */
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 2b141ccb0cd9..80cb3471adbe 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -90,7 +90,6 @@ static int empeg_chars_in_buffer(struct tty_struct *tty);
90static void empeg_throttle(struct tty_struct *tty); 90static void empeg_throttle(struct tty_struct *tty);
91static void empeg_unthrottle(struct tty_struct *tty); 91static void empeg_unthrottle(struct tty_struct *tty);
92static int empeg_startup(struct usb_serial *serial); 92static int empeg_startup(struct usb_serial *serial);
93static void empeg_shutdown(struct usb_serial *serial);
94static void empeg_set_termios(struct tty_struct *tty, 93static void empeg_set_termios(struct tty_struct *tty,
95 struct usb_serial_port *port, struct ktermios *old_termios); 94 struct usb_serial_port *port, struct ktermios *old_termios);
96static void empeg_write_bulk_callback(struct urb *urb); 95static void empeg_write_bulk_callback(struct urb *urb);
@@ -124,7 +123,6 @@ static struct usb_serial_driver empeg_device = {
124 .throttle = empeg_throttle, 123 .throttle = empeg_throttle,
125 .unthrottle = empeg_unthrottle, 124 .unthrottle = empeg_unthrottle,
126 .attach = empeg_startup, 125 .attach = empeg_startup,
127 .shutdown = empeg_shutdown,
128 .set_termios = empeg_set_termios, 126 .set_termios = empeg_set_termios,
129 .write = empeg_write, 127 .write = empeg_write,
130 .write_room = empeg_write_room, 128 .write_room = empeg_write_room,
@@ -427,12 +425,6 @@ static int empeg_startup(struct usb_serial *serial)
427} 425}
428 426
429 427
430static void empeg_shutdown(struct usb_serial *serial)
431{
432 dbg("%s", __func__);
433}
434
435
436static void empeg_set_termios(struct tty_struct *tty, 428static void empeg_set_termios(struct tty_struct *tty,
437 struct usb_serial_port *port, struct ktermios *old_termios) 429 struct usb_serial_port *port, struct ktermios *old_termios)
438{ 430{
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 683304d60615..3dc3768ca71c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -47,7 +47,7 @@
47/* 47/*
48 * Version Information 48 * Version Information
49 */ 49 */
50#define DRIVER_VERSION "v1.4.3" 50#define DRIVER_VERSION "v1.5.0"
51#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>" 51#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>"
52#define DRIVER_DESC "USB FTDI Serial Converters Driver" 52#define DRIVER_DESC "USB FTDI Serial Converters Driver"
53 53
@@ -82,7 +82,8 @@ struct ftdi_private {
82 int rx_processed; 82 int rx_processed;
83 unsigned long rx_bytes; 83 unsigned long rx_bytes;
84 84
85 __u16 interface; /* FT2232C port interface (0 for FT232/245) */ 85 __u16 interface; /* FT2232C, FT2232H or FT4232H port interface
86 (0 for FT232/245) */
86 87
87 speed_t force_baud; /* if non-zero, force the baud rate to 88 speed_t force_baud; /* if non-zero, force the baud rate to
88 this value */ 89 this value */
@@ -94,6 +95,7 @@ struct ftdi_private {
94 unsigned long tx_bytes; 95 unsigned long tx_bytes;
95 unsigned long tx_outstanding_bytes; 96 unsigned long tx_outstanding_bytes;
96 unsigned long tx_outstanding_urbs; 97 unsigned long tx_outstanding_urbs;
98 unsigned short max_packet_size;
97}; 99};
98 100
99/* struct ftdi_sio_quirk is used by devices requiring special attention. */ 101/* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -164,6 +166,7 @@ static struct usb_device_id id_table_combined [] = {
164 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) }, 166 { USB_DEVICE(FTDI_VID, FTDI_8U232AM_ALT_PID) },
165 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) }, 167 { USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
166 { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) }, 168 { USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) },
169 { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
167 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, 170 { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
168 { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) }, 171 { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
169 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) }, 172 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -673,6 +676,7 @@ static struct usb_device_id id_table_combined [] = {
673 { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, 676 { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
674 { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), 677 { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
675 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 678 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
679 { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
676 { }, /* Optional parameter entry */ 680 { }, /* Optional parameter entry */
677 { } /* Terminating entry */ 681 { } /* Terminating entry */
678}; 682};
@@ -693,12 +697,13 @@ static const char *ftdi_chip_name[] = {
693 [FT232BM] = "FT232BM", 697 [FT232BM] = "FT232BM",
694 [FT2232C] = "FT2232C", 698 [FT2232C] = "FT2232C",
695 [FT232RL] = "FT232RL", 699 [FT232RL] = "FT232RL",
700 [FT2232H] = "FT2232H",
701 [FT4232H] = "FT4232H"
696}; 702};
697 703
698 704
699/* Constants for read urb and write urb */ 705/* Constants for read urb and write urb */
700#define BUFSZ 512 706#define BUFSZ 512
701#define PKTSZ 64
702 707
703/* rx_flags */ 708/* rx_flags */
704#define THROTTLED 0x01 709#define THROTTLED 0x01
@@ -715,7 +720,6 @@ static const char *ftdi_chip_name[] = {
715/* function prototypes for a FTDI serial converter */ 720/* function prototypes for a FTDI serial converter */
716static int ftdi_sio_probe(struct usb_serial *serial, 721static int ftdi_sio_probe(struct usb_serial *serial,
717 const struct usb_device_id *id); 722 const struct usb_device_id *id);
718static void ftdi_shutdown(struct usb_serial *serial);
719static int ftdi_sio_port_probe(struct usb_serial_port *port); 723static int ftdi_sio_port_probe(struct usb_serial_port *port);
720static int ftdi_sio_port_remove(struct usb_serial_port *port); 724static int ftdi_sio_port_remove(struct usb_serial_port *port);
721static int ftdi_open(struct tty_struct *tty, 725static int ftdi_open(struct tty_struct *tty,
@@ -744,6 +748,8 @@ static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
744static unsigned short int ftdi_232am_baud_to_divisor(int baud); 748static unsigned short int ftdi_232am_baud_to_divisor(int baud);
745static __u32 ftdi_232bm_baud_base_to_divisor(int baud, int base); 749static __u32 ftdi_232bm_baud_base_to_divisor(int baud, int base);
746static __u32 ftdi_232bm_baud_to_divisor(int baud); 750static __u32 ftdi_232bm_baud_to_divisor(int baud);
751static __u32 ftdi_2232h_baud_base_to_divisor(int baud, int base);
752static __u32 ftdi_2232h_baud_to_divisor(int baud);
747 753
748static struct usb_serial_driver ftdi_sio_device = { 754static struct usb_serial_driver ftdi_sio_device = {
749 .driver = { 755 .driver = {
@@ -772,7 +778,6 @@ static struct usb_serial_driver ftdi_sio_device = {
772 .ioctl = ftdi_ioctl, 778 .ioctl = ftdi_ioctl,
773 .set_termios = ftdi_set_termios, 779 .set_termios = ftdi_set_termios,
774 .break_ctl = ftdi_break_ctl, 780 .break_ctl = ftdi_break_ctl,
775 .shutdown = ftdi_shutdown,
776}; 781};
777 782
778 783
@@ -838,6 +843,36 @@ static __u32 ftdi_232bm_baud_to_divisor(int baud)
838 return ftdi_232bm_baud_base_to_divisor(baud, 48000000); 843 return ftdi_232bm_baud_base_to_divisor(baud, 48000000);
839} 844}
840 845
846static __u32 ftdi_2232h_baud_base_to_divisor(int baud, int base)
847{
848 static const unsigned char divfrac[8] = { 0, 3, 2, 4, 1, 5, 6, 7 };
849 __u32 divisor;
850 int divisor3;
851
852 /* hi-speed baud rate is 10-bit sampling instead of 16-bit */
853 divisor3 = (base / 10 / baud) * 8;
854
855 divisor = divisor3 >> 3;
856 divisor |= (__u32)divfrac[divisor3 & 0x7] << 14;
857 /* Deal with special cases for highest baud rates. */
858 if (divisor == 1)
859 divisor = 0;
860 else if (divisor == 0x4001)
861 divisor = 1;
862 /*
863 * Set this bit to turn off a divide by 2.5 on baud rate generator
864 * This enables baud rates up to 12Mbaud but cannot reach below 1200
865 * baud with this bit set
866 */
867 divisor |= 0x00020000;
868 return divisor;
869}
870
871static __u32 ftdi_2232h_baud_to_divisor(int baud)
872{
873 return ftdi_2232h_baud_base_to_divisor(baud, 120000000);
874}
875
841#define set_mctrl(port, set) update_mctrl((port), (set), 0) 876#define set_mctrl(port, set) update_mctrl((port), (set), 0)
842#define clear_mctrl(port, clear) update_mctrl((port), 0, (clear)) 877#define clear_mctrl(port, clear) update_mctrl((port), 0, (clear))
843 878
@@ -996,6 +1031,19 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
996 baud = 9600; 1031 baud = 9600;
997 } 1032 }
998 break; 1033 break;
1034 case FT2232H: /* FT2232H chip */
1035 case FT4232H: /* FT4232H chip */
1036 if ((baud <= 12000000) & (baud >= 1200)) {
1037 div_value = ftdi_2232h_baud_to_divisor(baud);
1038 } else if (baud < 1200) {
1039 div_value = ftdi_232bm_baud_to_divisor(baud);
1040 } else {
1041 dbg("%s - Baud rate too high!", __func__);
1042 div_value = ftdi_232bm_baud_to_divisor(9600);
1043 div_okay = 0;
1044 baud = 9600;
1045 }
1046 break;
999 } /* priv->chip_type */ 1047 } /* priv->chip_type */
1000 1048
1001 if (div_okay) { 1049 if (div_okay) {
@@ -1196,14 +1244,29 @@ static void ftdi_determine_type(struct usb_serial_port *port)
1196 if (interfaces > 1) { 1244 if (interfaces > 1) {
1197 int inter; 1245 int inter;
1198 1246
1199 /* Multiple interfaces. Assume FT2232C. */ 1247 /* Multiple interfaces.*/
1200 priv->chip_type = FT2232C; 1248 if (version == 0x0800) {
1249 priv->chip_type = FT4232H;
1250 /* Hi-speed - baud clock runs at 120MHz */
1251 priv->baud_base = 120000000 / 2;
1252 } else if (version == 0x0700) {
1253 priv->chip_type = FT2232H;
1254 /* Hi-speed - baud clock runs at 120MHz */
1255 priv->baud_base = 120000000 / 2;
1256 } else
1257 priv->chip_type = FT2232C;
1258
1201 /* Determine interface code. */ 1259 /* Determine interface code. */
1202 inter = serial->interface->altsetting->desc.bInterfaceNumber; 1260 inter = serial->interface->altsetting->desc.bInterfaceNumber;
1203 if (inter == 0) 1261 if (inter == 0) {
1204 priv->interface = PIT_SIOA; 1262 priv->interface = INTERFACE_A;
1205 else 1263 } else if (inter == 1) {
1206 priv->interface = PIT_SIOB; 1264 priv->interface = INTERFACE_B;
1265 } else if (inter == 2) {
1266 priv->interface = INTERFACE_C;
1267 } else if (inter == 3) {
1268 priv->interface = INTERFACE_D;
1269 }
1207 /* BM-type devices have a bug where bcdDevice gets set 1270 /* BM-type devices have a bug where bcdDevice gets set
1208 * to 0x200 when iSerialNumber is 0. */ 1271 * to 0x200 when iSerialNumber is 0. */
1209 if (version < 0x500) { 1272 if (version < 0x500) {
@@ -1231,6 +1294,45 @@ static void ftdi_determine_type(struct usb_serial_port *port)
1231} 1294}
1232 1295
1233 1296
1297/* Determine the maximum packet size for the device. This depends on the chip
1298 * type and the USB host capabilities. The value should be obtained from the
1299 * device descriptor as the chip will use the appropriate values for the host.*/
1300static void ftdi_set_max_packet_size(struct usb_serial_port *port)
1301{
1302 struct ftdi_private *priv = usb_get_serial_port_data(port);
1303 struct usb_serial *serial = port->serial;
1304 struct usb_device *udev = serial->dev;
1305
1306 struct usb_interface *interface = serial->interface;
1307 struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
1308
1309 unsigned num_endpoints;
1310 int i = 0;
1311
1312 num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
1313 dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
1314
1315 /* NOTE: some customers have programmed FT232R/FT245R devices
1316 * with an endpoint size of 0 - not good. In this case, we
1317 * want to override the endpoint descriptor setting and use a
1318 * value of 64 for wMaxPacketSize */
1319 for (i = 0; i < num_endpoints; i++) {
1320 dev_info(&udev->dev, "Endpoint %d MaxPacketSize %d\n", i+1,
1321 interface->cur_altsetting->endpoint[i].desc.wMaxPacketSize);
1322 ep_desc = &interface->cur_altsetting->endpoint[i].desc;
1323 if (ep_desc->wMaxPacketSize == 0) {
1324 ep_desc->wMaxPacketSize = cpu_to_le16(0x40);
1325 dev_info(&udev->dev, "Overriding wMaxPacketSize on endpoint %d\n", i);
1326 }
1327 }
1328
1329 /* set max packet size based on descriptor */
1330 priv->max_packet_size = ep_desc->wMaxPacketSize;
1331
1332 dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
1333}
1334
1335
1234/* 1336/*
1235 * *************************************************************************** 1337 * ***************************************************************************
1236 * Sysfs Attribute 1338 * Sysfs Attribute
@@ -1314,7 +1416,9 @@ static int create_sysfs_attrs(struct usb_serial_port *port)
1314 if ((!retval) && 1416 if ((!retval) &&
1315 (priv->chip_type == FT232BM || 1417 (priv->chip_type == FT232BM ||
1316 priv->chip_type == FT2232C || 1418 priv->chip_type == FT2232C ||
1317 priv->chip_type == FT232RL)) { 1419 priv->chip_type == FT232RL ||
1420 priv->chip_type == FT2232H ||
1421 priv->chip_type == FT4232H)) {
1318 retval = device_create_file(&port->dev, 1422 retval = device_create_file(&port->dev,
1319 &dev_attr_latency_timer); 1423 &dev_attr_latency_timer);
1320 } 1424 }
@@ -1333,7 +1437,9 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
1333 device_remove_file(&port->dev, &dev_attr_event_char); 1437 device_remove_file(&port->dev, &dev_attr_event_char);
1334 if (priv->chip_type == FT232BM || 1438 if (priv->chip_type == FT232BM ||
1335 priv->chip_type == FT2232C || 1439 priv->chip_type == FT2232C ||
1336 priv->chip_type == FT232RL) { 1440 priv->chip_type == FT232RL ||
1441 priv->chip_type == FT2232H ||
1442 priv->chip_type == FT4232H) {
1337 device_remove_file(&port->dev, &dev_attr_latency_timer); 1443 device_remove_file(&port->dev, &dev_attr_latency_timer);
1338 } 1444 }
1339 } 1445 }
@@ -1416,6 +1522,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
1416 usb_set_serial_port_data(port, priv); 1522 usb_set_serial_port_data(port, priv);
1417 1523
1418 ftdi_determine_type(port); 1524 ftdi_determine_type(port);
1525 ftdi_set_max_packet_size(port);
1419 read_latency_timer(port); 1526 read_latency_timer(port);
1420 create_sysfs_attrs(port); 1527 create_sysfs_attrs(port);
1421 return 0; 1528 return 0;
@@ -1485,18 +1592,6 @@ static int ftdi_mtxorb_hack_setup(struct usb_serial *serial)
1485 return 0; 1592 return 0;
1486} 1593}
1487 1594
1488/* ftdi_shutdown is called from usbserial:usb_serial_disconnect
1489 * it is called when the usb device is disconnected
1490 *
1491 * usbserial:usb_serial_disconnect
1492 * calls __serial_close for each open of the port
1493 * shutdown is called then (ie ftdi_shutdown)
1494 */
1495static void ftdi_shutdown(struct usb_serial *serial)
1496{
1497 dbg("%s", __func__);
1498}
1499
1500static void ftdi_sio_priv_release(struct kref *k) 1595static void ftdi_sio_priv_release(struct kref *k)
1501{ 1596{
1502 struct ftdi_private *priv = container_of(k, struct ftdi_private, kref); 1597 struct ftdi_private *priv = container_of(k, struct ftdi_private, kref);
@@ -1671,8 +1766,8 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
1671 if (data_offset > 0) { 1766 if (data_offset > 0) {
1672 /* Original sio needs control bytes too... */ 1767 /* Original sio needs control bytes too... */
1673 transfer_size += (data_offset * 1768 transfer_size += (data_offset *
1674 ((count + (PKTSZ - 1 - data_offset)) / 1769 ((count + (priv->max_packet_size - 1 - data_offset)) /
1675 (PKTSZ - data_offset))); 1770 (priv->max_packet_size - data_offset)));
1676 } 1771 }
1677 1772
1678 buffer = kmalloc(transfer_size, GFP_ATOMIC); 1773 buffer = kmalloc(transfer_size, GFP_ATOMIC);
@@ -1694,7 +1789,7 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
1694 if (data_offset > 0) { 1789 if (data_offset > 0) {
1695 /* Original sio requires control byte at start of 1790 /* Original sio requires control byte at start of
1696 each packet. */ 1791 each packet. */
1697 int user_pktsz = PKTSZ - data_offset; 1792 int user_pktsz = priv->max_packet_size - data_offset;
1698 int todo = count; 1793 int todo = count;
1699 unsigned char *first_byte = buffer; 1794 unsigned char *first_byte = buffer;
1700 const unsigned char *current_position = buf; 1795 const unsigned char *current_position = buf;
@@ -1775,11 +1870,6 @@ static void ftdi_write_bulk_callback(struct urb *urb)
1775 1870
1776 dbg("%s - port %d", __func__, port->number); 1871 dbg("%s - port %d", __func__, port->number);
1777 1872
1778 if (status) {
1779 dbg("nonzero write bulk status received: %d", status);
1780 return;
1781 }
1782
1783 priv = usb_get_serial_port_data(port); 1873 priv = usb_get_serial_port_data(port);
1784 if (!priv) { 1874 if (!priv) {
1785 dbg("%s - bad port private data pointer - exiting", __func__); 1875 dbg("%s - bad port private data pointer - exiting", __func__);
@@ -1790,13 +1880,18 @@ static void ftdi_write_bulk_callback(struct urb *urb)
1790 data_offset = priv->write_offset; 1880 data_offset = priv->write_offset;
1791 if (data_offset > 0) { 1881 if (data_offset > 0) {
1792 /* Subtract the control bytes */ 1882 /* Subtract the control bytes */
1793 countback -= (data_offset * DIV_ROUND_UP(countback, PKTSZ)); 1883 countback -= (data_offset * DIV_ROUND_UP(countback, priv->max_packet_size));
1794 } 1884 }
1795 spin_lock_irqsave(&priv->tx_lock, flags); 1885 spin_lock_irqsave(&priv->tx_lock, flags);
1796 --priv->tx_outstanding_urbs; 1886 --priv->tx_outstanding_urbs;
1797 priv->tx_outstanding_bytes -= countback; 1887 priv->tx_outstanding_bytes -= countback;
1798 spin_unlock_irqrestore(&priv->tx_lock, flags); 1888 spin_unlock_irqrestore(&priv->tx_lock, flags);
1799 1889
1890 if (status) {
1891 dbg("nonzero write bulk status received: %d", status);
1892 return;
1893 }
1894
1800 usb_serial_port_softint(port); 1895 usb_serial_port_softint(port);
1801} /* ftdi_write_bulk_callback */ 1896} /* ftdi_write_bulk_callback */
1802 1897
@@ -1892,7 +1987,7 @@ static void ftdi_read_bulk_callback(struct urb *urb)
1892 1987
1893 /* count data bytes, but not status bytes */ 1988 /* count data bytes, but not status bytes */
1894 countread = urb->actual_length; 1989 countread = urb->actual_length;
1895 countread -= 2 * DIV_ROUND_UP(countread, PKTSZ); 1990 countread -= 2 * DIV_ROUND_UP(countread, priv->max_packet_size);
1896 spin_lock_irqsave(&priv->rx_lock, flags); 1991 spin_lock_irqsave(&priv->rx_lock, flags);
1897 priv->rx_bytes += countread; 1992 priv->rx_bytes += countread;
1898 spin_unlock_irqrestore(&priv->rx_lock, flags); 1993 spin_unlock_irqrestore(&priv->rx_lock, flags);
@@ -1965,7 +2060,7 @@ static void ftdi_process_read(struct work_struct *work)
1965 2060
1966 need_flip = 0; 2061 need_flip = 0;
1967 for (packet_offset = priv->rx_processed; 2062 for (packet_offset = priv->rx_processed;
1968 packet_offset < urb->actual_length; packet_offset += PKTSZ) { 2063 packet_offset < urb->actual_length; packet_offset += priv->max_packet_size) {
1969 int length; 2064 int length;
1970 2065
1971 /* Compare new line status to the old one, signal if different/ 2066 /* Compare new line status to the old one, signal if different/
@@ -1980,7 +2075,7 @@ static void ftdi_process_read(struct work_struct *work)
1980 priv->prev_status = new_status; 2075 priv->prev_status = new_status;
1981 } 2076 }
1982 2077
1983 length = min_t(u32, PKTSZ, urb->actual_length-packet_offset)-2; 2078 length = min_t(u32, priv->max_packet_size, urb->actual_length-packet_offset)-2;
1984 if (length < 0) { 2079 if (length < 0) {
1985 dev_err(&port->dev, "%s - bad packet length: %d\n", 2080 dev_err(&port->dev, "%s - bad packet length: %d\n",
1986 __func__, length+2); 2081 __func__, length+2);
@@ -2011,6 +2106,7 @@ static void ftdi_process_read(struct work_struct *work)
2011 if (data[packet_offset+1] & FTDI_RS_BI) { 2106 if (data[packet_offset+1] & FTDI_RS_BI) {
2012 error_flag = TTY_BREAK; 2107 error_flag = TTY_BREAK;
2013 dbg("BREAK received"); 2108 dbg("BREAK received");
2109 usb_serial_handle_break(port);
2014 } 2110 }
2015 if (data[packet_offset+1] & FTDI_RS_PE) { 2111 if (data[packet_offset+1] & FTDI_RS_PE) {
2016 error_flag = TTY_PARITY; 2112 error_flag = TTY_PARITY;
@@ -2025,8 +2121,11 @@ static void ftdi_process_read(struct work_struct *work)
2025 /* Note that the error flag is duplicated for 2121 /* Note that the error flag is duplicated for
2026 every character received since we don't know 2122 every character received since we don't know
2027 which character it applied to */ 2123 which character it applied to */
2028 tty_insert_flip_char(tty, 2124 if (!usb_serial_handle_sysrq_char(port,
2029 data[packet_offset + i], error_flag); 2125 data[packet_offset + i]))
2126 tty_insert_flip_char(tty,
2127 data[packet_offset + i],
2128 error_flag);
2030 } 2129 }
2031 need_flip = 1; 2130 need_flip = 1;
2032 } 2131 }
@@ -2332,6 +2431,8 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file)
2332 case FT232BM: 2431 case FT232BM:
2333 case FT2232C: 2432 case FT2232C:
2334 case FT232RL: 2433 case FT232RL:
2434 case FT2232H:
2435 case FT4232H:
2335 /* the 8U232AM returns a two byte value (the sio is a 1 byte 2436 /* the 8U232AM returns a two byte value (the sio is a 1 byte
2336 value) - in the same format as the data returned from the in 2437 value) - in the same format as the data returned from the in
2337 point */ 2438 point */
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 12330fa1c095..f1d440a728a3 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -10,7 +10,7 @@
10 * The device is based on the FTDI FT8U100AX chip. It has a DB25 on one side, 10 * The device is based on the FTDI FT8U100AX chip. It has a DB25 on one side,
11 * USB on the other. 11 * USB on the other.
12 * 12 *
13 * Thanx to FTDI (http://www.ftdi.co.uk) for so kindly providing details 13 * Thanx to FTDI (http://www.ftdichip.com) for so kindly providing details
14 * of the protocol required to talk to the device and ongoing assistence 14 * of the protocol required to talk to the device and ongoing assistence
15 * during development. 15 * during development.
16 * 16 *
@@ -28,11 +28,15 @@
28#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */ 28#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
29#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */ 29#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
30#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */ 30#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
31#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
31#define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */ 32#define FTDI_RELAIS_PID 0xFA10 /* Relais device from Rudolf Gugler */
32#define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */ 33#define FTDI_NF_RIC_VID 0x0DCD /* Vendor Id */
33#define FTDI_NF_RIC_PID 0x0001 /* Product Id */ 34#define FTDI_NF_RIC_PID 0x0001 /* Product Id */
34#define FTDI_USBX_707_PID 0xF857 /* ADSTech IR Blaster USBX-707 */ 35#define FTDI_USBX_707_PID 0xF857 /* ADSTech IR Blaster USBX-707 */
35 36
37/* Larsen and Brusgaard AltiTrack/USBtrack */
38#define LARSENBRUSGAARD_VID 0x0FD8
39#define LB_ALTITRACK_PID 0x0001
36 40
37/* www.canusb.com Lawicel CANUSB device */ 41/* www.canusb.com Lawicel CANUSB device */
38#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */ 42#define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
@@ -873,6 +877,11 @@
873#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */ 877#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
874#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */ 878#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */
875 879
880/* Interface indicies for FT2232, FT2232H and FT4232H devices*/
881#define INTERFACE_A 1
882#define INTERFACE_B 2
883#define INTERFACE_C 3
884#define INTERFACE_D 4
876 885
877/* 886/*
878 * FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3 887 * FIC / OpenMoko, Inc. http://wiki.openmoko.org/wiki/Neo1973_Debug_Board_v3
@@ -1036,6 +1045,8 @@ typedef enum {
1036 FT232BM = 3, 1045 FT232BM = 3,
1037 FT2232C = 4, 1046 FT2232C = 4,
1038 FT232RL = 5, 1047 FT232RL = 5,
1048 FT2232H = 6,
1049 FT4232H = 7
1039} ftdi_chip_type_t; 1050} ftdi_chip_type_t;
1040 1051
1041typedef enum { 1052typedef enum {
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index ee25a3fe3b09..8839f1c70b7f 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Garmin GPS driver 2 * Garmin GPS driver
3 * 3 *
4 * Copyright (C) 2006,2007 Hermann Kneissel herkne@users.sourceforge.net 4 * Copyright (C) 2006-2009 Hermann Kneissel herkne@users.sourceforge.net
5 * 5 *
6 * The latest version of the driver can be found at 6 * The latest version of the driver can be found at
7 * http://sourceforge.net/projects/garmin-gps/ 7 * http://sourceforge.net/projects/garmin-gps/
@@ -51,7 +51,7 @@ static int debug;
51 */ 51 */
52 52
53#define VERSION_MAJOR 0 53#define VERSION_MAJOR 0
54#define VERSION_MINOR 31 54#define VERSION_MINOR 33
55 55
56#define _STR(s) #s 56#define _STR(s) #s
57#define _DRIVER_VERSION(a, b) "v" _STR(a) "." _STR(b) 57#define _DRIVER_VERSION(a, b) "v" _STR(a) "." _STR(b)
@@ -129,7 +129,6 @@ struct garmin_data {
129 __u8 state; 129 __u8 state;
130 __u16 flags; 130 __u16 flags;
131 __u8 mode; 131 __u8 mode;
132 __u8 ignorePkts;
133 __u8 count; 132 __u8 count;
134 __u8 pkt_id; 133 __u8 pkt_id;
135 __u32 serial_num; 134 __u32 serial_num;
@@ -141,8 +140,6 @@ struct garmin_data {
141 __u8 inbuffer [GPS_IN_BUFSIZ]; /* tty -> usb */ 140 __u8 inbuffer [GPS_IN_BUFSIZ]; /* tty -> usb */
142 __u8 outbuffer[GPS_OUT_BUFSIZ]; /* usb -> tty */ 141 __u8 outbuffer[GPS_OUT_BUFSIZ]; /* usb -> tty */
143 __u8 privpkt[4*6]; 142 __u8 privpkt[4*6];
144 atomic_t req_count;
145 atomic_t resp_count;
146 spinlock_t lock; 143 spinlock_t lock;
147 struct list_head pktlist; 144 struct list_head pktlist;
148}; 145};
@@ -170,6 +167,8 @@ struct garmin_data {
170#define FLAGS_BULK_IN_ACTIVE 0x0020 167#define FLAGS_BULK_IN_ACTIVE 0x0020
171#define FLAGS_BULK_IN_RESTART 0x0010 168#define FLAGS_BULK_IN_RESTART 0x0010
172#define FLAGS_THROTTLED 0x0008 169#define FLAGS_THROTTLED 0x0008
170#define APP_REQ_SEEN 0x0004
171#define APP_RESP_SEEN 0x0002
173#define CLEAR_HALT_REQUIRED 0x0001 172#define CLEAR_HALT_REQUIRED 0x0001
174 173
175#define FLAGS_QUEUING 0x0100 174#define FLAGS_QUEUING 0x0100
@@ -184,20 +183,16 @@ struct garmin_data {
184 183
185 184
186/* function prototypes */ 185/* function prototypes */
187static void gsp_next_packet(struct garmin_data *garmin_data_p); 186static int gsp_next_packet(struct garmin_data *garmin_data_p);
188static int garmin_write_bulk(struct usb_serial_port *port, 187static int garmin_write_bulk(struct usb_serial_port *port,
189 const unsigned char *buf, int count, 188 const unsigned char *buf, int count,
190 int dismiss_ack); 189 int dismiss_ack);
191 190
192/* some special packets to be send or received */ 191/* some special packets to be send or received */
193static unsigned char const GARMIN_START_SESSION_REQ[] 192static unsigned char const GARMIN_START_SESSION_REQ[]
194 = { 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0 }; 193 = { 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0 };
195static unsigned char const GARMIN_START_SESSION_REQ2[]
196 = { 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
197static unsigned char const GARMIN_START_SESSION_REPLY[] 194static unsigned char const GARMIN_START_SESSION_REPLY[]
198 = { 0, 0, 0, 0, 6, 0, 0, 0, 4, 0, 0, 0 }; 195 = { 0, 0, 0, 0, 6, 0, 0, 0, 4, 0, 0, 0 };
199static unsigned char const GARMIN_SESSION_ACTIVE_REPLY[]
200 = { 0, 0, 0, 0, 17, 0, 0, 0, 4, 0, 0, 0, 0, 16, 0, 0 };
201static unsigned char const GARMIN_BULK_IN_AVAIL_REPLY[] 196static unsigned char const GARMIN_BULK_IN_AVAIL_REPLY[]
202 = { 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 }; 197 = { 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 };
203static unsigned char const GARMIN_APP_LAYER_REPLY[] 198static unsigned char const GARMIN_APP_LAYER_REPLY[]
@@ -233,13 +228,6 @@ static struct usb_driver garmin_driver = {
233}; 228};
234 229
235 230
236static inline int noResponseFromAppLayer(struct garmin_data *garmin_data_p)
237{
238 return atomic_read(&garmin_data_p->req_count) ==
239 atomic_read(&garmin_data_p->resp_count);
240}
241
242
243static inline int getLayerId(const __u8 *usbPacket) 231static inline int getLayerId(const __u8 *usbPacket)
244{ 232{
245 return __le32_to_cpup((__le32 *)(usbPacket)); 233 return __le32_to_cpup((__le32 *)(usbPacket));
@@ -325,8 +313,11 @@ static int pkt_add(struct garmin_data *garmin_data_p,
325 state = garmin_data_p->state; 313 state = garmin_data_p->state;
326 spin_unlock_irqrestore(&garmin_data_p->lock, flags); 314 spin_unlock_irqrestore(&garmin_data_p->lock, flags);
327 315
316 dbg("%s - added: pkt: %d - %d bytes",
317 __func__, pkt->seq, data_length);
318
328 /* in serial mode, if someone is waiting for data from 319 /* in serial mode, if someone is waiting for data from
329 the device, iconvert and send the next packet to tty. */ 320 the device, convert and send the next packet to tty. */
330 if (result && (state == STATE_GSP_WAIT_DATA)) 321 if (result && (state == STATE_GSP_WAIT_DATA))
331 gsp_next_packet(garmin_data_p); 322 gsp_next_packet(garmin_data_p);
332 } 323 }
@@ -411,7 +402,7 @@ static int gsp_send_ack(struct garmin_data *garmin_data_p, __u8 pkt_id)
411/* 402/*
412 * called for a complete packet received from tty layer 403 * called for a complete packet received from tty layer
413 * 404 *
414 * the complete packet (pkzid ... cksum) is in garmin_data_p->inbuf starting 405 * the complete packet (pktid ... cksum) is in garmin_data_p->inbuf starting
415 * at GSP_INITIAL_OFFSET. 406 * at GSP_INITIAL_OFFSET.
416 * 407 *
417 * count - number of bytes in the input buffer including space reserved for 408 * count - number of bytes in the input buffer including space reserved for
@@ -501,7 +492,6 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
501 unsigned long flags; 492 unsigned long flags;
502 int offs = 0; 493 int offs = 0;
503 int ack_or_nak_seen = 0; 494 int ack_or_nak_seen = 0;
504 int i = 0;
505 __u8 *dest; 495 __u8 *dest;
506 int size; 496 int size;
507 /* dleSeen: set if last byte read was a DLE */ 497 /* dleSeen: set if last byte read was a DLE */
@@ -519,8 +509,8 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
519 skip = garmin_data_p->flags & FLAGS_GSP_SKIP; 509 skip = garmin_data_p->flags & FLAGS_GSP_SKIP;
520 spin_unlock_irqrestore(&garmin_data_p->lock, flags); 510 spin_unlock_irqrestore(&garmin_data_p->lock, flags);
521 511
522 dbg("%s - dle=%d skip=%d size=%d count=%d", 512 /* dbg("%s - dle=%d skip=%d size=%d count=%d",
523 __func__, dleSeen, skip, size, count); 513 __func__, dleSeen, skip, size, count); */
524 514
525 if (size == 0) 515 if (size == 0)
526 size = GSP_INITIAL_OFFSET; 516 size = GSP_INITIAL_OFFSET;
@@ -568,7 +558,6 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
568 } else if (!skip) { 558 } else if (!skip) {
569 559
570 if (dleSeen) { 560 if (dleSeen) {
571 dbg("non-masked DLE at %d - restarting", i);
572 size = GSP_INITIAL_OFFSET; 561 size = GSP_INITIAL_OFFSET;
573 dleSeen = 0; 562 dleSeen = 0;
574 } 563 }
@@ -599,19 +588,19 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
599 else 588 else
600 garmin_data_p->flags &= ~FLAGS_GSP_DLESEEN; 589 garmin_data_p->flags &= ~FLAGS_GSP_DLESEEN;
601 590
602 if (ack_or_nak_seen)
603 garmin_data_p->state = STATE_GSP_WAIT_DATA;
604
605 spin_unlock_irqrestore(&garmin_data_p->lock, flags); 591 spin_unlock_irqrestore(&garmin_data_p->lock, flags);
606 592
607 if (ack_or_nak_seen) 593 if (ack_or_nak_seen) {
608 gsp_next_packet(garmin_data_p); 594 if (gsp_next_packet(garmin_data_p) > 0)
595 garmin_data_p->state = STATE_ACTIVE;
596 else
597 garmin_data_p->state = STATE_GSP_WAIT_DATA;
598 }
609 return count; 599 return count;
610} 600}
611 601
612 602
613 603
614
615/* 604/*
616 * Sends a usb packet to the tty 605 * Sends a usb packet to the tty
617 * 606 *
@@ -733,29 +722,28 @@ static int gsp_send(struct garmin_data *garmin_data_p,
733} 722}
734 723
735 724
736
737
738
739/* 725/*
740 * Process the next pending data packet - if there is one 726 * Process the next pending data packet - if there is one
741 */ 727 */
742static void gsp_next_packet(struct garmin_data *garmin_data_p) 728static int gsp_next_packet(struct garmin_data *garmin_data_p)
743{ 729{
730 int result = 0;
744 struct garmin_packet *pkt = NULL; 731 struct garmin_packet *pkt = NULL;
745 732
746 while ((pkt = pkt_pop(garmin_data_p)) != NULL) { 733 while ((pkt = pkt_pop(garmin_data_p)) != NULL) {
747 dbg("%s - next pkt: %d", __func__, pkt->seq); 734 dbg("%s - next pkt: %d", __func__, pkt->seq);
748 if (gsp_send(garmin_data_p, pkt->data, pkt->size) > 0) { 735 result = gsp_send(garmin_data_p, pkt->data, pkt->size);
736 if (result > 0) {
749 kfree(pkt); 737 kfree(pkt);
750 return; 738 return result;
751 } 739 }
752 kfree(pkt); 740 kfree(pkt);
753 } 741 }
742 return result;
754} 743}
755 744
756 745
757 746
758
759/****************************************************************************** 747/******************************************************************************
760 * garmin native mode 748 * garmin native mode
761 ******************************************************************************/ 749 ******************************************************************************/
@@ -888,14 +876,6 @@ static int garmin_clear(struct garmin_data *garmin_data_p)
888 unsigned long flags; 876 unsigned long flags;
889 int status = 0; 877 int status = 0;
890 878
891 struct usb_serial_port *port = garmin_data_p->port;
892
893 if (port != NULL && atomic_read(&garmin_data_p->resp_count)) {
894 /* send a terminate command */
895 status = garmin_write_bulk(port, GARMIN_STOP_TRANSFER_REQ,
896 sizeof(GARMIN_STOP_TRANSFER_REQ), 1);
897 }
898
899 /* flush all queued data */ 879 /* flush all queued data */
900 pkt_clear(garmin_data_p); 880 pkt_clear(garmin_data_p);
901 881
@@ -908,16 +888,12 @@ static int garmin_clear(struct garmin_data *garmin_data_p)
908} 888}
909 889
910 890
911
912
913
914
915static int garmin_init_session(struct usb_serial_port *port) 891static int garmin_init_session(struct usb_serial_port *port)
916{ 892{
917 unsigned long flags;
918 struct usb_serial *serial = port->serial; 893 struct usb_serial *serial = port->serial;
919 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); 894 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
920 int status = 0; 895 int status = 0;
896 int i = 0;
921 897
922 if (status == 0) { 898 if (status == 0) {
923 usb_kill_urb(port->interrupt_in_urb); 899 usb_kill_urb(port->interrupt_in_urb);
@@ -931,30 +907,25 @@ static int garmin_init_session(struct usb_serial_port *port)
931 __func__, status); 907 __func__, status);
932 } 908 }
933 909
910 /*
911 * using the initialization method from gpsbabel. See comments in
912 * gpsbabel/jeeps/gpslibusb.c gusb_reset_toggles()
913 */
934 if (status == 0) { 914 if (status == 0) {
935 dbg("%s - starting session ...", __func__); 915 dbg("%s - starting session ...", __func__);
936 garmin_data_p->state = STATE_ACTIVE; 916 garmin_data_p->state = STATE_ACTIVE;
937 status = garmin_write_bulk(port, GARMIN_START_SESSION_REQ,
938 sizeof(GARMIN_START_SESSION_REQ), 0);
939
940 if (status >= 0) {
941
942 spin_lock_irqsave(&garmin_data_p->lock, flags);
943 garmin_data_p->ignorePkts++;
944 spin_unlock_irqrestore(&garmin_data_p->lock, flags);
945 917
946 /* not needed, but the win32 driver does it too ... */ 918 for (i = 0; i < 3; i++) {
947 status = garmin_write_bulk(port, 919 status = garmin_write_bulk(port,
948 GARMIN_START_SESSION_REQ2, 920 GARMIN_START_SESSION_REQ,
949 sizeof(GARMIN_START_SESSION_REQ2), 0); 921 sizeof(GARMIN_START_SESSION_REQ), 0);
950 if (status >= 0) { 922
951 status = 0; 923 if (status < 0)
952 spin_lock_irqsave(&garmin_data_p->lock, flags); 924 break;
953 garmin_data_p->ignorePkts++;
954 spin_unlock_irqrestore(&garmin_data_p->lock,
955 flags);
956 }
957 } 925 }
926
927 if (status > 0)
928 status = 0;
958 } 929 }
959 930
960 return status; 931 return status;
@@ -962,8 +933,6 @@ static int garmin_init_session(struct usb_serial_port *port)
962 933
963 934
964 935
965
966
967static int garmin_open(struct tty_struct *tty, 936static int garmin_open(struct tty_struct *tty,
968 struct usb_serial_port *port, struct file *filp) 937 struct usb_serial_port *port, struct file *filp)
969{ 938{
@@ -977,8 +946,6 @@ static int garmin_open(struct tty_struct *tty,
977 garmin_data_p->mode = initial_mode; 946 garmin_data_p->mode = initial_mode;
978 garmin_data_p->count = 0; 947 garmin_data_p->count = 0;
979 garmin_data_p->flags = 0; 948 garmin_data_p->flags = 0;
980 atomic_set(&garmin_data_p->req_count, 0);
981 atomic_set(&garmin_data_p->resp_count, 0);
982 spin_unlock_irqrestore(&garmin_data_p->lock, flags); 949 spin_unlock_irqrestore(&garmin_data_p->lock, flags);
983 950
984 /* shutdown any bulk reads that might be going on */ 951 /* shutdown any bulk reads that might be going on */
@@ -1006,6 +973,7 @@ static void garmin_close(struct usb_serial_port *port)
1006 return; 973 return;
1007 974
1008 mutex_lock(&port->serial->disc_mutex); 975 mutex_lock(&port->serial->disc_mutex);
976
1009 if (!port->serial->disconnected) 977 if (!port->serial->disconnected)
1010 garmin_clear(garmin_data_p); 978 garmin_clear(garmin_data_p);
1011 979
@@ -1013,25 +981,17 @@ static void garmin_close(struct usb_serial_port *port)
1013 usb_kill_urb(port->read_urb); 981 usb_kill_urb(port->read_urb);
1014 usb_kill_urb(port->write_urb); 982 usb_kill_urb(port->write_urb);
1015 983
1016 if (!port->serial->disconnected) { 984 /* keep reset state so we know that we must start a new session */
1017 if (noResponseFromAppLayer(garmin_data_p) || 985 if (garmin_data_p->state != STATE_RESET)
1018 ((garmin_data_p->flags & CLEAR_HALT_REQUIRED) != 0)) {
1019 process_resetdev_request(port);
1020 garmin_data_p->state = STATE_RESET;
1021 } else {
1022 garmin_data_p->state = STATE_DISCONNECTED;
1023 }
1024 } else {
1025 garmin_data_p->state = STATE_DISCONNECTED; 986 garmin_data_p->state = STATE_DISCONNECTED;
1026 } 987
1027 mutex_unlock(&port->serial->disc_mutex); 988 mutex_unlock(&port->serial->disc_mutex);
1028} 989}
1029 990
991
1030static void garmin_write_bulk_callback(struct urb *urb) 992static void garmin_write_bulk_callback(struct urb *urb)
1031{ 993{
1032 unsigned long flags;
1033 struct usb_serial_port *port = urb->context; 994 struct usb_serial_port *port = urb->context;
1034 int status = urb->status;
1035 995
1036 if (port) { 996 if (port) {
1037 struct garmin_data *garmin_data_p = 997 struct garmin_data *garmin_data_p =
@@ -1039,20 +999,13 @@ static void garmin_write_bulk_callback(struct urb *urb)
1039 999
1040 dbg("%s - port %d", __func__, port->number); 1000 dbg("%s - port %d", __func__, port->number);
1041 1001
1042 if (GARMIN_LAYERID_APPL == getLayerId(urb->transfer_buffer) 1002 if (GARMIN_LAYERID_APPL == getLayerId(urb->transfer_buffer)) {
1043 && (garmin_data_p->mode == MODE_GARMIN_SERIAL)) {
1044 gsp_send_ack(garmin_data_p,
1045 ((__u8 *)urb->transfer_buffer)[4]);
1046 }
1047 1003
1048 if (status) { 1004 if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
1049 dbg("%s - nonzero write bulk status received: %d", 1005 gsp_send_ack(garmin_data_p,
1050 __func__, status); 1006 ((__u8 *)urb->transfer_buffer)[4]);
1051 spin_lock_irqsave(&garmin_data_p->lock, flags); 1007 }
1052 garmin_data_p->flags |= CLEAR_HALT_REQUIRED;
1053 spin_unlock_irqrestore(&garmin_data_p->lock, flags);
1054 } 1008 }
1055
1056 usb_serial_port_softint(port); 1009 usb_serial_port_softint(port);
1057 } 1010 }
1058 1011
@@ -1108,7 +1061,11 @@ static int garmin_write_bulk(struct usb_serial_port *port,
1108 urb->transfer_flags |= URB_ZERO_PACKET; 1061 urb->transfer_flags |= URB_ZERO_PACKET;
1109 1062
1110 if (GARMIN_LAYERID_APPL == getLayerId(buffer)) { 1063 if (GARMIN_LAYERID_APPL == getLayerId(buffer)) {
1111 atomic_inc(&garmin_data_p->req_count); 1064
1065 spin_lock_irqsave(&garmin_data_p->lock, flags);
1066 garmin_data_p->flags |= APP_REQ_SEEN;
1067 spin_unlock_irqrestore(&garmin_data_p->lock, flags);
1068
1112 if (garmin_data_p->mode == MODE_GARMIN_SERIAL) { 1069 if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
1113 pkt_clear(garmin_data_p); 1070 pkt_clear(garmin_data_p);
1114 garmin_data_p->state = STATE_GSP_WAIT_DATA; 1071 garmin_data_p->state = STATE_GSP_WAIT_DATA;
@@ -1140,6 +1097,9 @@ static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
1140 1097
1141 usb_serial_debug_data(debug, &port->dev, __func__, count, buf); 1098 usb_serial_debug_data(debug, &port->dev, __func__, count, buf);
1142 1099
1100 if (garmin_data_p->state == STATE_RESET)
1101 return -EIO;
1102
1143 /* check for our private packets */ 1103 /* check for our private packets */
1144 if (count >= GARMIN_PKTHDR_LENGTH) { 1104 if (count >= GARMIN_PKTHDR_LENGTH) {
1145 len = PRIVPKTSIZ; 1105 len = PRIVPKTSIZ;
@@ -1184,7 +1144,7 @@ static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
1184 break; 1144 break;
1185 1145
1186 case PRIV_PKTID_RESET_REQ: 1146 case PRIV_PKTID_RESET_REQ:
1187 atomic_inc(&garmin_data_p->req_count); 1147 process_resetdev_request(port);
1188 break; 1148 break;
1189 1149
1190 case PRIV_PKTID_SET_DEF_MODE: 1150 case PRIV_PKTID_SET_DEF_MODE:
@@ -1200,8 +1160,6 @@ static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
1200 } 1160 }
1201 } 1161 }
1202 1162
1203 garmin_data_p->ignorePkts = 0;
1204
1205 if (garmin_data_p->mode == MODE_GARMIN_SERIAL) { 1163 if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
1206 return gsp_receive(garmin_data_p, buf, count); 1164 return gsp_receive(garmin_data_p, buf, count);
1207 } else { /* MODE_NATIVE */ 1165 } else { /* MODE_NATIVE */
@@ -1224,31 +1182,33 @@ static int garmin_write_room(struct tty_struct *tty)
1224static void garmin_read_process(struct garmin_data *garmin_data_p, 1182static void garmin_read_process(struct garmin_data *garmin_data_p,
1225 unsigned char *data, unsigned data_length) 1183 unsigned char *data, unsigned data_length)
1226{ 1184{
1185 unsigned long flags;
1186
1227 if (garmin_data_p->flags & FLAGS_DROP_DATA) { 1187 if (garmin_data_p->flags & FLAGS_DROP_DATA) {
1228 /* abort-transfer cmd is actice */ 1188 /* abort-transfer cmd is actice */
1229 dbg("%s - pkt dropped", __func__); 1189 dbg("%s - pkt dropped", __func__);
1230 } else if (garmin_data_p->state != STATE_DISCONNECTED && 1190 } else if (garmin_data_p->state != STATE_DISCONNECTED &&
1231 garmin_data_p->state != STATE_RESET) { 1191 garmin_data_p->state != STATE_RESET) {
1232 1192
1233 /* remember any appl.layer packets, so we know
1234 if a reset is required or not when closing
1235 the device */
1236 if (0 == memcmp(data, GARMIN_APP_LAYER_REPLY,
1237 sizeof(GARMIN_APP_LAYER_REPLY))) {
1238 atomic_inc(&garmin_data_p->resp_count);
1239 }
1240
1241 /* if throttling is active or postprecessing is required 1193 /* if throttling is active or postprecessing is required
1242 put the received data in the input queue, otherwise 1194 put the received data in the input queue, otherwise
1243 send it directly to the tty port */ 1195 send it directly to the tty port */
1244 if (garmin_data_p->flags & FLAGS_QUEUING) { 1196 if (garmin_data_p->flags & FLAGS_QUEUING) {
1245 pkt_add(garmin_data_p, data, data_length); 1197 pkt_add(garmin_data_p, data, data_length);
1246 } else if (garmin_data_p->mode == MODE_GARMIN_SERIAL) { 1198 } else if (getLayerId(data) == GARMIN_LAYERID_APPL) {
1247 if (getLayerId(data) == GARMIN_LAYERID_APPL) 1199
1200 spin_lock_irqsave(&garmin_data_p->lock, flags);
1201 garmin_data_p->flags |= APP_RESP_SEEN;
1202 spin_unlock_irqrestore(&garmin_data_p->lock, flags);
1203
1204 if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
1248 pkt_add(garmin_data_p, data, data_length); 1205 pkt_add(garmin_data_p, data, data_length);
1249 } else { 1206 } else {
1250 send_to_tty(garmin_data_p->port, data, data_length); 1207 send_to_tty(garmin_data_p->port, data,
1208 data_length);
1209 }
1251 } 1210 }
1211 /* ignore system layer packets ... */
1252 } 1212 }
1253} 1213}
1254 1214
@@ -1363,8 +1323,6 @@ static void garmin_read_int_callback(struct urb *urb)
1363 } else { 1323 } else {
1364 spin_lock_irqsave(&garmin_data_p->lock, flags); 1324 spin_lock_irqsave(&garmin_data_p->lock, flags);
1365 garmin_data_p->flags |= FLAGS_BULK_IN_ACTIVE; 1325 garmin_data_p->flags |= FLAGS_BULK_IN_ACTIVE;
1366 /* do not send this packet to the user */
1367 garmin_data_p->ignorePkts = 1;
1368 spin_unlock_irqrestore(&garmin_data_p->lock, 1326 spin_unlock_irqrestore(&garmin_data_p->lock,
1369 flags); 1327 flags);
1370 } 1328 }
@@ -1391,17 +1349,7 @@ static void garmin_read_int_callback(struct urb *urb)
1391 __func__, garmin_data_p->serial_num); 1349 __func__, garmin_data_p->serial_num);
1392 } 1350 }
1393 1351
1394 if (garmin_data_p->ignorePkts) { 1352 garmin_read_process(garmin_data_p, data, urb->actual_length);
1395 /* this reply belongs to a request generated by the driver,
1396 ignore it. */
1397 dbg("%s - pkt ignored (%d)",
1398 __func__, garmin_data_p->ignorePkts);
1399 spin_lock_irqsave(&garmin_data_p->lock, flags);
1400 garmin_data_p->ignorePkts--;
1401 spin_unlock_irqrestore(&garmin_data_p->lock, flags);
1402 } else {
1403 garmin_read_process(garmin_data_p, data, urb->actual_length);
1404 }
1405 1353
1406 port->interrupt_in_urb->dev = port->serial->dev; 1354 port->interrupt_in_urb->dev = port->serial->dev;
1407 retval = usb_submit_urb(urb, GFP_ATOMIC); 1355 retval = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1527,7 +1475,7 @@ static int garmin_attach(struct usb_serial *serial)
1527} 1475}
1528 1476
1529 1477
1530static void garmin_shutdown(struct usb_serial *serial) 1478static void garmin_disconnect(struct usb_serial *serial)
1531{ 1479{
1532 struct usb_serial_port *port = serial->port[0]; 1480 struct usb_serial_port *port = serial->port[0];
1533 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); 1481 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
@@ -1536,8 +1484,17 @@ static void garmin_shutdown(struct usb_serial *serial)
1536 1484
1537 usb_kill_urb(port->interrupt_in_urb); 1485 usb_kill_urb(port->interrupt_in_urb);
1538 del_timer_sync(&garmin_data_p->timer); 1486 del_timer_sync(&garmin_data_p->timer);
1487}
1488
1489
1490static void garmin_release(struct usb_serial *serial)
1491{
1492 struct usb_serial_port *port = serial->port[0];
1493 struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
1494
1495 dbg("%s", __func__);
1496
1539 kfree(garmin_data_p); 1497 kfree(garmin_data_p);
1540 usb_set_serial_port_data(port, NULL);
1541} 1498}
1542 1499
1543 1500
@@ -1556,7 +1513,8 @@ static struct usb_serial_driver garmin_device = {
1556 .throttle = garmin_throttle, 1513 .throttle = garmin_throttle,
1557 .unthrottle = garmin_unthrottle, 1514 .unthrottle = garmin_unthrottle,
1558 .attach = garmin_attach, 1515 .attach = garmin_attach,
1559 .shutdown = garmin_shutdown, 1516 .disconnect = garmin_disconnect,
1517 .release = garmin_release,
1560 .write = garmin_write, 1518 .write = garmin_write,
1561 .write_room = garmin_write_room, 1519 .write_room = garmin_write_room,
1562 .write_bulk_callback = garmin_write_bulk_callback, 1520 .write_bulk_callback = garmin_write_bulk_callback,
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index be82ea956720..932d6241b787 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -63,7 +63,8 @@ struct usb_serial_driver usb_serial_generic_device = {
63 .id_table = generic_device_ids, 63 .id_table = generic_device_ids,
64 .usb_driver = &generic_driver, 64 .usb_driver = &generic_driver,
65 .num_ports = 1, 65 .num_ports = 1,
66 .shutdown = usb_serial_generic_shutdown, 66 .disconnect = usb_serial_generic_disconnect,
67 .release = usb_serial_generic_release,
67 .throttle = usb_serial_generic_throttle, 68 .throttle = usb_serial_generic_throttle,
68 .unthrottle = usb_serial_generic_unthrottle, 69 .unthrottle = usb_serial_generic_unthrottle,
69 .resume = usb_serial_generic_resume, 70 .resume = usb_serial_generic_resume,
@@ -190,6 +191,88 @@ void usb_serial_generic_close(struct usb_serial_port *port)
190 generic_cleanup(port); 191 generic_cleanup(port);
191} 192}
192 193
194static int usb_serial_multi_urb_write(struct tty_struct *tty,
195 struct usb_serial_port *port, const unsigned char *buf, int count)
196{
197 unsigned long flags;
198 struct urb *urb;
199 unsigned char *buffer;
200 int status;
201 int towrite;
202 int bwrite = 0;
203
204 dbg("%s - port %d", __func__, port->number);
205
206 if (count == 0)
207 dbg("%s - write request of 0 bytes", __func__);
208
209 while (count > 0) {
210 towrite = (count > port->bulk_out_size) ?
211 port->bulk_out_size : count;
212 spin_lock_irqsave(&port->lock, flags);
213 if (port->urbs_in_flight >
214 port->serial->type->max_in_flight_urbs) {
215 spin_unlock_irqrestore(&port->lock, flags);
216 dbg("%s - write limit hit\n", __func__);
217 return bwrite;
218 }
219 port->tx_bytes_flight += towrite;
220 port->urbs_in_flight++;
221 spin_unlock_irqrestore(&port->lock, flags);
222
223 buffer = kmalloc(towrite, GFP_ATOMIC);
224 if (!buffer) {
225 dev_err(&port->dev,
226 "%s ran out of kernel memory for urb ...\n", __func__);
227 goto error_no_buffer;
228 }
229
230 urb = usb_alloc_urb(0, GFP_ATOMIC);
231 if (!urb) {
232 dev_err(&port->dev, "%s - no more free urbs\n",
233 __func__);
234 goto error_no_urb;
235 }
236
237 /* Copy data */
238 memcpy(buffer, buf + bwrite, towrite);
239 usb_serial_debug_data(debug, &port->dev, __func__,
240 towrite, buffer);
241 /* fill the buffer and send it */
242 usb_fill_bulk_urb(urb, port->serial->dev,
243 usb_sndbulkpipe(port->serial->dev,
244 port->bulk_out_endpointAddress),
245 buffer, towrite,
246 usb_serial_generic_write_bulk_callback, port);
247
248 status = usb_submit_urb(urb, GFP_ATOMIC);
249 if (status) {
250 dev_err(&port->dev,
251 "%s - failed submitting write urb, error %d\n",
252 __func__, status);
253 goto error;
254 }
255
256 /* This urb is the responsibility of the host driver now */
257 usb_free_urb(urb);
258 dbg("%s write: %d", __func__, towrite);
259 count -= towrite;
260 bwrite += towrite;
261 }
262 return bwrite;
263
264error:
265 usb_free_urb(urb);
266error_no_urb:
267 kfree(buffer);
268error_no_buffer:
269 spin_lock_irqsave(&port->lock, flags);
270 port->urbs_in_flight--;
271 port->tx_bytes_flight -= towrite;
272 spin_unlock_irqrestore(&port->lock, flags);
273 return bwrite;
274}
275
193int usb_serial_generic_write(struct tty_struct *tty, 276int usb_serial_generic_write(struct tty_struct *tty,
194 struct usb_serial_port *port, const unsigned char *buf, int count) 277 struct usb_serial_port *port, const unsigned char *buf, int count)
195{ 278{
@@ -207,6 +290,11 @@ int usb_serial_generic_write(struct tty_struct *tty,
207 /* only do something if we have a bulk out endpoint */ 290 /* only do something if we have a bulk out endpoint */
208 if (serial->num_bulk_out) { 291 if (serial->num_bulk_out) {
209 unsigned long flags; 292 unsigned long flags;
293
294 if (serial->type->max_in_flight_urbs)
295 return usb_serial_multi_urb_write(tty, port,
296 buf, count);
297
210 spin_lock_irqsave(&port->lock, flags); 298 spin_lock_irqsave(&port->lock, flags);
211 if (port->write_urb_busy) { 299 if (port->write_urb_busy) {
212 spin_unlock_irqrestore(&port->lock, flags); 300 spin_unlock_irqrestore(&port->lock, flags);
@@ -252,20 +340,26 @@ int usb_serial_generic_write(struct tty_struct *tty,
252 /* no bulk out, so return 0 bytes written */ 340 /* no bulk out, so return 0 bytes written */
253 return 0; 341 return 0;
254} 342}
343EXPORT_SYMBOL_GPL(usb_serial_generic_write);
255 344
256int usb_serial_generic_write_room(struct tty_struct *tty) 345int usb_serial_generic_write_room(struct tty_struct *tty)
257{ 346{
258 struct usb_serial_port *port = tty->driver_data; 347 struct usb_serial_port *port = tty->driver_data;
259 struct usb_serial *serial = port->serial; 348 struct usb_serial *serial = port->serial;
349 unsigned long flags;
260 int room = 0; 350 int room = 0;
261 351
262 dbg("%s - port %d", __func__, port->number); 352 dbg("%s - port %d", __func__, port->number);
263 353 spin_lock_irqsave(&port->lock, flags);
264 /* FIXME: Locking */ 354 if (serial->type->max_in_flight_urbs) {
265 if (serial->num_bulk_out) { 355 if (port->urbs_in_flight < serial->type->max_in_flight_urbs)
266 if (!(port->write_urb_busy)) 356 room = port->bulk_out_size *
267 room = port->bulk_out_size; 357 (serial->type->max_in_flight_urbs -
358 port->urbs_in_flight);
359 } else if (serial->num_bulk_out && !(port->write_urb_busy)) {
360 room = port->bulk_out_size;
268 } 361 }
362 spin_unlock_irqrestore(&port->lock, flags);
269 363
270 dbg("%s - returns %d", __func__, room); 364 dbg("%s - returns %d", __func__, room);
271 return room; 365 return room;
@@ -276,11 +370,16 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
276 struct usb_serial_port *port = tty->driver_data; 370 struct usb_serial_port *port = tty->driver_data;
277 struct usb_serial *serial = port->serial; 371 struct usb_serial *serial = port->serial;
278 int chars = 0; 372 int chars = 0;
373 unsigned long flags;
279 374
280 dbg("%s - port %d", __func__, port->number); 375 dbg("%s - port %d", __func__, port->number);
281 376
282 /* FIXME: Locking */ 377 if (serial->type->max_in_flight_urbs) {
283 if (serial->num_bulk_out) { 378 spin_lock_irqsave(&port->lock, flags);
379 chars = port->tx_bytes_flight;
380 spin_unlock_irqrestore(&port->lock, flags);
381 } else if (serial->num_bulk_out) {
382 /* FIXME: Locking */
284 if (port->write_urb_busy) 383 if (port->write_urb_busy)
285 chars = port->write_urb->transfer_buffer_length; 384 chars = port->write_urb->transfer_buffer_length;
286 } 385 }
@@ -290,7 +389,8 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
290} 389}
291 390
292 391
293static void resubmit_read_urb(struct usb_serial_port *port, gfp_t mem_flags) 392void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
393 gfp_t mem_flags)
294{ 394{
295 struct urb *urb = port->read_urb; 395 struct urb *urb = port->read_urb;
296 struct usb_serial *serial = port->serial; 396 struct usb_serial *serial = port->serial;
@@ -311,25 +411,28 @@ static void resubmit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
311 "%s - failed resubmitting read urb, error %d\n", 411 "%s - failed resubmitting read urb, error %d\n",
312 __func__, result); 412 __func__, result);
313} 413}
414EXPORT_SYMBOL_GPL(usb_serial_generic_resubmit_read_urb);
314 415
315/* Push data to tty layer and resubmit the bulk read URB */ 416/* Push data to tty layer and resubmit the bulk read URB */
316static void flush_and_resubmit_read_urb(struct usb_serial_port *port) 417static void flush_and_resubmit_read_urb(struct usb_serial_port *port)
317{ 418{
318 struct urb *urb = port->read_urb; 419 struct urb *urb = port->read_urb;
319 struct tty_struct *tty = tty_port_tty_get(&port->port); 420 struct tty_struct *tty = tty_port_tty_get(&port->port);
320 int room; 421 char *ch = (char *)urb->transfer_buffer;
422 int i;
423
424 if (!tty)
425 goto done;
321 426
322 /* Push data to tty */ 427 /* Push data to tty */
323 if (tty && urb->actual_length) { 428 for (i = 0; i < urb->actual_length; i++, ch++) {
324 room = tty_buffer_request_room(tty, urb->actual_length); 429 if (!usb_serial_handle_sysrq_char(port, *ch))
325 if (room) { 430 tty_insert_flip_char(tty, *ch, TTY_NORMAL);
326 tty_insert_flip_string(tty, urb->transfer_buffer, room);
327 tty_flip_buffer_push(tty);
328 }
329 } 431 }
432 tty_flip_buffer_push(tty);
330 tty_kref_put(tty); 433 tty_kref_put(tty);
331 434done:
332 resubmit_read_urb(port, GFP_ATOMIC); 435 usb_serial_generic_resubmit_read_urb(port, GFP_ATOMIC);
333} 436}
334 437
335void usb_serial_generic_read_bulk_callback(struct urb *urb) 438void usb_serial_generic_read_bulk_callback(struct urb *urb)
@@ -363,12 +466,24 @@ EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback);
363 466
364void usb_serial_generic_write_bulk_callback(struct urb *urb) 467void usb_serial_generic_write_bulk_callback(struct urb *urb)
365{ 468{
469 unsigned long flags;
366 struct usb_serial_port *port = urb->context; 470 struct usb_serial_port *port = urb->context;
367 int status = urb->status; 471 int status = urb->status;
368 472
369 dbg("%s - port %d", __func__, port->number); 473 dbg("%s - port %d", __func__, port->number);
370 474
371 port->write_urb_busy = 0; 475 if (port->serial->type->max_in_flight_urbs) {
476 spin_lock_irqsave(&port->lock, flags);
477 --port->urbs_in_flight;
478 port->tx_bytes_flight -= urb->transfer_buffer_length;
479 if (port->urbs_in_flight < 0)
480 port->urbs_in_flight = 0;
481 spin_unlock_irqrestore(&port->lock, flags);
482 } else {
483 /* Handle the case for single urb mode */
484 port->write_urb_busy = 0;
485 }
486
372 if (status) { 487 if (status) {
373 dbg("%s - nonzero write bulk status received: %d", 488 dbg("%s - nonzero write bulk status received: %d",
374 __func__, status); 489 __func__, status);
@@ -408,11 +523,36 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
408 523
409 if (was_throttled) { 524 if (was_throttled) {
410 /* Resume reading from device */ 525 /* Resume reading from device */
411 resubmit_read_urb(port, GFP_KERNEL); 526 usb_serial_generic_resubmit_read_urb(port, GFP_KERNEL);
527 }
528}
529
530int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
531{
532 if (port->sysrq && port->console) {
533 if (ch && time_before(jiffies, port->sysrq)) {
534 handle_sysrq(ch, tty_port_tty_get(&port->port));
535 port->sysrq = 0;
536 return 1;
537 }
538 port->sysrq = 0;
539 }
540 return 0;
541}
542EXPORT_SYMBOL_GPL(usb_serial_handle_sysrq_char);
543
544int usb_serial_handle_break(struct usb_serial_port *port)
545{
546 if (!port->sysrq) {
547 port->sysrq = jiffies + HZ*5;
548 return 1;
412 } 549 }
550 port->sysrq = 0;
551 return 0;
413} 552}
553EXPORT_SYMBOL_GPL(usb_serial_handle_break);
414 554
415void usb_serial_generic_shutdown(struct usb_serial *serial) 555void usb_serial_generic_disconnect(struct usb_serial *serial)
416{ 556{
417 int i; 557 int i;
418 558
@@ -423,3 +563,7 @@ void usb_serial_generic_shutdown(struct usb_serial *serial)
423 generic_cleanup(serial->port[i]); 563 generic_cleanup(serial->port[i]);
424} 564}
425 565
566void usb_serial_generic_release(struct usb_serial *serial)
567{
568 dbg("%s", __func__);
569}
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 53ef5996e33d..0191693625d6 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -224,7 +224,8 @@ static int edge_tiocmget(struct tty_struct *tty, struct file *file);
224static int edge_tiocmset(struct tty_struct *tty, struct file *file, 224static int edge_tiocmset(struct tty_struct *tty, struct file *file,
225 unsigned int set, unsigned int clear); 225 unsigned int set, unsigned int clear);
226static int edge_startup(struct usb_serial *serial); 226static int edge_startup(struct usb_serial *serial);
227static void edge_shutdown(struct usb_serial *serial); 227static void edge_disconnect(struct usb_serial *serial);
228static void edge_release(struct usb_serial *serial);
228 229
229#include "io_tables.h" /* all of the devices that this driver supports */ 230#include "io_tables.h" /* all of the devices that this driver supports */
230 231
@@ -3193,21 +3194,16 @@ static int edge_startup(struct usb_serial *serial)
3193 3194
3194 3195
3195/**************************************************************************** 3196/****************************************************************************
3196 * edge_shutdown 3197 * edge_disconnect
3197 * This function is called whenever the device is removed from the usb bus. 3198 * This function is called whenever the device is removed from the usb bus.
3198 ****************************************************************************/ 3199 ****************************************************************************/
3199static void edge_shutdown(struct usb_serial *serial) 3200static void edge_disconnect(struct usb_serial *serial)
3200{ 3201{
3201 struct edgeport_serial *edge_serial = usb_get_serial_data(serial); 3202 struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
3202 int i;
3203 3203
3204 dbg("%s", __func__); 3204 dbg("%s", __func__);
3205 3205
3206 /* stop reads and writes on all ports */ 3206 /* stop reads and writes on all ports */
3207 for (i = 0; i < serial->num_ports; ++i) {
3208 kfree(usb_get_serial_port_data(serial->port[i]));
3209 usb_set_serial_port_data(serial->port[i], NULL);
3210 }
3211 /* free up our endpoint stuff */ 3207 /* free up our endpoint stuff */
3212 if (edge_serial->is_epic) { 3208 if (edge_serial->is_epic) {
3213 usb_kill_urb(edge_serial->interrupt_read_urb); 3209 usb_kill_urb(edge_serial->interrupt_read_urb);
@@ -3218,9 +3214,24 @@ static void edge_shutdown(struct usb_serial *serial)
3218 usb_free_urb(edge_serial->read_urb); 3214 usb_free_urb(edge_serial->read_urb);
3219 kfree(edge_serial->bulk_in_buffer); 3215 kfree(edge_serial->bulk_in_buffer);
3220 } 3216 }
3217}
3218
3219
3220/****************************************************************************
3221 * edge_release
3222 * This function is called when the device structure is deallocated.
3223 ****************************************************************************/
3224static void edge_release(struct usb_serial *serial)
3225{
3226 struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
3227 int i;
3228
3229 dbg("%s", __func__);
3230
3231 for (i = 0; i < serial->num_ports; ++i)
3232 kfree(usb_get_serial_port_data(serial->port[i]));
3221 3233
3222 kfree(edge_serial); 3234 kfree(edge_serial);
3223 usb_set_serial_data(serial, NULL);
3224} 3235}
3225 3236
3226 3237
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 7eb9d67b81b6..9241d3147513 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -117,7 +117,8 @@ static struct usb_serial_driver edgeport_2port_device = {
117 .throttle = edge_throttle, 117 .throttle = edge_throttle,
118 .unthrottle = edge_unthrottle, 118 .unthrottle = edge_unthrottle,
119 .attach = edge_startup, 119 .attach = edge_startup,
120 .shutdown = edge_shutdown, 120 .disconnect = edge_disconnect,
121 .release = edge_release,
121 .ioctl = edge_ioctl, 122 .ioctl = edge_ioctl,
122 .set_termios = edge_set_termios, 123 .set_termios = edge_set_termios,
123 .tiocmget = edge_tiocmget, 124 .tiocmget = edge_tiocmget,
@@ -145,7 +146,8 @@ static struct usb_serial_driver edgeport_4port_device = {
145 .throttle = edge_throttle, 146 .throttle = edge_throttle,
146 .unthrottle = edge_unthrottle, 147 .unthrottle = edge_unthrottle,
147 .attach = edge_startup, 148 .attach = edge_startup,
148 .shutdown = edge_shutdown, 149 .disconnect = edge_disconnect,
150 .release = edge_release,
149 .ioctl = edge_ioctl, 151 .ioctl = edge_ioctl,
150 .set_termios = edge_set_termios, 152 .set_termios = edge_set_termios,
151 .tiocmget = edge_tiocmget, 153 .tiocmget = edge_tiocmget,
@@ -173,7 +175,8 @@ static struct usb_serial_driver edgeport_8port_device = {
173 .throttle = edge_throttle, 175 .throttle = edge_throttle,
174 .unthrottle = edge_unthrottle, 176 .unthrottle = edge_unthrottle,
175 .attach = edge_startup, 177 .attach = edge_startup,
176 .shutdown = edge_shutdown, 178 .disconnect = edge_disconnect,
179 .release = edge_release,
177 .ioctl = edge_ioctl, 180 .ioctl = edge_ioctl,
178 .set_termios = edge_set_termios, 181 .set_termios = edge_set_termios,
179 .tiocmget = edge_tiocmget, 182 .tiocmget = edge_tiocmget,
@@ -200,7 +203,8 @@ static struct usb_serial_driver epic_device = {
200 .throttle = edge_throttle, 203 .throttle = edge_throttle,
201 .unthrottle = edge_unthrottle, 204 .unthrottle = edge_unthrottle,
202 .attach = edge_startup, 205 .attach = edge_startup,
203 .shutdown = edge_shutdown, 206 .disconnect = edge_disconnect,
207 .release = edge_release,
204 .ioctl = edge_ioctl, 208 .ioctl = edge_ioctl,
205 .set_termios = edge_set_termios, 209 .set_termios = edge_set_termios,
206 .tiocmget = edge_tiocmget, 210 .tiocmget = edge_tiocmget,
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index db964db42d3c..e8bc42f92e79 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2663,7 +2663,7 @@ cleanup:
2663 return -ENOMEM; 2663 return -ENOMEM;
2664} 2664}
2665 2665
2666static void edge_shutdown(struct usb_serial *serial) 2666static void edge_disconnect(struct usb_serial *serial)
2667{ 2667{
2668 int i; 2668 int i;
2669 struct edgeport_port *edge_port; 2669 struct edgeport_port *edge_port;
@@ -2673,12 +2673,22 @@ static void edge_shutdown(struct usb_serial *serial)
2673 for (i = 0; i < serial->num_ports; ++i) { 2673 for (i = 0; i < serial->num_ports; ++i) {
2674 edge_port = usb_get_serial_port_data(serial->port[i]); 2674 edge_port = usb_get_serial_port_data(serial->port[i]);
2675 edge_remove_sysfs_attrs(edge_port->port); 2675 edge_remove_sysfs_attrs(edge_port->port);
2676 }
2677}
2678
2679static void edge_release(struct usb_serial *serial)
2680{
2681 int i;
2682 struct edgeport_port *edge_port;
2683
2684 dbg("%s", __func__);
2685
2686 for (i = 0; i < serial->num_ports; ++i) {
2687 edge_port = usb_get_serial_port_data(serial->port[i]);
2676 edge_buf_free(edge_port->ep_out_buf); 2688 edge_buf_free(edge_port->ep_out_buf);
2677 kfree(edge_port); 2689 kfree(edge_port);
2678 usb_set_serial_port_data(serial->port[i], NULL);
2679 } 2690 }
2680 kfree(usb_get_serial_data(serial)); 2691 kfree(usb_get_serial_data(serial));
2681 usb_set_serial_data(serial, NULL);
2682} 2692}
2683 2693
2684 2694
@@ -2915,7 +2925,8 @@ static struct usb_serial_driver edgeport_1port_device = {
2915 .throttle = edge_throttle, 2925 .throttle = edge_throttle,
2916 .unthrottle = edge_unthrottle, 2926 .unthrottle = edge_unthrottle,
2917 .attach = edge_startup, 2927 .attach = edge_startup,
2918 .shutdown = edge_shutdown, 2928 .disconnect = edge_disconnect,
2929 .release = edge_release,
2919 .port_probe = edge_create_sysfs_attrs, 2930 .port_probe = edge_create_sysfs_attrs,
2920 .ioctl = edge_ioctl, 2931 .ioctl = edge_ioctl,
2921 .set_termios = edge_set_termios, 2932 .set_termios = edge_set_termios,
@@ -2944,7 +2955,8 @@ static struct usb_serial_driver edgeport_2port_device = {
2944 .throttle = edge_throttle, 2955 .throttle = edge_throttle,
2945 .unthrottle = edge_unthrottle, 2956 .unthrottle = edge_unthrottle,
2946 .attach = edge_startup, 2957 .attach = edge_startup,
2947 .shutdown = edge_shutdown, 2958 .disconnect = edge_disconnect,
2959 .release = edge_release,
2948 .port_probe = edge_create_sysfs_attrs, 2960 .port_probe = edge_create_sysfs_attrs,
2949 .ioctl = edge_ioctl, 2961 .ioctl = edge_ioctl,
2950 .set_termios = edge_set_termios, 2962 .set_termios = edge_set_termios,
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index c610a99fa477..2545d45ce16f 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -79,7 +79,6 @@ static int ipaq_open(struct tty_struct *tty,
79static void ipaq_close(struct usb_serial_port *port); 79static void ipaq_close(struct usb_serial_port *port);
80static int ipaq_calc_num_ports(struct usb_serial *serial); 80static int ipaq_calc_num_ports(struct usb_serial *serial);
81static int ipaq_startup(struct usb_serial *serial); 81static int ipaq_startup(struct usb_serial *serial);
82static void ipaq_shutdown(struct usb_serial *serial);
83static int ipaq_write(struct tty_struct *tty, struct usb_serial_port *port, 82static int ipaq_write(struct tty_struct *tty, struct usb_serial_port *port,
84 const unsigned char *buf, int count); 83 const unsigned char *buf, int count);
85static int ipaq_write_bulk(struct usb_serial_port *port, 84static int ipaq_write_bulk(struct usb_serial_port *port,
@@ -576,7 +575,6 @@ static struct usb_serial_driver ipaq_device = {
576 .close = ipaq_close, 575 .close = ipaq_close,
577 .attach = ipaq_startup, 576 .attach = ipaq_startup,
578 .calc_num_ports = ipaq_calc_num_ports, 577 .calc_num_ports = ipaq_calc_num_ports,
579 .shutdown = ipaq_shutdown,
580 .write = ipaq_write, 578 .write = ipaq_write,
581 .write_room = ipaq_write_room, 579 .write_room = ipaq_write_room,
582 .chars_in_buffer = ipaq_chars_in_buffer, 580 .chars_in_buffer = ipaq_chars_in_buffer,
@@ -990,11 +988,6 @@ static int ipaq_startup(struct usb_serial *serial)
990 return usb_reset_configuration(serial->dev); 988 return usb_reset_configuration(serial->dev);
991} 989}
992 990
993static void ipaq_shutdown(struct usb_serial *serial)
994{
995 dbg("%s", __func__);
996}
997
998static int __init ipaq_init(void) 991static int __init ipaq_init(void)
999{ 992{
1000 int retval; 993 int retval;
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 76a3cc327bb9..96873a7a32b0 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -121,8 +121,8 @@ static int iuu_startup(struct usb_serial *serial)
121 return 0; 121 return 0;
122} 122}
123 123
124/* Shutdown function */ 124/* Release function */
125static void iuu_shutdown(struct usb_serial *serial) 125static void iuu_release(struct usb_serial *serial)
126{ 126{
127 struct usb_serial_port *port = serial->port[0]; 127 struct usb_serial_port *port = serial->port[0];
128 struct iuu_private *priv = usb_get_serial_port_data(port); 128 struct iuu_private *priv = usb_get_serial_port_data(port);
@@ -1202,7 +1202,7 @@ static struct usb_serial_driver iuu_device = {
1202 .tiocmset = iuu_tiocmset, 1202 .tiocmset = iuu_tiocmset,
1203 .set_termios = iuu_set_termios, 1203 .set_termios = iuu_set_termios,
1204 .attach = iuu_startup, 1204 .attach = iuu_startup,
1205 .shutdown = iuu_shutdown, 1205 .release = iuu_release,
1206}; 1206};
1207 1207
1208static int __init iuu_init(void) 1208static int __init iuu_init(void)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index f1195a98f316..2594b8743d3f 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2689,7 +2689,7 @@ static int keyspan_startup(struct usb_serial *serial)
2689 return 0; 2689 return 0;
2690} 2690}
2691 2691
2692static void keyspan_shutdown(struct usb_serial *serial) 2692static void keyspan_disconnect(struct usb_serial *serial)
2693{ 2693{
2694 int i, j; 2694 int i, j;
2695 struct usb_serial_port *port; 2695 struct usb_serial_port *port;
@@ -2729,6 +2729,17 @@ static void keyspan_shutdown(struct usb_serial *serial)
2729 usb_free_urb(p_priv->out_urbs[j]); 2729 usb_free_urb(p_priv->out_urbs[j]);
2730 } 2730 }
2731 } 2731 }
2732}
2733
2734static void keyspan_release(struct usb_serial *serial)
2735{
2736 int i;
2737 struct usb_serial_port *port;
2738 struct keyspan_serial_private *s_priv;
2739
2740 dbg("%s", __func__);
2741
2742 s_priv = usb_get_serial_data(serial);
2732 2743
2733 /* dbg("Freeing serial->private."); */ 2744 /* dbg("Freeing serial->private."); */
2734 kfree(s_priv); 2745 kfree(s_priv);
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 0d4569b60768..3107ed15af64 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -41,7 +41,8 @@ static int keyspan_open (struct tty_struct *tty,
41static void keyspan_close (struct usb_serial_port *port); 41static void keyspan_close (struct usb_serial_port *port);
42static void keyspan_dtr_rts (struct usb_serial_port *port, int on); 42static void keyspan_dtr_rts (struct usb_serial_port *port, int on);
43static int keyspan_startup (struct usb_serial *serial); 43static int keyspan_startup (struct usb_serial *serial);
44static void keyspan_shutdown (struct usb_serial *serial); 44static void keyspan_disconnect (struct usb_serial *serial);
45static void keyspan_release (struct usb_serial *serial);
45static int keyspan_write_room (struct tty_struct *tty); 46static int keyspan_write_room (struct tty_struct *tty);
46 47
47static int keyspan_write (struct tty_struct *tty, 48static int keyspan_write (struct tty_struct *tty,
@@ -569,7 +570,8 @@ static struct usb_serial_driver keyspan_1port_device = {
569 .tiocmget = keyspan_tiocmget, 570 .tiocmget = keyspan_tiocmget,
570 .tiocmset = keyspan_tiocmset, 571 .tiocmset = keyspan_tiocmset,
571 .attach = keyspan_startup, 572 .attach = keyspan_startup,
572 .shutdown = keyspan_shutdown, 573 .disconnect = keyspan_disconnect,
574 .release = keyspan_release,
573}; 575};
574 576
575static struct usb_serial_driver keyspan_2port_device = { 577static struct usb_serial_driver keyspan_2port_device = {
@@ -590,7 +592,8 @@ static struct usb_serial_driver keyspan_2port_device = {
590 .tiocmget = keyspan_tiocmget, 592 .tiocmget = keyspan_tiocmget,
591 .tiocmset = keyspan_tiocmset, 593 .tiocmset = keyspan_tiocmset,
592 .attach = keyspan_startup, 594 .attach = keyspan_startup,
593 .shutdown = keyspan_shutdown, 595 .disconnect = keyspan_disconnect,
596 .release = keyspan_release,
594}; 597};
595 598
596static struct usb_serial_driver keyspan_4port_device = { 599static struct usb_serial_driver keyspan_4port_device = {
@@ -611,7 +614,8 @@ static struct usb_serial_driver keyspan_4port_device = {
611 .tiocmget = keyspan_tiocmget, 614 .tiocmget = keyspan_tiocmget,
612 .tiocmset = keyspan_tiocmset, 615 .tiocmset = keyspan_tiocmset,
613 .attach = keyspan_startup, 616 .attach = keyspan_startup,
614 .shutdown = keyspan_shutdown, 617 .disconnect = keyspan_disconnect,
618 .release = keyspan_release,
615}; 619};
616 620
617#endif 621#endif
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index ab769dbea1b3..d0b12e40c2b1 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -809,7 +809,7 @@ static int keyspan_pda_startup(struct usb_serial *serial)
809 return 0; 809 return 0;
810} 810}
811 811
812static void keyspan_pda_shutdown(struct usb_serial *serial) 812static void keyspan_pda_release(struct usb_serial *serial)
813{ 813{
814 dbg("%s", __func__); 814 dbg("%s", __func__);
815 815
@@ -869,7 +869,7 @@ static struct usb_serial_driver keyspan_pda_device = {
869 .tiocmget = keyspan_pda_tiocmget, 869 .tiocmget = keyspan_pda_tiocmget,
870 .tiocmset = keyspan_pda_tiocmset, 870 .tiocmset = keyspan_pda_tiocmset,
871 .attach = keyspan_pda_startup, 871 .attach = keyspan_pda_startup,
872 .shutdown = keyspan_pda_shutdown, 872 .release = keyspan_pda_release,
873}; 873};
874 874
875 875
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index fa817c66b3e8..0f44bb8e8d4f 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -73,7 +73,8 @@ static int debug;
73 * Function prototypes 73 * Function prototypes
74 */ 74 */
75static int klsi_105_startup(struct usb_serial *serial); 75static int klsi_105_startup(struct usb_serial *serial);
76static void klsi_105_shutdown(struct usb_serial *serial); 76static void klsi_105_disconnect(struct usb_serial *serial);
77static void klsi_105_release(struct usb_serial *serial);
77static int klsi_105_open(struct tty_struct *tty, 78static int klsi_105_open(struct tty_struct *tty,
78 struct usb_serial_port *port, struct file *filp); 79 struct usb_serial_port *port, struct file *filp);
79static void klsi_105_close(struct usb_serial_port *port); 80static void klsi_105_close(struct usb_serial_port *port);
@@ -131,7 +132,8 @@ static struct usb_serial_driver kl5kusb105d_device = {
131 .tiocmget = klsi_105_tiocmget, 132 .tiocmget = klsi_105_tiocmget,
132 .tiocmset = klsi_105_tiocmset, 133 .tiocmset = klsi_105_tiocmset,
133 .attach = klsi_105_startup, 134 .attach = klsi_105_startup,
134 .shutdown = klsi_105_shutdown, 135 .disconnect = klsi_105_disconnect,
136 .release = klsi_105_release,
135 .throttle = klsi_105_throttle, 137 .throttle = klsi_105_throttle,
136 .unthrottle = klsi_105_unthrottle, 138 .unthrottle = klsi_105_unthrottle,
137}; 139};
@@ -315,7 +317,7 @@ err_cleanup:
315} /* klsi_105_startup */ 317} /* klsi_105_startup */
316 318
317 319
318static void klsi_105_shutdown(struct usb_serial *serial) 320static void klsi_105_disconnect(struct usb_serial *serial)
319{ 321{
320 int i; 322 int i;
321 323
@@ -325,33 +327,36 @@ static void klsi_105_shutdown(struct usb_serial *serial)
325 for (i = 0; i < serial->num_ports; ++i) { 327 for (i = 0; i < serial->num_ports; ++i) {
326 struct klsi_105_private *priv = 328 struct klsi_105_private *priv =
327 usb_get_serial_port_data(serial->port[i]); 329 usb_get_serial_port_data(serial->port[i]);
328 unsigned long flags;
329 330
330 if (priv) { 331 if (priv) {
331 /* kill our write urb pool */ 332 /* kill our write urb pool */
332 int j; 333 int j;
333 struct urb **write_urbs = priv->write_urb_pool; 334 struct urb **write_urbs = priv->write_urb_pool;
334 spin_lock_irqsave(&priv->lock, flags);
335 335
336 for (j = 0; j < NUM_URBS; j++) { 336 for (j = 0; j < NUM_URBS; j++) {
337 if (write_urbs[j]) { 337 if (write_urbs[j]) {
338 /* FIXME - uncomment the following 338 usb_kill_urb(write_urbs[j]);
339 * usb_kill_urb call when the host
340 * controllers get fixed to set
341 * urb->dev = NULL after the urb is
342 * finished. Otherwise this call
343 * oopses. */
344 /* usb_kill_urb(write_urbs[j]); */
345 kfree(write_urbs[j]->transfer_buffer);
346 usb_free_urb(write_urbs[j]); 339 usb_free_urb(write_urbs[j]);
347 } 340 }
348 } 341 }
349 spin_unlock_irqrestore(&priv->lock, flags);
350 kfree(priv);
351 usb_set_serial_port_data(serial->port[i], NULL);
352 } 342 }
353 } 343 }
354} /* klsi_105_shutdown */ 344} /* klsi_105_disconnect */
345
346
347static void klsi_105_release(struct usb_serial *serial)
348{
349 int i;
350
351 dbg("%s", __func__);
352
353 for (i = 0; i < serial->num_ports; ++i) {
354 struct klsi_105_private *priv =
355 usb_get_serial_port_data(serial->port[i]);
356
357 kfree(priv);
358 }
359} /* klsi_105_release */
355 360
356static int klsi_105_open(struct tty_struct *tty, 361static int klsi_105_open(struct tty_struct *tty,
357 struct usb_serial_port *port, struct file *filp) 362 struct usb_serial_port *port, struct file *filp)
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 6b570498287f..6db0e561f680 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -69,7 +69,7 @@ static int debug;
69 69
70/* Function prototypes */ 70/* Function prototypes */
71static int kobil_startup(struct usb_serial *serial); 71static int kobil_startup(struct usb_serial *serial);
72static void kobil_shutdown(struct usb_serial *serial); 72static void kobil_release(struct usb_serial *serial);
73static int kobil_open(struct tty_struct *tty, 73static int kobil_open(struct tty_struct *tty,
74 struct usb_serial_port *port, struct file *filp); 74 struct usb_serial_port *port, struct file *filp);
75static void kobil_close(struct usb_serial_port *port); 75static void kobil_close(struct usb_serial_port *port);
@@ -117,7 +117,7 @@ static struct usb_serial_driver kobil_device = {
117 .id_table = id_table, 117 .id_table = id_table,
118 .num_ports = 1, 118 .num_ports = 1,
119 .attach = kobil_startup, 119 .attach = kobil_startup,
120 .shutdown = kobil_shutdown, 120 .release = kobil_release,
121 .ioctl = kobil_ioctl, 121 .ioctl = kobil_ioctl,
122 .set_termios = kobil_set_termios, 122 .set_termios = kobil_set_termios,
123 .tiocmget = kobil_tiocmget, 123 .tiocmget = kobil_tiocmget,
@@ -201,17 +201,13 @@ static int kobil_startup(struct usb_serial *serial)
201} 201}
202 202
203 203
204static void kobil_shutdown(struct usb_serial *serial) 204static void kobil_release(struct usb_serial *serial)
205{ 205{
206 int i; 206 int i;
207 dbg("%s - port %d", __func__, serial->port[0]->number); 207 dbg("%s - port %d", __func__, serial->port[0]->number);
208 208
209 for (i = 0; i < serial->num_ports; ++i) { 209 for (i = 0; i < serial->num_ports; ++i)
210 while (serial->port[i]->port.count > 0)
211 kobil_close(serial->port[i]);
212 kfree(usb_get_serial_port_data(serial->port[i])); 210 kfree(usb_get_serial_port_data(serial->port[i]));
213 usb_set_serial_port_data(serial->port[i], NULL);
214 }
215} 211}
216 212
217 213
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 873795548fc0..d8825e159aa5 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -92,7 +92,7 @@ static int debug;
92 * Function prototypes 92 * Function prototypes
93 */ 93 */
94static int mct_u232_startup(struct usb_serial *serial); 94static int mct_u232_startup(struct usb_serial *serial);
95static void mct_u232_shutdown(struct usb_serial *serial); 95static void mct_u232_release(struct usb_serial *serial);
96static int mct_u232_open(struct tty_struct *tty, 96static int mct_u232_open(struct tty_struct *tty,
97 struct usb_serial_port *port, struct file *filp); 97 struct usb_serial_port *port, struct file *filp);
98static void mct_u232_close(struct usb_serial_port *port); 98static void mct_u232_close(struct usb_serial_port *port);
@@ -149,7 +149,7 @@ static struct usb_serial_driver mct_u232_device = {
149 .tiocmget = mct_u232_tiocmget, 149 .tiocmget = mct_u232_tiocmget,
150 .tiocmset = mct_u232_tiocmset, 150 .tiocmset = mct_u232_tiocmset,
151 .attach = mct_u232_startup, 151 .attach = mct_u232_startup,
152 .shutdown = mct_u232_shutdown, 152 .release = mct_u232_release,
153}; 153};
154 154
155 155
@@ -407,7 +407,7 @@ static int mct_u232_startup(struct usb_serial *serial)
407} /* mct_u232_startup */ 407} /* mct_u232_startup */
408 408
409 409
410static void mct_u232_shutdown(struct usb_serial *serial) 410static void mct_u232_release(struct usb_serial *serial)
411{ 411{
412 struct mct_u232_private *priv; 412 struct mct_u232_private *priv;
413 int i; 413 int i;
@@ -417,12 +417,9 @@ static void mct_u232_shutdown(struct usb_serial *serial)
417 for (i = 0; i < serial->num_ports; ++i) { 417 for (i = 0; i < serial->num_ports; ++i) {
418 /* My special items, the standard routines free my urbs */ 418 /* My special items, the standard routines free my urbs */
419 priv = usb_get_serial_port_data(serial->port[i]); 419 priv = usb_get_serial_port_data(serial->port[i]);
420 if (priv) { 420 kfree(priv);
421 usb_set_serial_port_data(serial->port[i], NULL);
422 kfree(priv);
423 }
424 } 421 }
425} /* mct_u232_shutdown */ 422} /* mct_u232_release */
426 423
427static int mct_u232_open(struct tty_struct *tty, 424static int mct_u232_open(struct tty_struct *tty,
428 struct usb_serial_port *port, struct file *filp) 425 struct usb_serial_port *port, struct file *filp)
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 9e1a013ee7f6..bfc5ce000ef9 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1521,19 +1521,16 @@ static int mos7720_startup(struct usb_serial *serial)
1521 return 0; 1521 return 0;
1522} 1522}
1523 1523
1524static void mos7720_shutdown(struct usb_serial *serial) 1524static void mos7720_release(struct usb_serial *serial)
1525{ 1525{
1526 int i; 1526 int i;
1527 1527
1528 /* free private structure allocated for serial port */ 1528 /* free private structure allocated for serial port */
1529 for (i = 0; i < serial->num_ports; ++i) { 1529 for (i = 0; i < serial->num_ports; ++i)
1530 kfree(usb_get_serial_port_data(serial->port[i])); 1530 kfree(usb_get_serial_port_data(serial->port[i]));
1531 usb_set_serial_port_data(serial->port[i], NULL);
1532 }
1533 1531
1534 /* free private structure allocated for serial device */ 1532 /* free private structure allocated for serial device */
1535 kfree(usb_get_serial_data(serial)); 1533 kfree(usb_get_serial_data(serial));
1536 usb_set_serial_data(serial, NULL);
1537} 1534}
1538 1535
1539static struct usb_driver usb_driver = { 1536static struct usb_driver usb_driver = {
@@ -1558,7 +1555,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
1558 .throttle = mos7720_throttle, 1555 .throttle = mos7720_throttle,
1559 .unthrottle = mos7720_unthrottle, 1556 .unthrottle = mos7720_unthrottle,
1560 .attach = mos7720_startup, 1557 .attach = mos7720_startup,
1561 .shutdown = mos7720_shutdown, 1558 .release = mos7720_release,
1562 .ioctl = mos7720_ioctl, 1559 .ioctl = mos7720_ioctl,
1563 .set_termios = mos7720_set_termios, 1560 .set_termios = mos7720_set_termios,
1564 .write = mos7720_write, 1561 .write = mos7720_write,
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 10b78a37214f..c40f95c1951c 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -238,7 +238,7 @@ static int mos7840_set_reg_sync(struct usb_serial_port *port, __u16 reg,
238{ 238{
239 struct usb_device *dev = port->serial->dev; 239 struct usb_device *dev = port->serial->dev;
240 val = val & 0x00ff; 240 val = val & 0x00ff;
241 dbg("mos7840_set_reg_sync offset is %x, value %x\n", reg, val); 241 dbg("mos7840_set_reg_sync offset is %x, value %x", reg, val);
242 242
243 return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ, 243 return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
244 MCS_WR_RTYPE, val, reg, NULL, 0, 244 MCS_WR_RTYPE, val, reg, NULL, 0,
@@ -260,7 +260,7 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
260 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, 260 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
261 MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH, 261 MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
262 MOS_WDR_TIMEOUT); 262 MOS_WDR_TIMEOUT);
263 dbg("mos7840_get_reg_sync offset is %x, return val %x\n", reg, *val); 263 dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);
264 *val = (*val) & 0x00ff; 264 *val = (*val) & 0x00ff;
265 return ret; 265 return ret;
266} 266}
@@ -282,18 +282,18 @@ static int mos7840_set_uart_reg(struct usb_serial_port *port, __u16 reg,
282 if (port->serial->num_ports == 4) { 282 if (port->serial->num_ports == 4) {
283 val |= (((__u16) port->number - 283 val |= (((__u16) port->number -
284 (__u16) (port->serial->minor)) + 1) << 8; 284 (__u16) (port->serial->minor)) + 1) << 8;
285 dbg("mos7840_set_uart_reg application number is %x\n", val); 285 dbg("mos7840_set_uart_reg application number is %x", val);
286 } else { 286 } else {
287 if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) { 287 if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) {
288 val |= (((__u16) port->number - 288 val |= (((__u16) port->number -
289 (__u16) (port->serial->minor)) + 1) << 8; 289 (__u16) (port->serial->minor)) + 1) << 8;
290 dbg("mos7840_set_uart_reg application number is %x\n", 290 dbg("mos7840_set_uart_reg application number is %x",
291 val); 291 val);
292 } else { 292 } else {
293 val |= 293 val |=
294 (((__u16) port->number - 294 (((__u16) port->number -
295 (__u16) (port->serial->minor)) + 2) << 8; 295 (__u16) (port->serial->minor)) + 2) << 8;
296 dbg("mos7840_set_uart_reg application number is %x\n", 296 dbg("mos7840_set_uart_reg application number is %x",
297 val); 297 val);
298 } 298 }
299 } 299 }
@@ -315,24 +315,24 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
315 int ret = 0; 315 int ret = 0;
316 __u16 Wval; 316 __u16 Wval;
317 317
318 /* dbg("application number is %4x \n", 318 /* dbg("application number is %4x",
319 (((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */ 319 (((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
320 /* Wval is same as application number */ 320 /* Wval is same as application number */
321 if (port->serial->num_ports == 4) { 321 if (port->serial->num_ports == 4) {
322 Wval = 322 Wval =
323 (((__u16) port->number - (__u16) (port->serial->minor)) + 323 (((__u16) port->number - (__u16) (port->serial->minor)) +
324 1) << 8; 324 1) << 8;
325 dbg("mos7840_get_uart_reg application number is %x\n", Wval); 325 dbg("mos7840_get_uart_reg application number is %x", Wval);
326 } else { 326 } else {
327 if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) { 327 if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) {
328 Wval = (((__u16) port->number - 328 Wval = (((__u16) port->number -
329 (__u16) (port->serial->minor)) + 1) << 8; 329 (__u16) (port->serial->minor)) + 1) << 8;
330 dbg("mos7840_get_uart_reg application number is %x\n", 330 dbg("mos7840_get_uart_reg application number is %x",
331 Wval); 331 Wval);
332 } else { 332 } else {
333 Wval = (((__u16) port->number - 333 Wval = (((__u16) port->number -
334 (__u16) (port->serial->minor)) + 2) << 8; 334 (__u16) (port->serial->minor)) + 2) << 8;
335 dbg("mos7840_get_uart_reg application number is %x\n", 335 dbg("mos7840_get_uart_reg application number is %x",
336 Wval); 336 Wval);
337 } 337 }
338 } 338 }
@@ -346,11 +346,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
346static void mos7840_dump_serial_port(struct moschip_port *mos7840_port) 346static void mos7840_dump_serial_port(struct moschip_port *mos7840_port)
347{ 347{
348 348
349 dbg("***************************************\n"); 349 dbg("***************************************");
350 dbg("SpRegOffset is %2x\n", mos7840_port->SpRegOffset); 350 dbg("SpRegOffset is %2x", mos7840_port->SpRegOffset);
351 dbg("ControlRegOffset is %2x \n", mos7840_port->ControlRegOffset); 351 dbg("ControlRegOffset is %2x", mos7840_port->ControlRegOffset);
352 dbg("DCRRegOffset is %2x \n", mos7840_port->DcrRegOffset); 352 dbg("DCRRegOffset is %2x", mos7840_port->DcrRegOffset);
353 dbg("***************************************\n"); 353 dbg("***************************************");
354 354
355} 355}
356 356
@@ -474,12 +474,12 @@ static void mos7840_control_callback(struct urb *urb)
474 goto exit; 474 goto exit;
475 } 475 }
476 476
477 dbg("%s urb buffer size is %d\n", __func__, urb->actual_length); 477 dbg("%s urb buffer size is %d", __func__, urb->actual_length);
478 dbg("%s mos7840_port->MsrLsr is %d port %d\n", __func__, 478 dbg("%s mos7840_port->MsrLsr is %d port %d", __func__,
479 mos7840_port->MsrLsr, mos7840_port->port_num); 479 mos7840_port->MsrLsr, mos7840_port->port_num);
480 data = urb->transfer_buffer; 480 data = urb->transfer_buffer;
481 regval = (__u8) data[0]; 481 regval = (__u8) data[0];
482 dbg("%s data is %x\n", __func__, regval); 482 dbg("%s data is %x", __func__, regval);
483 if (mos7840_port->MsrLsr == 0) 483 if (mos7840_port->MsrLsr == 0)
484 mos7840_handle_new_msr(mos7840_port, regval); 484 mos7840_handle_new_msr(mos7840_port, regval);
485 else if (mos7840_port->MsrLsr == 1) 485 else if (mos7840_port->MsrLsr == 1)
@@ -538,7 +538,7 @@ static void mos7840_interrupt_callback(struct urb *urb)
538 __u16 wval, wreg = 0; 538 __u16 wval, wreg = 0;
539 int status = urb->status; 539 int status = urb->status;
540 540
541 dbg("%s", " : Entering\n"); 541 dbg("%s", " : Entering");
542 542
543 switch (status) { 543 switch (status) {
544 case 0: 544 case 0:
@@ -570,7 +570,7 @@ static void mos7840_interrupt_callback(struct urb *urb)
570 * Byte 5 FIFO status for both */ 570 * Byte 5 FIFO status for both */
571 571
572 if (length && length > 5) { 572 if (length && length > 5) {
573 dbg("%s \n", "Wrong data !!!"); 573 dbg("%s", "Wrong data !!!");
574 return; 574 return;
575 } 575 }
576 576
@@ -587,17 +587,17 @@ static void mos7840_interrupt_callback(struct urb *urb)
587 (__u16) (serial->minor)) + 1) << 8; 587 (__u16) (serial->minor)) + 1) << 8;
588 if (mos7840_port->open) { 588 if (mos7840_port->open) {
589 if (sp[i] & 0x01) { 589 if (sp[i] & 0x01) {
590 dbg("SP%d No Interrupt !!!\n", i); 590 dbg("SP%d No Interrupt !!!", i);
591 } else { 591 } else {
592 switch (sp[i] & 0x0f) { 592 switch (sp[i] & 0x0f) {
593 case SERIAL_IIR_RLS: 593 case SERIAL_IIR_RLS:
594 dbg("Serial Port %d: Receiver status error or ", i); 594 dbg("Serial Port %d: Receiver status error or ", i);
595 dbg("address bit detected in 9-bit mode\n"); 595 dbg("address bit detected in 9-bit mode");
596 mos7840_port->MsrLsr = 1; 596 mos7840_port->MsrLsr = 1;
597 wreg = LINE_STATUS_REGISTER; 597 wreg = LINE_STATUS_REGISTER;
598 break; 598 break;
599 case SERIAL_IIR_MS: 599 case SERIAL_IIR_MS:
600 dbg("Serial Port %d: Modem status change\n", i); 600 dbg("Serial Port %d: Modem status change", i);
601 mos7840_port->MsrLsr = 0; 601 mos7840_port->MsrLsr = 0;
602 wreg = MODEM_STATUS_REGISTER; 602 wreg = MODEM_STATUS_REGISTER;
603 break; 603 break;
@@ -689,7 +689,7 @@ static void mos7840_bulk_in_callback(struct urb *urb)
689 689
690 mos7840_port = urb->context; 690 mos7840_port = urb->context;
691 if (!mos7840_port) { 691 if (!mos7840_port) {
692 dbg("%s", "NULL mos7840_port pointer \n"); 692 dbg("%s", "NULL mos7840_port pointer");
693 mos7840_port->read_urb_busy = false; 693 mos7840_port->read_urb_busy = false;
694 return; 694 return;
695 } 695 }
@@ -702,41 +702,41 @@ static void mos7840_bulk_in_callback(struct urb *urb)
702 702
703 port = (struct usb_serial_port *)mos7840_port->port; 703 port = (struct usb_serial_port *)mos7840_port->port;
704 if (mos7840_port_paranoia_check(port, __func__)) { 704 if (mos7840_port_paranoia_check(port, __func__)) {
705 dbg("%s", "Port Paranoia failed \n"); 705 dbg("%s", "Port Paranoia failed");
706 mos7840_port->read_urb_busy = false; 706 mos7840_port->read_urb_busy = false;
707 return; 707 return;
708 } 708 }
709 709
710 serial = mos7840_get_usb_serial(port, __func__); 710 serial = mos7840_get_usb_serial(port, __func__);
711 if (!serial) { 711 if (!serial) {
712 dbg("%s\n", "Bad serial pointer "); 712 dbg("%s", "Bad serial pointer");
713 mos7840_port->read_urb_busy = false; 713 mos7840_port->read_urb_busy = false;
714 return; 714 return;
715 } 715 }
716 716
717 dbg("%s\n", "Entering... \n"); 717 dbg("%s", "Entering... ");
718 718
719 data = urb->transfer_buffer; 719 data = urb->transfer_buffer;
720 720
721 dbg("%s", "Entering ........... \n"); 721 dbg("%s", "Entering ...........");
722 722
723 if (urb->actual_length) { 723 if (urb->actual_length) {
724 tty = tty_port_tty_get(&mos7840_port->port->port); 724 tty = tty_port_tty_get(&mos7840_port->port->port);
725 if (tty) { 725 if (tty) {
726 tty_buffer_request_room(tty, urb->actual_length); 726 tty_buffer_request_room(tty, urb->actual_length);
727 tty_insert_flip_string(tty, data, urb->actual_length); 727 tty_insert_flip_string(tty, data, urb->actual_length);
728 dbg(" %s \n", data); 728 dbg(" %s ", data);
729 tty_flip_buffer_push(tty); 729 tty_flip_buffer_push(tty);
730 tty_kref_put(tty); 730 tty_kref_put(tty);
731 } 731 }
732 mos7840_port->icount.rx += urb->actual_length; 732 mos7840_port->icount.rx += urb->actual_length;
733 smp_wmb(); 733 smp_wmb();
734 dbg("mos7840_port->icount.rx is %d:\n", 734 dbg("mos7840_port->icount.rx is %d:",
735 mos7840_port->icount.rx); 735 mos7840_port->icount.rx);
736 } 736 }
737 737
738 if (!mos7840_port->read_urb) { 738 if (!mos7840_port->read_urb) {
739 dbg("%s", "URB KILLED !!!\n"); 739 dbg("%s", "URB KILLED !!!");
740 mos7840_port->read_urb_busy = false; 740 mos7840_port->read_urb_busy = false;
741 return; 741 return;
742 } 742 }
@@ -777,16 +777,16 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
777 spin_unlock(&mos7840_port->pool_lock); 777 spin_unlock(&mos7840_port->pool_lock);
778 778
779 if (status) { 779 if (status) {
780 dbg("nonzero write bulk status received:%d\n", status); 780 dbg("nonzero write bulk status received:%d", status);
781 return; 781 return;
782 } 782 }
783 783
784 if (mos7840_port_paranoia_check(mos7840_port->port, __func__)) { 784 if (mos7840_port_paranoia_check(mos7840_port->port, __func__)) {
785 dbg("%s", "Port Paranoia failed \n"); 785 dbg("%s", "Port Paranoia failed");
786 return; 786 return;
787 } 787 }
788 788
789 dbg("%s \n", "Entering ........."); 789 dbg("%s", "Entering .........");
790 790
791 tty = tty_port_tty_get(&mos7840_port->port->port); 791 tty = tty_port_tty_get(&mos7840_port->port->port);
792 if (tty && mos7840_port->open) 792 if (tty && mos7840_port->open)
@@ -830,15 +830,17 @@ static int mos7840_open(struct tty_struct *tty,
830 struct moschip_port *mos7840_port; 830 struct moschip_port *mos7840_port;
831 struct moschip_port *port0; 831 struct moschip_port *port0;
832 832
833 dbg ("%s enter", __func__);
834
833 if (mos7840_port_paranoia_check(port, __func__)) { 835 if (mos7840_port_paranoia_check(port, __func__)) {
834 dbg("%s", "Port Paranoia failed \n"); 836 dbg("%s", "Port Paranoia failed");
835 return -ENODEV; 837 return -ENODEV;
836 } 838 }
837 839
838 serial = port->serial; 840 serial = port->serial;
839 841
840 if (mos7840_serial_paranoia_check(serial, __func__)) { 842 if (mos7840_serial_paranoia_check(serial, __func__)) {
841 dbg("%s", "Serial Paranoia failed \n"); 843 dbg("%s", "Serial Paranoia failed");
842 return -ENODEV; 844 return -ENODEV;
843 } 845 }
844 846
@@ -891,20 +893,20 @@ static int mos7840_open(struct tty_struct *tty,
891 Data = 0x0; 893 Data = 0x0;
892 status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data); 894 status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
893 if (status < 0) { 895 if (status < 0) {
894 dbg("Reading Spreg failed\n"); 896 dbg("Reading Spreg failed");
895 return -1; 897 return -1;
896 } 898 }
897 Data |= 0x80; 899 Data |= 0x80;
898 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); 900 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
899 if (status < 0) { 901 if (status < 0) {
900 dbg("writing Spreg failed\n"); 902 dbg("writing Spreg failed");
901 return -1; 903 return -1;
902 } 904 }
903 905
904 Data &= ~0x80; 906 Data &= ~0x80;
905 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data); 907 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
906 if (status < 0) { 908 if (status < 0) {
907 dbg("writing Spreg failed\n"); 909 dbg("writing Spreg failed");
908 return -1; 910 return -1;
909 } 911 }
910 /* End of block to be checked */ 912 /* End of block to be checked */
@@ -913,7 +915,7 @@ static int mos7840_open(struct tty_struct *tty,
913 status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset, 915 status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
914 &Data); 916 &Data);
915 if (status < 0) { 917 if (status < 0) {
916 dbg("Reading Controlreg failed\n"); 918 dbg("Reading Controlreg failed");
917 return -1; 919 return -1;
918 } 920 }
919 Data |= 0x08; /* Driver done bit */ 921 Data |= 0x08; /* Driver done bit */
@@ -921,7 +923,7 @@ static int mos7840_open(struct tty_struct *tty,
921 status = mos7840_set_reg_sync(port, 923 status = mos7840_set_reg_sync(port,
922 mos7840_port->ControlRegOffset, Data); 924 mos7840_port->ControlRegOffset, Data);
923 if (status < 0) { 925 if (status < 0) {
924 dbg("writing Controlreg failed\n"); 926 dbg("writing Controlreg failed");
925 return -1; 927 return -1;
926 } 928 }
927 /* do register settings here */ 929 /* do register settings here */
@@ -932,21 +934,21 @@ static int mos7840_open(struct tty_struct *tty,
932 Data = 0x00; 934 Data = 0x00;
933 status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data); 935 status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
934 if (status < 0) { 936 if (status < 0) {
935 dbg("disableing interrupts failed\n"); 937 dbg("disabling interrupts failed");
936 return -1; 938 return -1;
937 } 939 }
938 /* Set FIFO_CONTROL_REGISTER to the default value */ 940 /* Set FIFO_CONTROL_REGISTER to the default value */
939 Data = 0x00; 941 Data = 0x00;
940 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); 942 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
941 if (status < 0) { 943 if (status < 0) {
942 dbg("Writing FIFO_CONTROL_REGISTER failed\n"); 944 dbg("Writing FIFO_CONTROL_REGISTER failed");
943 return -1; 945 return -1;
944 } 946 }
945 947
946 Data = 0xcf; 948 Data = 0xcf;
947 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data); 949 status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
948 if (status < 0) { 950 if (status < 0) {
949 dbg("Writing FIFO_CONTROL_REGISTER failed\n"); 951 dbg("Writing FIFO_CONTROL_REGISTER failed");
950 return -1; 952 return -1;
951 } 953 }
952 954
@@ -1043,12 +1045,12 @@ static int mos7840_open(struct tty_struct *tty,
1043 * (can't set it up in mos7840_startup as the * 1045 * (can't set it up in mos7840_startup as the *
1044 * structures were not set up at that time.) */ 1046 * structures were not set up at that time.) */
1045 1047
1046 dbg("port number is %d \n", port->number); 1048 dbg("port number is %d", port->number);
1047 dbg("serial number is %d \n", port->serial->minor); 1049 dbg("serial number is %d", port->serial->minor);
1048 dbg("Bulkin endpoint is %d \n", port->bulk_in_endpointAddress); 1050 dbg("Bulkin endpoint is %d", port->bulk_in_endpointAddress);
1049 dbg("BulkOut endpoint is %d \n", port->bulk_out_endpointAddress); 1051 dbg("BulkOut endpoint is %d", port->bulk_out_endpointAddress);
1050 dbg("Interrupt endpoint is %d \n", port->interrupt_in_endpointAddress); 1052 dbg("Interrupt endpoint is %d", port->interrupt_in_endpointAddress);
1051 dbg("port's number in the device is %d\n", mos7840_port->port_num); 1053 dbg("port's number in the device is %d", mos7840_port->port_num);
1052 mos7840_port->read_urb = port->read_urb; 1054 mos7840_port->read_urb = port->read_urb;
1053 1055
1054 /* set up our bulk in urb */ 1056 /* set up our bulk in urb */
@@ -1061,7 +1063,7 @@ static int mos7840_open(struct tty_struct *tty,
1061 mos7840_port->read_urb->transfer_buffer_length, 1063 mos7840_port->read_urb->transfer_buffer_length,
1062 mos7840_bulk_in_callback, mos7840_port); 1064 mos7840_bulk_in_callback, mos7840_port);
1063 1065
1064 dbg("mos7840_open: bulkin endpoint is %d\n", 1066 dbg("mos7840_open: bulkin endpoint is %d",
1065 port->bulk_in_endpointAddress); 1067 port->bulk_in_endpointAddress);
1066 mos7840_port->read_urb_busy = true; 1068 mos7840_port->read_urb_busy = true;
1067 response = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL); 1069 response = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
@@ -1087,9 +1089,11 @@ static int mos7840_open(struct tty_struct *tty,
1087 mos7840_port->icount.tx = 0; 1089 mos7840_port->icount.tx = 0;
1088 mos7840_port->icount.rx = 0; 1090 mos7840_port->icount.rx = 0;
1089 1091
1090 dbg("\n\nusb_serial serial:%p mos7840_port:%p\n usb_serial_port port:%p\n\n", 1092 dbg("usb_serial serial:%p mos7840_port:%p\n usb_serial_port port:%p",
1091 serial, mos7840_port, port); 1093 serial, mos7840_port, port);
1092 1094
1095 dbg ("%s leave", __func__);
1096
1093 return 0; 1097 return 0;
1094 1098
1095} 1099}
@@ -1112,16 +1116,16 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
1112 unsigned long flags; 1116 unsigned long flags;
1113 struct moschip_port *mos7840_port; 1117 struct moschip_port *mos7840_port;
1114 1118
1115 dbg("%s \n", " mos7840_chars_in_buffer:entering ..........."); 1119 dbg("%s", " mos7840_chars_in_buffer:entering ...........");
1116 1120
1117 if (mos7840_port_paranoia_check(port, __func__)) { 1121 if (mos7840_port_paranoia_check(port, __func__)) {
1118 dbg("%s", "Invalid port \n"); 1122 dbg("%s", "Invalid port");
1119 return 0; 1123 return 0;
1120 } 1124 }
1121 1125
1122 mos7840_port = mos7840_get_port_private(port); 1126 mos7840_port = mos7840_get_port_private(port);
1123 if (mos7840_port == NULL) { 1127 if (mos7840_port == NULL) {
1124 dbg("%s \n", "mos7840_break:leaving ..........."); 1128 dbg("%s", "mos7840_break:leaving ...........");
1125 return 0; 1129 return 0;
1126 } 1130 }
1127 1131
@@ -1148,16 +1152,16 @@ static void mos7840_close(struct usb_serial_port *port)
1148 int j; 1152 int j;
1149 __u16 Data; 1153 __u16 Data;
1150 1154
1151 dbg("%s\n", "mos7840_close:entering..."); 1155 dbg("%s", "mos7840_close:entering...");
1152 1156
1153 if (mos7840_port_paranoia_check(port, __func__)) { 1157 if (mos7840_port_paranoia_check(port, __func__)) {
1154 dbg("%s", "Port Paranoia failed \n"); 1158 dbg("%s", "Port Paranoia failed");
1155 return; 1159 return;
1156 } 1160 }
1157 1161
1158 serial = mos7840_get_usb_serial(port, __func__); 1162 serial = mos7840_get_usb_serial(port, __func__);
1159 if (!serial) { 1163 if (!serial) {
1160 dbg("%s", "Serial Paranoia failed \n"); 1164 dbg("%s", "Serial Paranoia failed");
1161 return; 1165 return;
1162 } 1166 }
1163 1167
@@ -1185,27 +1189,27 @@ static void mos7840_close(struct usb_serial_port *port)
1185 * and interrupt read if they exists */ 1189 * and interrupt read if they exists */
1186 if (serial->dev) { 1190 if (serial->dev) {
1187 if (mos7840_port->write_urb) { 1191 if (mos7840_port->write_urb) {
1188 dbg("%s", "Shutdown bulk write\n"); 1192 dbg("%s", "Shutdown bulk write");
1189 usb_kill_urb(mos7840_port->write_urb); 1193 usb_kill_urb(mos7840_port->write_urb);
1190 } 1194 }
1191 if (mos7840_port->read_urb) { 1195 if (mos7840_port->read_urb) {
1192 dbg("%s", "Shutdown bulk read\n"); 1196 dbg("%s", "Shutdown bulk read");
1193 usb_kill_urb(mos7840_port->read_urb); 1197 usb_kill_urb(mos7840_port->read_urb);
1194 mos7840_port->read_urb_busy = false; 1198 mos7840_port->read_urb_busy = false;
1195 } 1199 }
1196 if ((&mos7840_port->control_urb)) { 1200 if ((&mos7840_port->control_urb)) {
1197 dbg("%s", "Shutdown control read\n"); 1201 dbg("%s", "Shutdown control read");
1198 /*/ usb_kill_urb (mos7840_port->control_urb); */ 1202 /*/ usb_kill_urb (mos7840_port->control_urb); */
1199 } 1203 }
1200 } 1204 }
1201/* if(mos7840_port->ctrl_buf != NULL) */ 1205/* if(mos7840_port->ctrl_buf != NULL) */
1202/* kfree(mos7840_port->ctrl_buf); */ 1206/* kfree(mos7840_port->ctrl_buf); */
1203 port0->open_ports--; 1207 port0->open_ports--;
1204 dbg("mos7840_num_open_ports in close%d:in port%d\n", 1208 dbg("mos7840_num_open_ports in close%d:in port%d",
1205 port0->open_ports, port->number); 1209 port0->open_ports, port->number);
1206 if (port0->open_ports == 0) { 1210 if (port0->open_ports == 0) {
1207 if (serial->port[0]->interrupt_in_urb) { 1211 if (serial->port[0]->interrupt_in_urb) {
1208 dbg("%s", "Shutdown interrupt_in_urb\n"); 1212 dbg("%s", "Shutdown interrupt_in_urb");
1209 usb_kill_urb(serial->port[0]->interrupt_in_urb); 1213 usb_kill_urb(serial->port[0]->interrupt_in_urb);
1210 } 1214 }
1211 } 1215 }
@@ -1225,7 +1229,7 @@ static void mos7840_close(struct usb_serial_port *port)
1225 1229
1226 mos7840_port->open = 0; 1230 mos7840_port->open = 0;
1227 1231
1228 dbg("%s \n", "Leaving ............"); 1232 dbg("%s", "Leaving ............");
1229} 1233}
1230 1234
1231/************************************************************************ 1235/************************************************************************
@@ -1280,17 +1284,17 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
1280 struct usb_serial *serial; 1284 struct usb_serial *serial;
1281 struct moschip_port *mos7840_port; 1285 struct moschip_port *mos7840_port;
1282 1286
1283 dbg("%s \n", "Entering ..........."); 1287 dbg("%s", "Entering ...........");
1284 dbg("mos7840_break: Start\n"); 1288 dbg("mos7840_break: Start");
1285 1289
1286 if (mos7840_port_paranoia_check(port, __func__)) { 1290 if (mos7840_port_paranoia_check(port, __func__)) {
1287 dbg("%s", "Port Paranoia failed \n"); 1291 dbg("%s", "Port Paranoia failed");
1288 return; 1292 return;
1289 } 1293 }
1290 1294
1291 serial = mos7840_get_usb_serial(port, __func__); 1295 serial = mos7840_get_usb_serial(port, __func__);
1292 if (!serial) { 1296 if (!serial) {
1293 dbg("%s", "Serial Paranoia failed \n"); 1297 dbg("%s", "Serial Paranoia failed");
1294 return; 1298 return;
1295 } 1299 }
1296 1300
@@ -1310,7 +1314,7 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
1310 1314
1311 /* FIXME: no locking on shadowLCR anywhere in driver */ 1315 /* FIXME: no locking on shadowLCR anywhere in driver */
1312 mos7840_port->shadowLCR = data; 1316 mos7840_port->shadowLCR = data;
1313 dbg("mcs7840_break mos7840_port->shadowLCR is %x\n", 1317 dbg("mcs7840_break mos7840_port->shadowLCR is %x",
1314 mos7840_port->shadowLCR); 1318 mos7840_port->shadowLCR);
1315 mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, 1319 mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER,
1316 mos7840_port->shadowLCR); 1320 mos7840_port->shadowLCR);
@@ -1334,17 +1338,17 @@ static int mos7840_write_room(struct tty_struct *tty)
1334 unsigned long flags; 1338 unsigned long flags;
1335 struct moschip_port *mos7840_port; 1339 struct moschip_port *mos7840_port;
1336 1340
1337 dbg("%s \n", " mos7840_write_room:entering ..........."); 1341 dbg("%s", " mos7840_write_room:entering ...........");
1338 1342
1339 if (mos7840_port_paranoia_check(port, __func__)) { 1343 if (mos7840_port_paranoia_check(port, __func__)) {
1340 dbg("%s", "Invalid port \n"); 1344 dbg("%s", "Invalid port");
1341 dbg("%s \n", " mos7840_write_room:leaving ..........."); 1345 dbg("%s", " mos7840_write_room:leaving ...........");
1342 return -1; 1346 return -1;
1343 } 1347 }
1344 1348
1345 mos7840_port = mos7840_get_port_private(port); 1349 mos7840_port = mos7840_get_port_private(port);
1346 if (mos7840_port == NULL) { 1350 if (mos7840_port == NULL) {
1347 dbg("%s \n", "mos7840_break:leaving ..........."); 1351 dbg("%s", "mos7840_break:leaving ...........");
1348 return -1; 1352 return -1;
1349 } 1353 }
1350 1354
@@ -1384,16 +1388,16 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
1384 /* __u16 Data; */ 1388 /* __u16 Data; */
1385 const unsigned char *current_position = data; 1389 const unsigned char *current_position = data;
1386 unsigned char *data1; 1390 unsigned char *data1;
1387 dbg("%s \n", "entering ..........."); 1391 dbg("%s", "entering ...........");
1388 /* dbg("mos7840_write: mos7840_port->shadowLCR is %x\n", 1392 /* dbg("mos7840_write: mos7840_port->shadowLCR is %x",
1389 mos7840_port->shadowLCR); */ 1393 mos7840_port->shadowLCR); */
1390 1394
1391#ifdef NOTMOS7840 1395#ifdef NOTMOS7840
1392 Data = 0x00; 1396 Data = 0x00;
1393 status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data); 1397 status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
1394 mos7840_port->shadowLCR = Data; 1398 mos7840_port->shadowLCR = Data;
1395 dbg("mos7840_write: LINE_CONTROL_REGISTER is %x\n", Data); 1399 dbg("mos7840_write: LINE_CONTROL_REGISTER is %x", Data);
1396 dbg("mos7840_write: mos7840_port->shadowLCR is %x\n", 1400 dbg("mos7840_write: mos7840_port->shadowLCR is %x",
1397 mos7840_port->shadowLCR); 1401 mos7840_port->shadowLCR);
1398 1402
1399 /* Data = 0x03; */ 1403 /* Data = 0x03; */
@@ -1407,32 +1411,32 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
1407 /* status = mos7840_set_uart_reg(port,DIVISOR_LATCH_LSB,Data); */ 1411 /* status = mos7840_set_uart_reg(port,DIVISOR_LATCH_LSB,Data); */
1408 Data = 0x00; 1412 Data = 0x00;
1409 status = mos7840_get_uart_reg(port, DIVISOR_LATCH_LSB, &Data); 1413 status = mos7840_get_uart_reg(port, DIVISOR_LATCH_LSB, &Data);
1410 dbg("mos7840_write:DLL value is %x\n", Data); 1414 dbg("mos7840_write:DLL value is %x", Data);
1411 1415
1412 Data = 0x0; 1416 Data = 0x0;
1413 status = mos7840_get_uart_reg(port, DIVISOR_LATCH_MSB, &Data); 1417 status = mos7840_get_uart_reg(port, DIVISOR_LATCH_MSB, &Data);
1414 dbg("mos7840_write:DLM value is %x\n", Data); 1418 dbg("mos7840_write:DLM value is %x", Data);
1415 1419
1416 Data = Data & ~SERIAL_LCR_DLAB; 1420 Data = Data & ~SERIAL_LCR_DLAB;
1417 dbg("mos7840_write: mos7840_port->shadowLCR is %x\n", 1421 dbg("mos7840_write: mos7840_port->shadowLCR is %x",
1418 mos7840_port->shadowLCR); 1422 mos7840_port->shadowLCR);
1419 status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data); 1423 status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
1420#endif 1424#endif
1421 1425
1422 if (mos7840_port_paranoia_check(port, __func__)) { 1426 if (mos7840_port_paranoia_check(port, __func__)) {
1423 dbg("%s", "Port Paranoia failed \n"); 1427 dbg("%s", "Port Paranoia failed");
1424 return -1; 1428 return -1;
1425 } 1429 }
1426 1430
1427 serial = port->serial; 1431 serial = port->serial;
1428 if (mos7840_serial_paranoia_check(serial, __func__)) { 1432 if (mos7840_serial_paranoia_check(serial, __func__)) {
1429 dbg("%s", "Serial Paranoia failed \n"); 1433 dbg("%s", "Serial Paranoia failed");
1430 return -1; 1434 return -1;
1431 } 1435 }
1432 1436
1433 mos7840_port = mos7840_get_port_private(port); 1437 mos7840_port = mos7840_get_port_private(port);
1434 if (mos7840_port == NULL) { 1438 if (mos7840_port == NULL) {
1435 dbg("%s", "mos7840_port is NULL\n"); 1439 dbg("%s", "mos7840_port is NULL");
1436 return -1; 1440 return -1;
1437 } 1441 }
1438 1442
@@ -1444,7 +1448,7 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
1444 if (!mos7840_port->busy[i]) { 1448 if (!mos7840_port->busy[i]) {
1445 mos7840_port->busy[i] = 1; 1449 mos7840_port->busy[i] = 1;
1446 urb = mos7840_port->write_urb_pool[i]; 1450 urb = mos7840_port->write_urb_pool[i];
1447 dbg("\nURB:%d", i); 1451 dbg("URB:%d", i);
1448 break; 1452 break;
1449 } 1453 }
1450 } 1454 }
@@ -1479,7 +1483,7 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
1479 mos7840_bulk_out_data_callback, mos7840_port); 1483 mos7840_bulk_out_data_callback, mos7840_port);
1480 1484
1481 data1 = urb->transfer_buffer; 1485 data1 = urb->transfer_buffer;
1482 dbg("\nbulkout endpoint is %d", port->bulk_out_endpointAddress); 1486 dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress);
1483 1487
1484 /* send it down the pipe */ 1488 /* send it down the pipe */
1485 status = usb_submit_urb(urb, GFP_ATOMIC); 1489 status = usb_submit_urb(urb, GFP_ATOMIC);
@@ -1494,7 +1498,7 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
1494 bytes_sent = transfer_size; 1498 bytes_sent = transfer_size;
1495 mos7840_port->icount.tx += transfer_size; 1499 mos7840_port->icount.tx += transfer_size;
1496 smp_wmb(); 1500 smp_wmb();
1497 dbg("mos7840_port->icount.tx is %d:\n", mos7840_port->icount.tx); 1501 dbg("mos7840_port->icount.tx is %d:", mos7840_port->icount.tx);
1498exit: 1502exit:
1499 return bytes_sent; 1503 return bytes_sent;
1500 1504
@@ -1513,11 +1517,11 @@ static void mos7840_throttle(struct tty_struct *tty)
1513 int status; 1517 int status;
1514 1518
1515 if (mos7840_port_paranoia_check(port, __func__)) { 1519 if (mos7840_port_paranoia_check(port, __func__)) {
1516 dbg("%s", "Invalid port \n"); 1520 dbg("%s", "Invalid port");
1517 return; 1521 return;
1518 } 1522 }
1519 1523
1520 dbg("- port %d\n", port->number); 1524 dbg("- port %d", port->number);
1521 1525
1522 mos7840_port = mos7840_get_port_private(port); 1526 mos7840_port = mos7840_get_port_private(port);
1523 1527
@@ -1525,11 +1529,11 @@ static void mos7840_throttle(struct tty_struct *tty)
1525 return; 1529 return;
1526 1530
1527 if (!mos7840_port->open) { 1531 if (!mos7840_port->open) {
1528 dbg("%s\n", "port not opened"); 1532 dbg("%s", "port not opened");
1529 return; 1533 return;
1530 } 1534 }
1531 1535
1532 dbg("%s", "Entering .......... \n"); 1536 dbg("%s", "Entering ..........");
1533 1537
1534 /* if we are implementing XON/XOFF, send the stop character */ 1538 /* if we are implementing XON/XOFF, send the stop character */
1535 if (I_IXOFF(tty)) { 1539 if (I_IXOFF(tty)) {
@@ -1563,7 +1567,7 @@ static void mos7840_unthrottle(struct tty_struct *tty)
1563 struct moschip_port *mos7840_port = mos7840_get_port_private(port); 1567 struct moschip_port *mos7840_port = mos7840_get_port_private(port);
1564 1568
1565 if (mos7840_port_paranoia_check(port, __func__)) { 1569 if (mos7840_port_paranoia_check(port, __func__)) {
1566 dbg("%s", "Invalid port \n"); 1570 dbg("%s", "Invalid port");
1567 return; 1571 return;
1568 } 1572 }
1569 1573
@@ -1575,7 +1579,7 @@ static void mos7840_unthrottle(struct tty_struct *tty)
1575 return; 1579 return;
1576 } 1580 }
1577 1581
1578 dbg("%s", "Entering .......... \n"); 1582 dbg("%s", "Entering ..........");
1579 1583
1580 /* if we are implementing XON/XOFF, send the start character */ 1584 /* if we are implementing XON/XOFF, send the start character */
1581 if (I_IXOFF(tty)) { 1585 if (I_IXOFF(tty)) {
@@ -1660,7 +1664,7 @@ static int mos7840_tiocmset(struct tty_struct *tty, struct file *file,
1660 1664
1661 status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr); 1665 status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr);
1662 if (status < 0) { 1666 if (status < 0) {
1663 dbg("setting MODEM_CONTROL_REGISTER Failed\n"); 1667 dbg("setting MODEM_CONTROL_REGISTER Failed");
1664 return status; 1668 return status;
1665 } 1669 }
1666 1670
@@ -1729,11 +1733,11 @@ static int mos7840_calc_baud_rate_divisor(int baudRate, int *divisor,
1729 custom++; 1733 custom++;
1730 *divisor = custom; 1734 *divisor = custom;
1731 1735
1732 dbg(" Baud %d = %d\n", baudrate, custom); 1736 dbg(" Baud %d = %d", baudrate, custom);
1733 return 0; 1737 return 0;
1734 } 1738 }
1735 1739
1736 dbg("%s\n", " Baud calculation Failed..."); 1740 dbg("%s", " Baud calculation Failed...");
1737 return -1; 1741 return -1;
1738#endif 1742#endif
1739} 1743}
@@ -1759,16 +1763,16 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
1759 1763
1760 port = (struct usb_serial_port *)mos7840_port->port; 1764 port = (struct usb_serial_port *)mos7840_port->port;
1761 if (mos7840_port_paranoia_check(port, __func__)) { 1765 if (mos7840_port_paranoia_check(port, __func__)) {
1762 dbg("%s", "Invalid port \n"); 1766 dbg("%s", "Invalid port");
1763 return -1; 1767 return -1;
1764 } 1768 }
1765 1769
1766 if (mos7840_serial_paranoia_check(port->serial, __func__)) { 1770 if (mos7840_serial_paranoia_check(port->serial, __func__)) {
1767 dbg("%s", "Invalid Serial \n"); 1771 dbg("%s", "Invalid Serial");
1768 return -1; 1772 return -1;
1769 } 1773 }
1770 1774
1771 dbg("%s", "Entering .......... \n"); 1775 dbg("%s", "Entering ..........");
1772 1776
1773 number = mos7840_port->port->number - mos7840_port->port->serial->minor; 1777 number = mos7840_port->port->number - mos7840_port->port->serial->minor;
1774 1778
@@ -1784,7 +1788,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
1784 status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, 1788 status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
1785 Data); 1789 Data);
1786 if (status < 0) { 1790 if (status < 0) {
1787 dbg("Writing spreg failed in set_serial_baud\n"); 1791 dbg("Writing spreg failed in set_serial_baud");
1788 return -1; 1792 return -1;
1789 } 1793 }
1790#endif 1794#endif
@@ -1797,7 +1801,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
1797 status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, 1801 status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
1798 Data); 1802 Data);
1799 if (status < 0) { 1803 if (status < 0) {
1800 dbg("Writing spreg failed in set_serial_baud\n"); 1804 dbg("Writing spreg failed in set_serial_baud");
1801 return -1; 1805 return -1;
1802 } 1806 }
1803#endif 1807#endif
@@ -1812,14 +1816,14 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
1812 status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, 1816 status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset,
1813 &Data); 1817 &Data);
1814 if (status < 0) { 1818 if (status < 0) {
1815 dbg("reading spreg failed in set_serial_baud\n"); 1819 dbg("reading spreg failed in set_serial_baud");
1816 return -1; 1820 return -1;
1817 } 1821 }
1818 Data = (Data & 0x8f) | clk_sel_val; 1822 Data = (Data & 0x8f) | clk_sel_val;
1819 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, 1823 status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset,
1820 Data); 1824 Data);
1821 if (status < 0) { 1825 if (status < 0) {
1822 dbg("Writing spreg failed in set_serial_baud\n"); 1826 dbg("Writing spreg failed in set_serial_baud");
1823 return -1; 1827 return -1;
1824 } 1828 }
1825 /* Calculate the Divisor */ 1829 /* Calculate the Divisor */
@@ -1835,11 +1839,11 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
1835 1839
1836 /* Write the divisor */ 1840 /* Write the divisor */
1837 Data = (unsigned char)(divisor & 0xff); 1841 Data = (unsigned char)(divisor & 0xff);
1838 dbg("set_serial_baud Value to write DLL is %x\n", Data); 1842 dbg("set_serial_baud Value to write DLL is %x", Data);
1839 mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data); 1843 mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data);
1840 1844
1841 Data = (unsigned char)((divisor & 0xff00) >> 8); 1845 Data = (unsigned char)((divisor & 0xff00) >> 8);
1842 dbg("set_serial_baud Value to write DLM is %x\n", Data); 1846 dbg("set_serial_baud Value to write DLM is %x", Data);
1843 mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data); 1847 mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data);
1844 1848
1845 /* Disable access to divisor latch */ 1849 /* Disable access to divisor latch */
@@ -1877,12 +1881,12 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
1877 port = (struct usb_serial_port *)mos7840_port->port; 1881 port = (struct usb_serial_port *)mos7840_port->port;
1878 1882
1879 if (mos7840_port_paranoia_check(port, __func__)) { 1883 if (mos7840_port_paranoia_check(port, __func__)) {
1880 dbg("%s", "Invalid port \n"); 1884 dbg("%s", "Invalid port");
1881 return; 1885 return;
1882 } 1886 }
1883 1887
1884 if (mos7840_serial_paranoia_check(port->serial, __func__)) { 1888 if (mos7840_serial_paranoia_check(port->serial, __func__)) {
1885 dbg("%s", "Invalid Serial \n"); 1889 dbg("%s", "Invalid Serial");
1886 return; 1890 return;
1887 } 1891 }
1888 1892
@@ -1895,7 +1899,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
1895 return; 1899 return;
1896 } 1900 }
1897 1901
1898 dbg("%s", "Entering .......... \n"); 1902 dbg("%s", "Entering ..........");
1899 1903
1900 lData = LCR_BITS_8; 1904 lData = LCR_BITS_8;
1901 lStop = LCR_STOP_1; 1905 lStop = LCR_STOP_1;
@@ -1955,7 +1959,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
1955 ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK); 1959 ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
1956 mos7840_port->shadowLCR |= (lData | lParity | lStop); 1960 mos7840_port->shadowLCR |= (lData | lParity | lStop);
1957 1961
1958 dbg("mos7840_change_port_settings mos7840_port->shadowLCR is %x\n", 1962 dbg("mos7840_change_port_settings mos7840_port->shadowLCR is %x",
1959 mos7840_port->shadowLCR); 1963 mos7840_port->shadowLCR);
1960 /* Disable Interrupts */ 1964 /* Disable Interrupts */
1961 Data = 0x00; 1965 Data = 0x00;
@@ -1997,7 +2001,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
1997 2001
1998 if (!baud) { 2002 if (!baud) {
1999 /* pick a default, any default... */ 2003 /* pick a default, any default... */
2000 dbg("%s\n", "Picked default baud..."); 2004 dbg("%s", "Picked default baud...");
2001 baud = 9600; 2005 baud = 9600;
2002 } 2006 }
2003 2007
@@ -2020,7 +2024,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
2020 } 2024 }
2021 wake_up(&mos7840_port->delta_msr_wait); 2025 wake_up(&mos7840_port->delta_msr_wait);
2022 mos7840_port->delta_msr_cond = 1; 2026 mos7840_port->delta_msr_cond = 1;
2023 dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x\n", 2027 dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x",
2024 mos7840_port->shadowLCR); 2028 mos7840_port->shadowLCR);
2025 2029
2026 return; 2030 return;
@@ -2040,16 +2044,16 @@ static void mos7840_set_termios(struct tty_struct *tty,
2040 unsigned int cflag; 2044 unsigned int cflag;
2041 struct usb_serial *serial; 2045 struct usb_serial *serial;
2042 struct moschip_port *mos7840_port; 2046 struct moschip_port *mos7840_port;
2043 dbg("mos7840_set_termios: START\n"); 2047 dbg("mos7840_set_termios: START");
2044 if (mos7840_port_paranoia_check(port, __func__)) { 2048 if (mos7840_port_paranoia_check(port, __func__)) {
2045 dbg("%s", "Invalid port \n"); 2049 dbg("%s", "Invalid port");
2046 return; 2050 return;
2047 } 2051 }
2048 2052
2049 serial = port->serial; 2053 serial = port->serial;
2050 2054
2051 if (mos7840_serial_paranoia_check(serial, __func__)) { 2055 if (mos7840_serial_paranoia_check(serial, __func__)) {
2052 dbg("%s", "Invalid Serial \n"); 2056 dbg("%s", "Invalid Serial");
2053 return; 2057 return;
2054 } 2058 }
2055 2059
@@ -2063,7 +2067,7 @@ static void mos7840_set_termios(struct tty_struct *tty,
2063 return; 2067 return;
2064 } 2068 }
2065 2069
2066 dbg("%s\n", "setting termios - "); 2070 dbg("%s", "setting termios - ");
2067 2071
2068 cflag = tty->termios->c_cflag; 2072 cflag = tty->termios->c_cflag;
2069 2073
@@ -2078,7 +2082,7 @@ static void mos7840_set_termios(struct tty_struct *tty,
2078 mos7840_change_port_settings(tty, mos7840_port, old_termios); 2082 mos7840_change_port_settings(tty, mos7840_port, old_termios);
2079 2083
2080 if (!mos7840_port->read_urb) { 2084 if (!mos7840_port->read_urb) {
2081 dbg("%s", "URB KILLED !!!!!\n"); 2085 dbg("%s", "URB KILLED !!!!!");
2082 return; 2086 return;
2083 } 2087 }
2084 2088
@@ -2144,7 +2148,7 @@ static int mos7840_set_modem_info(struct moschip_port *mos7840_port,
2144 2148
2145 port = (struct usb_serial_port *)mos7840_port->port; 2149 port = (struct usb_serial_port *)mos7840_port->port;
2146 if (mos7840_port_paranoia_check(port, __func__)) { 2150 if (mos7840_port_paranoia_check(port, __func__)) {
2147 dbg("%s", "Invalid port \n"); 2151 dbg("%s", "Invalid port");
2148 return -1; 2152 return -1;
2149 } 2153 }
2150 2154
@@ -2189,7 +2193,7 @@ static int mos7840_set_modem_info(struct moschip_port *mos7840_port,
2189 status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); 2193 status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
2190 unlock_kernel(); 2194 unlock_kernel();
2191 if (status < 0) { 2195 if (status < 0) {
2192 dbg("setting MODEM_CONTROL_REGISTER Failed\n"); 2196 dbg("setting MODEM_CONTROL_REGISTER Failed");
2193 return -1; 2197 return -1;
2194 } 2198 }
2195 2199
@@ -2274,7 +2278,7 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
2274 int mosret = 0; 2278 int mosret = 0;
2275 2279
2276 if (mos7840_port_paranoia_check(port, __func__)) { 2280 if (mos7840_port_paranoia_check(port, __func__)) {
2277 dbg("%s", "Invalid port \n"); 2281 dbg("%s", "Invalid port");
2278 return -1; 2282 return -1;
2279 } 2283 }
2280 2284
@@ -2374,9 +2378,8 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
2374{ 2378{
2375 int mos7840_num_ports = 0; 2379 int mos7840_num_ports = 0;
2376 2380
2377 dbg("numberofendpoints: %d \n", 2381 dbg("numberofendpoints: cur %d, alt %d",
2378 (int)serial->interface->cur_altsetting->desc.bNumEndpoints); 2382 (int)serial->interface->cur_altsetting->desc.bNumEndpoints,
2379 dbg("numberofendpoints: %d \n",
2380 (int)serial->interface->altsetting->desc.bNumEndpoints); 2383 (int)serial->interface->altsetting->desc.bNumEndpoints);
2381 if (serial->interface->cur_altsetting->desc.bNumEndpoints == 5) { 2384 if (serial->interface->cur_altsetting->desc.bNumEndpoints == 5) {
2382 mos7840_num_ports = serial->num_ports = 2; 2385 mos7840_num_ports = serial->num_ports = 2;
@@ -2385,7 +2388,7 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
2385 serial->num_bulk_out = 4; 2388 serial->num_bulk_out = 4;
2386 mos7840_num_ports = serial->num_ports = 4; 2389 mos7840_num_ports = serial->num_ports = 4;
2387 } 2390 }
2388 2391 dbg ("mos7840_num_ports = %d", mos7840_num_ports);
2389 return mos7840_num_ports; 2392 return mos7840_num_ports;
2390} 2393}
2391 2394
@@ -2400,22 +2403,24 @@ static int mos7840_startup(struct usb_serial *serial)
2400 int i, status; 2403 int i, status;
2401 2404
2402 __u16 Data; 2405 __u16 Data;
2403 dbg("%s \n", " mos7840_startup :entering.........."); 2406 dbg("%s", "mos7840_startup :Entering..........");
2404 2407
2405 if (!serial) { 2408 if (!serial) {
2406 dbg("%s\n", "Invalid Handler"); 2409 dbg("%s", "Invalid Handler");
2407 return -1; 2410 return -1;
2408 } 2411 }
2409 2412
2410 dev = serial->dev; 2413 dev = serial->dev;
2411 2414
2412 dbg("%s\n", "Entering..."); 2415 dbg("%s", "Entering...");
2416 dbg ("mos7840_startup: serial = %p", serial);
2413 2417
2414 /* we set up the pointers to the endpoints in the mos7840_open * 2418 /* we set up the pointers to the endpoints in the mos7840_open *
2415 * function, as the structures aren't created yet. */ 2419 * function, as the structures aren't created yet. */
2416 2420
2417 /* set up port private structures */ 2421 /* set up port private structures */
2418 for (i = 0; i < serial->num_ports; ++i) { 2422 for (i = 0; i < serial->num_ports; ++i) {
2423 dbg ("mos7840_startup: configuring port %d............", i);
2419 mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL); 2424 mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
2420 if (mos7840_port == NULL) { 2425 if (mos7840_port == NULL) {
2421 dev_err(&dev->dev, "%s - Out of memory\n", __func__); 2426 dev_err(&dev->dev, "%s - Out of memory\n", __func__);
@@ -2473,10 +2478,10 @@ static int mos7840_startup(struct usb_serial *serial)
2473 status = mos7840_get_reg_sync(serial->port[i], 2478 status = mos7840_get_reg_sync(serial->port[i],
2474 mos7840_port->ControlRegOffset, &Data); 2479 mos7840_port->ControlRegOffset, &Data);
2475 if (status < 0) { 2480 if (status < 0) {
2476 dbg("Reading ControlReg failed status-0x%x\n", status); 2481 dbg("Reading ControlReg failed status-0x%x", status);
2477 break; 2482 break;
2478 } else 2483 } else
2479 dbg("ControlReg Reading success val is %x, status%d\n", 2484 dbg("ControlReg Reading success val is %x, status%d",
2480 Data, status); 2485 Data, status);
2481 Data |= 0x08; /* setting driver done bit */ 2486 Data |= 0x08; /* setting driver done bit */
2482 Data |= 0x04; /* sp1_bit to have cts change reflect in 2487 Data |= 0x04; /* sp1_bit to have cts change reflect in
@@ -2486,10 +2491,10 @@ static int mos7840_startup(struct usb_serial *serial)
2486 status = mos7840_set_reg_sync(serial->port[i], 2491 status = mos7840_set_reg_sync(serial->port[i],
2487 mos7840_port->ControlRegOffset, Data); 2492 mos7840_port->ControlRegOffset, Data);
2488 if (status < 0) { 2493 if (status < 0) {
2489 dbg("Writing ControlReg failed(rx_disable) status-0x%x\n", status); 2494 dbg("Writing ControlReg failed(rx_disable) status-0x%x", status);
2490 break; 2495 break;
2491 } else 2496 } else
2492 dbg("ControlReg Writing success(rx_disable) status%d\n", 2497 dbg("ControlReg Writing success(rx_disable) status%d",
2493 status); 2498 status);
2494 2499
2495 /* Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2 2500 /* Write default values in DCR (i.e 0x01 in DCR0, 0x05 in DCR2
@@ -2498,48 +2503,48 @@ static int mos7840_startup(struct usb_serial *serial)
2498 status = mos7840_set_reg_sync(serial->port[i], 2503 status = mos7840_set_reg_sync(serial->port[i],
2499 (__u16) (mos7840_port->DcrRegOffset + 0), Data); 2504 (__u16) (mos7840_port->DcrRegOffset + 0), Data);
2500 if (status < 0) { 2505 if (status < 0) {
2501 dbg("Writing DCR0 failed status-0x%x\n", status); 2506 dbg("Writing DCR0 failed status-0x%x", status);
2502 break; 2507 break;
2503 } else 2508 } else
2504 dbg("DCR0 Writing success status%d\n", status); 2509 dbg("DCR0 Writing success status%d", status);
2505 2510
2506 Data = 0x05; 2511 Data = 0x05;
2507 status = mos7840_set_reg_sync(serial->port[i], 2512 status = mos7840_set_reg_sync(serial->port[i],
2508 (__u16) (mos7840_port->DcrRegOffset + 1), Data); 2513 (__u16) (mos7840_port->DcrRegOffset + 1), Data);
2509 if (status < 0) { 2514 if (status < 0) {
2510 dbg("Writing DCR1 failed status-0x%x\n", status); 2515 dbg("Writing DCR1 failed status-0x%x", status);
2511 break; 2516 break;
2512 } else 2517 } else
2513 dbg("DCR1 Writing success status%d\n", status); 2518 dbg("DCR1 Writing success status%d", status);
2514 2519
2515 Data = 0x24; 2520 Data = 0x24;
2516 status = mos7840_set_reg_sync(serial->port[i], 2521 status = mos7840_set_reg_sync(serial->port[i],
2517 (__u16) (mos7840_port->DcrRegOffset + 2), Data); 2522 (__u16) (mos7840_port->DcrRegOffset + 2), Data);
2518 if (status < 0) { 2523 if (status < 0) {
2519 dbg("Writing DCR2 failed status-0x%x\n", status); 2524 dbg("Writing DCR2 failed status-0x%x", status);
2520 break; 2525 break;
2521 } else 2526 } else
2522 dbg("DCR2 Writing success status%d\n", status); 2527 dbg("DCR2 Writing success status%d", status);
2523 2528
2524 /* write values in clkstart0x0 and clkmulti 0x20 */ 2529 /* write values in clkstart0x0 and clkmulti 0x20 */
2525 Data = 0x0; 2530 Data = 0x0;
2526 status = mos7840_set_reg_sync(serial->port[i], 2531 status = mos7840_set_reg_sync(serial->port[i],
2527 CLK_START_VALUE_REGISTER, Data); 2532 CLK_START_VALUE_REGISTER, Data);
2528 if (status < 0) { 2533 if (status < 0) {
2529 dbg("Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status); 2534 dbg("Writing CLK_START_VALUE_REGISTER failed status-0x%x", status);
2530 break; 2535 break;
2531 } else 2536 } else
2532 dbg("CLK_START_VALUE_REGISTER Writing success status%d\n", status); 2537 dbg("CLK_START_VALUE_REGISTER Writing success status%d", status);
2533 2538
2534 Data = 0x20; 2539 Data = 0x20;
2535 status = mos7840_set_reg_sync(serial->port[i], 2540 status = mos7840_set_reg_sync(serial->port[i],
2536 CLK_MULTI_REGISTER, Data); 2541 CLK_MULTI_REGISTER, Data);
2537 if (status < 0) { 2542 if (status < 0) {
2538 dbg("Writing CLK_MULTI_REGISTER failed status-0x%x\n", 2543 dbg("Writing CLK_MULTI_REGISTER failed status-0x%x",
2539 status); 2544 status);
2540 goto error; 2545 goto error;
2541 } else 2546 } else
2542 dbg("CLK_MULTI_REGISTER Writing success status%d\n", 2547 dbg("CLK_MULTI_REGISTER Writing success status%d",
2543 status); 2548 status);
2544 2549
2545 /* write value 0x0 to scratchpad register */ 2550 /* write value 0x0 to scratchpad register */
@@ -2547,11 +2552,11 @@ static int mos7840_startup(struct usb_serial *serial)
2547 status = mos7840_set_uart_reg(serial->port[i], 2552 status = mos7840_set_uart_reg(serial->port[i],
2548 SCRATCH_PAD_REGISTER, Data); 2553 SCRATCH_PAD_REGISTER, Data);
2549 if (status < 0) { 2554 if (status < 0) {
2550 dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", 2555 dbg("Writing SCRATCH_PAD_REGISTER failed status-0x%x",
2551 status); 2556 status);
2552 break; 2557 break;
2553 } else 2558 } else
2554 dbg("SCRATCH_PAD_REGISTER Writing success status%d\n", 2559 dbg("SCRATCH_PAD_REGISTER Writing success status%d",
2555 status); 2560 status);
2556 2561
2557 /* Zero Length flag register */ 2562 /* Zero Length flag register */
@@ -2562,30 +2567,30 @@ static int mos7840_startup(struct usb_serial *serial)
2562 status = mos7840_set_reg_sync(serial->port[i], 2567 status = mos7840_set_reg_sync(serial->port[i],
2563 (__u16) (ZLP_REG1 + 2568 (__u16) (ZLP_REG1 +
2564 ((__u16)mos7840_port->port_num)), Data); 2569 ((__u16)mos7840_port->port_num)), Data);
2565 dbg("ZLIP offset%x\n", 2570 dbg("ZLIP offset %x",
2566 (__u16) (ZLP_REG1 + 2571 (__u16) (ZLP_REG1 +
2567 ((__u16) mos7840_port->port_num))); 2572 ((__u16) mos7840_port->port_num)));
2568 if (status < 0) { 2573 if (status < 0) {
2569 dbg("Writing ZLP_REG%d failed status-0x%x\n", 2574 dbg("Writing ZLP_REG%d failed status-0x%x",
2570 i + 2, status); 2575 i + 2, status);
2571 break; 2576 break;
2572 } else 2577 } else
2573 dbg("ZLP_REG%d Writing success status%d\n", 2578 dbg("ZLP_REG%d Writing success status%d",
2574 i + 2, status); 2579 i + 2, status);
2575 } else { 2580 } else {
2576 Data = 0xff; 2581 Data = 0xff;
2577 status = mos7840_set_reg_sync(serial->port[i], 2582 status = mos7840_set_reg_sync(serial->port[i],
2578 (__u16) (ZLP_REG1 + 2583 (__u16) (ZLP_REG1 +
2579 ((__u16)mos7840_port->port_num) - 0x1), Data); 2584 ((__u16)mos7840_port->port_num) - 0x1), Data);
2580 dbg("ZLIP offset%x\n", 2585 dbg("ZLIP offset %x",
2581 (__u16) (ZLP_REG1 + 2586 (__u16) (ZLP_REG1 +
2582 ((__u16) mos7840_port->port_num) - 0x1)); 2587 ((__u16) mos7840_port->port_num) - 0x1));
2583 if (status < 0) { 2588 if (status < 0) {
2584 dbg("Writing ZLP_REG%d failed status-0x%x\n", 2589 dbg("Writing ZLP_REG%d failed status-0x%x",
2585 i + 1, status); 2590 i + 1, status);
2586 break; 2591 break;
2587 } else 2592 } else
2588 dbg("ZLP_REG%d Writing success status%d\n", 2593 dbg("ZLP_REG%d Writing success status%d",
2589 i + 1, status); 2594 i + 1, status);
2590 2595
2591 } 2596 }
@@ -2599,15 +2604,16 @@ static int mos7840_startup(struct usb_serial *serial)
2599 goto error; 2604 goto error;
2600 } 2605 }
2601 } 2606 }
2607 dbg ("mos7840_startup: all ports configured...........");
2602 2608
2603 /* Zero Length flag enable */ 2609 /* Zero Length flag enable */
2604 Data = 0x0f; 2610 Data = 0x0f;
2605 status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data); 2611 status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
2606 if (status < 0) { 2612 if (status < 0) {
2607 dbg("Writing ZLP_REG5 failed status-0x%x\n", status); 2613 dbg("Writing ZLP_REG5 failed status-0x%x", status);
2608 goto error; 2614 goto error;
2609 } else 2615 } else
2610 dbg("ZLP_REG5 Writing success status%d\n", status); 2616 dbg("ZLP_REG5 Writing success status%d", status);
2611 2617
2612 /* setting configuration feature to one */ 2618 /* setting configuration feature to one */
2613 usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 2619 usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
@@ -2627,19 +2633,19 @@ error:
2627} 2633}
2628 2634
2629/**************************************************************************** 2635/****************************************************************************
2630 * mos7840_shutdown 2636 * mos7840_disconnect
2631 * This function is called whenever the device is removed from the usb bus. 2637 * This function is called whenever the device is removed from the usb bus.
2632 ****************************************************************************/ 2638 ****************************************************************************/
2633 2639
2634static void mos7840_shutdown(struct usb_serial *serial) 2640static void mos7840_disconnect(struct usb_serial *serial)
2635{ 2641{
2636 int i; 2642 int i;
2637 unsigned long flags; 2643 unsigned long flags;
2638 struct moschip_port *mos7840_port; 2644 struct moschip_port *mos7840_port;
2639 dbg("%s \n", " shutdown :entering.........."); 2645 dbg("%s", " disconnect :entering..........");
2640 2646
2641 if (!serial) { 2647 if (!serial) {
2642 dbg("%s", "Invalid Handler \n"); 2648 dbg("%s", "Invalid Handler");
2643 return; 2649 return;
2644 } 2650 }
2645 2651
@@ -2656,14 +2662,45 @@ static void mos7840_shutdown(struct usb_serial *serial)
2656 mos7840_port->zombie = 1; 2662 mos7840_port->zombie = 1;
2657 spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); 2663 spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
2658 usb_kill_urb(mos7840_port->control_urb); 2664 usb_kill_urb(mos7840_port->control_urb);
2665 }
2666 }
2667
2668 dbg("%s", "Thank u :: ");
2669
2670}
2671
2672/****************************************************************************
2673 * mos7840_release
2674 * This function is called when the usb_serial structure is freed.
2675 ****************************************************************************/
2676
2677static void mos7840_release(struct usb_serial *serial)
2678{
2679 int i;
2680 struct moschip_port *mos7840_port;
2681 dbg("%s", " release :entering..........");
2682
2683 if (!serial) {
2684 dbg("%s", "Invalid Handler");
2685 return;
2686 }
2687
2688 /* check for the ports to be closed,close the ports and disconnect */
2689
2690 /* free private structure allocated for serial port *
2691 * stop reads and writes on all ports */
2692
2693 for (i = 0; i < serial->num_ports; ++i) {
2694 mos7840_port = mos7840_get_port_private(serial->port[i]);
2695 dbg("mos7840_port %d = %p", i, mos7840_port);
2696 if (mos7840_port) {
2659 kfree(mos7840_port->ctrl_buf); 2697 kfree(mos7840_port->ctrl_buf);
2660 kfree(mos7840_port->dr); 2698 kfree(mos7840_port->dr);
2661 kfree(mos7840_port); 2699 kfree(mos7840_port);
2662 } 2700 }
2663 mos7840_set_port_private(serial->port[i], NULL);
2664 } 2701 }
2665 2702
2666 dbg("%s\n", "Thank u :: "); 2703 dbg("%s", "Thank u :: ");
2667 2704
2668} 2705}
2669 2706
@@ -2701,7 +2738,8 @@ static struct usb_serial_driver moschip7840_4port_device = {
2701 .tiocmget = mos7840_tiocmget, 2738 .tiocmget = mos7840_tiocmget,
2702 .tiocmset = mos7840_tiocmset, 2739 .tiocmset = mos7840_tiocmset,
2703 .attach = mos7840_startup, 2740 .attach = mos7840_startup,
2704 .shutdown = mos7840_shutdown, 2741 .disconnect = mos7840_disconnect,
2742 .release = mos7840_release,
2705 .read_bulk_callback = mos7840_bulk_in_callback, 2743 .read_bulk_callback = mos7840_bulk_in_callback,
2706 .read_int_callback = mos7840_interrupt_callback, 2744 .read_int_callback = mos7840_interrupt_callback,
2707}; 2745};
@@ -2714,7 +2752,7 @@ static int __init moschip7840_init(void)
2714{ 2752{
2715 int retval; 2753 int retval;
2716 2754
2717 dbg("%s \n", " mos7840_init :entering.........."); 2755 dbg("%s", " mos7840_init :entering..........");
2718 2756
2719 /* Register with the usb serial */ 2757 /* Register with the usb serial */
2720 retval = usb_serial_register(&moschip7840_4port_device); 2758 retval = usb_serial_register(&moschip7840_4port_device);
@@ -2722,14 +2760,14 @@ static int __init moschip7840_init(void)
2722 if (retval) 2760 if (retval)
2723 goto failed_port_device_register; 2761 goto failed_port_device_register;
2724 2762
2725 dbg("%s\n", "Entring..."); 2763 dbg("%s", "Entering...");
2726 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" 2764 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
2727 DRIVER_DESC "\n"); 2765 DRIVER_DESC "\n");
2728 2766
2729 /* Register with the usb */ 2767 /* Register with the usb */
2730 retval = usb_register(&io_driver); 2768 retval = usb_register(&io_driver);
2731 if (retval == 0) { 2769 if (retval == 0) {
2732 dbg("%s\n", "Leaving..."); 2770 dbg("%s", "Leaving...");
2733 return 0; 2771 return 0;
2734 } 2772 }
2735 usb_serial_deregister(&moschip7840_4port_device); 2773 usb_serial_deregister(&moschip7840_4port_device);
@@ -2744,13 +2782,13 @@ failed_port_device_register:
2744static void __exit moschip7840_exit(void) 2782static void __exit moschip7840_exit(void)
2745{ 2783{
2746 2784
2747 dbg("%s \n", " mos7840_exit :entering.........."); 2785 dbg("%s", " mos7840_exit :entering..........");
2748 2786
2749 usb_deregister(&io_driver); 2787 usb_deregister(&io_driver);
2750 2788
2751 usb_serial_deregister(&moschip7840_4port_device); 2789 usb_serial_deregister(&moschip7840_4port_device);
2752 2790
2753 dbg("%s\n", "Entring..."); 2791 dbg("%s", "Entering...");
2754} 2792}
2755 2793
2756module_init(moschip7840_init); 2794module_init(moschip7840_init);
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 1104617334f5..56857ddbd70b 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -72,7 +72,8 @@ static void omninet_write_bulk_callback(struct urb *urb);
72static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, 72static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
73 const unsigned char *buf, int count); 73 const unsigned char *buf, int count);
74static int omninet_write_room(struct tty_struct *tty); 74static int omninet_write_room(struct tty_struct *tty);
75static void omninet_shutdown(struct usb_serial *serial); 75static void omninet_disconnect(struct usb_serial *serial);
76static void omninet_release(struct usb_serial *serial);
76static int omninet_attach(struct usb_serial *serial); 77static int omninet_attach(struct usb_serial *serial);
77 78
78static struct usb_device_id id_table[] = { 79static struct usb_device_id id_table[] = {
@@ -108,7 +109,8 @@ static struct usb_serial_driver zyxel_omninet_device = {
108 .write_room = omninet_write_room, 109 .write_room = omninet_write_room,
109 .read_bulk_callback = omninet_read_bulk_callback, 110 .read_bulk_callback = omninet_read_bulk_callback,
110 .write_bulk_callback = omninet_write_bulk_callback, 111 .write_bulk_callback = omninet_write_bulk_callback,
111 .shutdown = omninet_shutdown, 112 .disconnect = omninet_disconnect,
113 .release = omninet_release,
112}; 114};
113 115
114 116
@@ -345,13 +347,22 @@ static void omninet_write_bulk_callback(struct urb *urb)
345} 347}
346 348
347 349
348static void omninet_shutdown(struct usb_serial *serial) 350static void omninet_disconnect(struct usb_serial *serial)
349{ 351{
350 struct usb_serial_port *wport = serial->port[1]; 352 struct usb_serial_port *wport = serial->port[1];
351 struct usb_serial_port *port = serial->port[0]; 353
352 dbg("%s", __func__); 354 dbg("%s", __func__);
353 355
354 usb_kill_urb(wport->write_urb); 356 usb_kill_urb(wport->write_urb);
357}
358
359
360static void omninet_release(struct usb_serial *serial)
361{
362 struct usb_serial_port *port = serial->port[0];
363
364 dbg("%s", __func__);
365
355 kfree(usb_get_serial_port_data(port)); 366 kfree(usb_get_serial_port_data(port));
356} 367}
357 368
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index c20480aa9755..336bba79ad32 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -463,7 +463,7 @@ error:
463 return retval; 463 return retval;
464} 464}
465 465
466static void opticon_shutdown(struct usb_serial *serial) 466static void opticon_disconnect(struct usb_serial *serial)
467{ 467{
468 struct opticon_private *priv = usb_get_serial_data(serial); 468 struct opticon_private *priv = usb_get_serial_data(serial);
469 469
@@ -471,9 +471,16 @@ static void opticon_shutdown(struct usb_serial *serial)
471 471
472 usb_kill_urb(priv->bulk_read_urb); 472 usb_kill_urb(priv->bulk_read_urb);
473 usb_free_urb(priv->bulk_read_urb); 473 usb_free_urb(priv->bulk_read_urb);
474}
475
476static void opticon_release(struct usb_serial *serial)
477{
478 struct opticon_private *priv = usb_get_serial_data(serial);
479
480 dbg("%s", __func__);
481
474 kfree(priv->bulk_in_buffer); 482 kfree(priv->bulk_in_buffer);
475 kfree(priv); 483 kfree(priv);
476 usb_set_serial_data(serial, NULL);
477} 484}
478 485
479static int opticon_suspend(struct usb_interface *intf, pm_message_t message) 486static int opticon_suspend(struct usb_interface *intf, pm_message_t message)
@@ -524,7 +531,8 @@ static struct usb_serial_driver opticon_device = {
524 .close = opticon_close, 531 .close = opticon_close,
525 .write = opticon_write, 532 .write = opticon_write,
526 .write_room = opticon_write_room, 533 .write_room = opticon_write_room,
527 .shutdown = opticon_shutdown, 534 .disconnect = opticon_disconnect,
535 .release = opticon_release,
528 .throttle = opticon_throttle, 536 .throttle = opticon_throttle,
529 .unthrottle = opticon_unthrottle, 537 .unthrottle = opticon_unthrottle,
530 .ioctl = opticon_ioctl, 538 .ioctl = opticon_ioctl,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index a16d69fadba1..575816e6ba37 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -43,13 +43,16 @@
43#include <linux/usb/serial.h> 43#include <linux/usb/serial.h>
44 44
45/* Function prototypes */ 45/* Function prototypes */
46static int option_probe(struct usb_serial *serial,
47 const struct usb_device_id *id);
46static int option_open(struct tty_struct *tty, struct usb_serial_port *port, 48static int option_open(struct tty_struct *tty, struct usb_serial_port *port,
47 struct file *filp); 49 struct file *filp);
48static void option_close(struct usb_serial_port *port); 50static void option_close(struct usb_serial_port *port);
49static void option_dtr_rts(struct usb_serial_port *port, int on); 51static void option_dtr_rts(struct usb_serial_port *port, int on);
50 52
51static int option_startup(struct usb_serial *serial); 53static int option_startup(struct usb_serial *serial);
52static void option_shutdown(struct usb_serial *serial); 54static void option_disconnect(struct usb_serial *serial);
55static void option_release(struct usb_serial *serial);
53static int option_write_room(struct tty_struct *tty); 56static int option_write_room(struct tty_struct *tty);
54 57
55static void option_instat_callback(struct urb *urb); 58static void option_instat_callback(struct urb *urb);
@@ -202,9 +205,9 @@ static int option_resume(struct usb_serial *serial);
202#define NOVATELWIRELESS_PRODUCT_MC727 0x4100 205#define NOVATELWIRELESS_PRODUCT_MC727 0x4100
203#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 206#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
204#define NOVATELWIRELESS_PRODUCT_U727 0x5010 207#define NOVATELWIRELESS_PRODUCT_U727 0x5010
208#define NOVATELWIRELESS_PRODUCT_MC760 0x6000
205 209
206/* FUTURE NOVATEL PRODUCTS */ 210/* FUTURE NOVATEL PRODUCTS */
207#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0X6000
208#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001 211#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001
209#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000 212#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000
210#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001 213#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001
@@ -305,6 +308,10 @@ static int option_resume(struct usb_serial *serial);
305#define DLINK_PRODUCT_DWM_652 0x3e04 308#define DLINK_PRODUCT_DWM_652 0x3e04
306 309
307 310
311/* TOSHIBA PRODUCTS */
312#define TOSHIBA_VENDOR_ID 0x0930
313#define TOSHIBA_PRODUCT_HSDPA_MINICARD 0x1302
314
308static struct usb_device_id option_ids[] = { 315static struct usb_device_id option_ids[] = {
309 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 316 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
310 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 317 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -422,7 +429,7 @@ static struct usb_device_id option_ids[] = {
422 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ 429 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
423 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ 430 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
424 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ 431 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
425 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) }, /* Novatel EVDO product */ 432 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
426 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */ 433 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */
427 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */ 434 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */
428 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */ 435 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */
@@ -523,6 +530,7 @@ static struct usb_device_id option_ids[] = {
523 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 530 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
524 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 531 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
525 { USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */ 532 { USB_DEVICE(0x1da5, 0x4515) }, /* BenQ H20 */
533 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
526 { } /* Terminating entry */ 534 { } /* Terminating entry */
527}; 535};
528MODULE_DEVICE_TABLE(usb, option_ids); 536MODULE_DEVICE_TABLE(usb, option_ids);
@@ -550,6 +558,7 @@ static struct usb_serial_driver option_1port_device = {
550 .usb_driver = &option_driver, 558 .usb_driver = &option_driver,
551 .id_table = option_ids, 559 .id_table = option_ids,
552 .num_ports = 1, 560 .num_ports = 1,
561 .probe = option_probe,
553 .open = option_open, 562 .open = option_open,
554 .close = option_close, 563 .close = option_close,
555 .dtr_rts = option_dtr_rts, 564 .dtr_rts = option_dtr_rts,
@@ -560,7 +569,8 @@ static struct usb_serial_driver option_1port_device = {
560 .tiocmget = option_tiocmget, 569 .tiocmget = option_tiocmget,
561 .tiocmset = option_tiocmset, 570 .tiocmset = option_tiocmset,
562 .attach = option_startup, 571 .attach = option_startup,
563 .shutdown = option_shutdown, 572 .disconnect = option_disconnect,
573 .release = option_release,
564 .read_int_callback = option_instat_callback, 574 .read_int_callback = option_instat_callback,
565 .suspend = option_suspend, 575 .suspend = option_suspend,
566 .resume = option_resume, 576 .resume = option_resume,
@@ -626,6 +636,18 @@ static void __exit option_exit(void)
626module_init(option_init); 636module_init(option_init);
627module_exit(option_exit); 637module_exit(option_exit);
628 638
639static int option_probe(struct usb_serial *serial,
640 const struct usb_device_id *id)
641{
642 /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
643 if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
644 serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
645 serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8)
646 return -ENODEV;
647
648 return 0;
649}
650
629static void option_set_termios(struct tty_struct *tty, 651static void option_set_termios(struct tty_struct *tty,
630 struct usb_serial_port *port, struct ktermios *old_termios) 652 struct usb_serial_port *port, struct ktermios *old_termios)
631{ 653{
@@ -1129,7 +1151,14 @@ static void stop_read_write_urbs(struct usb_serial *serial)
1129 } 1151 }
1130} 1152}
1131 1153
1132static void option_shutdown(struct usb_serial *serial) 1154static void option_disconnect(struct usb_serial *serial)
1155{
1156 dbg("%s", __func__);
1157
1158 stop_read_write_urbs(serial);
1159}
1160
1161static void option_release(struct usb_serial *serial)
1133{ 1162{
1134 int i, j; 1163 int i, j;
1135 struct usb_serial_port *port; 1164 struct usb_serial_port *port;
@@ -1137,8 +1166,6 @@ static void option_shutdown(struct usb_serial *serial)
1137 1166
1138 dbg("%s", __func__); 1167 dbg("%s", __func__);
1139 1168
1140 stop_read_write_urbs(serial);
1141
1142 /* Now free them */ 1169 /* Now free them */
1143 for (i = 0; i < serial->num_ports; ++i) { 1170 for (i = 0; i < serial->num_ports; ++i) {
1144 port = serial->port[i]; 1171 port = serial->port[i];
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 7de54781fe61..3cece27325e7 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -159,7 +159,7 @@ static int oti6858_tiocmget(struct tty_struct *tty, struct file *file);
159static int oti6858_tiocmset(struct tty_struct *tty, struct file *file, 159static int oti6858_tiocmset(struct tty_struct *tty, struct file *file,
160 unsigned int set, unsigned int clear); 160 unsigned int set, unsigned int clear);
161static int oti6858_startup(struct usb_serial *serial); 161static int oti6858_startup(struct usb_serial *serial);
162static void oti6858_shutdown(struct usb_serial *serial); 162static void oti6858_release(struct usb_serial *serial);
163 163
164/* functions operating on buffers */ 164/* functions operating on buffers */
165static struct oti6858_buf *oti6858_buf_alloc(unsigned int size); 165static struct oti6858_buf *oti6858_buf_alloc(unsigned int size);
@@ -194,7 +194,7 @@ static struct usb_serial_driver oti6858_device = {
194 .write_room = oti6858_write_room, 194 .write_room = oti6858_write_room,
195 .chars_in_buffer = oti6858_chars_in_buffer, 195 .chars_in_buffer = oti6858_chars_in_buffer,
196 .attach = oti6858_startup, 196 .attach = oti6858_startup,
197 .shutdown = oti6858_shutdown, 197 .release = oti6858_release,
198}; 198};
199 199
200struct oti6858_private { 200struct oti6858_private {
@@ -782,7 +782,7 @@ static int oti6858_ioctl(struct tty_struct *tty, struct file *file,
782} 782}
783 783
784 784
785static void oti6858_shutdown(struct usb_serial *serial) 785static void oti6858_release(struct usb_serial *serial)
786{ 786{
787 struct oti6858_private *priv; 787 struct oti6858_private *priv;
788 int i; 788 int i;
@@ -794,7 +794,6 @@ static void oti6858_shutdown(struct usb_serial *serial)
794 if (priv) { 794 if (priv) {
795 oti6858_buf_free(priv->buf); 795 oti6858_buf_free(priv->buf);
796 kfree(priv); 796 kfree(priv);
797 usb_set_serial_port_data(serial->port[i], NULL);
798 } 797 }
799 } 798 }
800} 799}
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index e02dc3d643c7..ec6c132a25b5 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -878,7 +878,7 @@ static void pl2303_break_ctl(struct tty_struct *tty, int break_state)
878 dbg("%s - error sending break = %d", __func__, result); 878 dbg("%s - error sending break = %d", __func__, result);
879} 879}
880 880
881static void pl2303_shutdown(struct usb_serial *serial) 881static void pl2303_release(struct usb_serial *serial)
882{ 882{
883 int i; 883 int i;
884 struct pl2303_private *priv; 884 struct pl2303_private *priv;
@@ -890,7 +890,6 @@ static void pl2303_shutdown(struct usb_serial *serial)
890 if (priv) { 890 if (priv) {
891 pl2303_buf_free(priv->buf); 891 pl2303_buf_free(priv->buf);
892 kfree(priv); 892 kfree(priv);
893 usb_set_serial_port_data(serial->port[i], NULL);
894 } 893 }
895 } 894 }
896} 895}
@@ -927,6 +926,8 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
927 spin_lock_irqsave(&priv->lock, flags); 926 spin_lock_irqsave(&priv->lock, flags);
928 priv->line_status = data[status_idx]; 927 priv->line_status = data[status_idx];
929 spin_unlock_irqrestore(&priv->lock, flags); 928 spin_unlock_irqrestore(&priv->lock, flags);
929 if (priv->line_status & UART_BREAK_ERROR)
930 usb_serial_handle_break(port);
930 wake_up_interruptible(&priv->delta_msr_wait); 931 wake_up_interruptible(&priv->delta_msr_wait);
931} 932}
932 933
@@ -1037,7 +1038,8 @@ static void pl2303_read_bulk_callback(struct urb *urb)
1037 if (line_status & UART_OVERRUN_ERROR) 1038 if (line_status & UART_OVERRUN_ERROR)
1038 tty_insert_flip_char(tty, 0, TTY_OVERRUN); 1039 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1039 for (i = 0; i < urb->actual_length; ++i) 1040 for (i = 0; i < urb->actual_length; ++i)
1040 tty_insert_flip_char(tty, data[i], tty_flag); 1041 if (!usb_serial_handle_sysrq_char(port, data[i]))
1042 tty_insert_flip_char(tty, data[i], tty_flag);
1041 tty_flip_buffer_push(tty); 1043 tty_flip_buffer_push(tty);
1042 } 1044 }
1043 tty_kref_put(tty); 1045 tty_kref_put(tty);
@@ -1120,7 +1122,7 @@ static struct usb_serial_driver pl2303_device = {
1120 .write_room = pl2303_write_room, 1122 .write_room = pl2303_write_room,
1121 .chars_in_buffer = pl2303_chars_in_buffer, 1123 .chars_in_buffer = pl2303_chars_in_buffer,
1122 .attach = pl2303_startup, 1124 .attach = pl2303_startup,
1123 .shutdown = pl2303_shutdown, 1125 .release = pl2303_release,
1124}; 1126};
1125 1127
1126static int __init pl2303_init(void) 1128static int __init pl2303_init(void)
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 17ac34f4d668..032f7aeb40a4 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -1,7 +1,10 @@
1/* 1/*
2 USB Driver for Sierra Wireless 2 USB Driver for Sierra Wireless
3 3
4 Copyright (C) 2006, 2007, 2008 Kevin Lloyd <klloyd@sierrawireless.com> 4 Copyright (C) 2006, 2007, 2008 Kevin Lloyd <klloyd@sierrawireless.com>,
5
6 Copyright (C) 2008, 2009 Elina Pasheva, Matthew Safar, Rory Filer
7 <linux@sierrawireless.com>
5 8
6 IMPORTANT DISCLAIMER: This driver is not commercially supported by 9 IMPORTANT DISCLAIMER: This driver is not commercially supported by
7 Sierra Wireless. Use at your own risk. 10 Sierra Wireless. Use at your own risk.
@@ -14,8 +17,8 @@
14 Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> 17 Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
15*/ 18*/
16 19
17#define DRIVER_VERSION "v.1.3.3" 20#define DRIVER_VERSION "v.1.3.7"
18#define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>" 21#define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer"
19#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" 22#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
20 23
21#include <linux/kernel.h> 24#include <linux/kernel.h>
@@ -30,10 +33,15 @@
30#define SWIMS_USB_REQUEST_SetPower 0x00 33#define SWIMS_USB_REQUEST_SetPower 0x00
31#define SWIMS_USB_REQUEST_SetNmea 0x07 34#define SWIMS_USB_REQUEST_SetNmea 0x07
32 35
33#define N_IN_URB 4 36#define N_IN_URB 8
34#define N_OUT_URB 4 37#define N_OUT_URB 64
35#define IN_BUFLEN 4096 38#define IN_BUFLEN 4096
36 39
40#define MAX_TRANSFER (PAGE_SIZE - 512)
41/* MAX_TRANSFER is chosen so that the VM is not stressed by
42 allocations > PAGE_SIZE and the number of packets in a page
43 is an integer 512 is the largest possible packet on EHCI */
44
37static int debug; 45static int debug;
38static int nmea; 46static int nmea;
39 47
@@ -46,7 +54,7 @@ struct sierra_iface_info {
46static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) 54static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
47{ 55{
48 int result; 56 int result;
49 dev_dbg(&udev->dev, "%s", __func__); 57 dev_dbg(&udev->dev, "%s\n", __func__);
50 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 58 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
51 SWIMS_USB_REQUEST_SetPower, /* __u8 request */ 59 SWIMS_USB_REQUEST_SetPower, /* __u8 request */
52 USB_TYPE_VENDOR, /* __u8 request type */ 60 USB_TYPE_VENDOR, /* __u8 request type */
@@ -61,7 +69,7 @@ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
61static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable) 69static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
62{ 70{
63 int result; 71 int result;
64 dev_dbg(&udev->dev, "%s", __func__); 72 dev_dbg(&udev->dev, "%s\n", __func__);
65 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 73 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
66 SWIMS_USB_REQUEST_SetNmea, /* __u8 request */ 74 SWIMS_USB_REQUEST_SetNmea, /* __u8 request */
67 USB_TYPE_VENDOR, /* __u8 request type */ 75 USB_TYPE_VENDOR, /* __u8 request type */
@@ -75,18 +83,22 @@ static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
75 83
76static int sierra_calc_num_ports(struct usb_serial *serial) 84static int sierra_calc_num_ports(struct usb_serial *serial)
77{ 85{
78 int result; 86 int num_ports = 0;
79 int *num_ports = usb_get_serial_data(serial); 87 u8 ifnum, numendpoints;
80 dev_dbg(&serial->dev->dev, "%s", __func__);
81 88
82 result = *num_ports; 89 dev_dbg(&serial->dev->dev, "%s\n", __func__);
83 90
84 if (result) { 91 ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
85 kfree(num_ports); 92 numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
86 usb_set_serial_data(serial, NULL);
87 }
88 93
89 return result; 94 /* Dummy interface present on some SKUs should be ignored */
95 if (ifnum == 0x99)
96 num_ports = 0;
97 else if (numendpoints <= 3)
98 num_ports = 1;
99 else
100 num_ports = (numendpoints-1)/2;
101 return num_ports;
90} 102}
91 103
92static int is_blacklisted(const u8 ifnum, 104static int is_blacklisted(const u8 ifnum,
@@ -111,7 +123,7 @@ static int sierra_calc_interface(struct usb_serial *serial)
111 int interface; 123 int interface;
112 struct usb_interface *p_interface; 124 struct usb_interface *p_interface;
113 struct usb_host_interface *p_host_interface; 125 struct usb_host_interface *p_host_interface;
114 dev_dbg(&serial->dev->dev, "%s", __func__); 126 dev_dbg(&serial->dev->dev, "%s\n", __func__);
115 127
116 /* Get the interface structure pointer from the serial struct */ 128 /* Get the interface structure pointer from the serial struct */
117 p_interface = serial->interface; 129 p_interface = serial->interface;
@@ -132,23 +144,12 @@ static int sierra_probe(struct usb_serial *serial,
132{ 144{
133 int result = 0; 145 int result = 0;
134 struct usb_device *udev; 146 struct usb_device *udev;
135 int *num_ports;
136 u8 ifnum; 147 u8 ifnum;
137 u8 numendpoints;
138 148
139 dev_dbg(&serial->dev->dev, "%s", __func__);
140
141 num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL);
142 if (!num_ports)
143 return -ENOMEM;
144
145 ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
146 numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
147 udev = serial->dev; 149 udev = serial->dev;
150 dev_dbg(&udev->dev, "%s\n", __func__);
148 151
149 /* Figure out the interface number from the serial structure */
150 ifnum = sierra_calc_interface(serial); 152 ifnum = sierra_calc_interface(serial);
151
152 /* 153 /*
153 * If this interface supports more than 1 alternate 154 * If this interface supports more than 1 alternate
154 * select the 2nd one 155 * select the 2nd one
@@ -160,20 +161,6 @@ static int sierra_probe(struct usb_serial *serial,
160 usb_set_interface(udev, ifnum, 1); 161 usb_set_interface(udev, ifnum, 1);
161 } 162 }
162 163
163 /* Dummy interface present on some SKUs should be ignored */
164 if (ifnum == 0x99)
165 *num_ports = 0;
166 else if (numendpoints <= 3)
167 *num_ports = 1;
168 else
169 *num_ports = (numendpoints-1)/2;
170
171 /*
172 * save off our num_ports info so that we can use it in the
173 * calc_num_ports callback
174 */
175 usb_set_serial_data(serial, (void *)num_ports);
176
177 /* ifnum could have changed - by calling usb_set_interface */ 164 /* ifnum could have changed - by calling usb_set_interface */
178 ifnum = sierra_calc_interface(serial); 165 ifnum = sierra_calc_interface(serial);
179 166
@@ -289,7 +276,7 @@ static int sierra_send_setup(struct usb_serial_port *port)
289 __u16 interface = 0; 276 __u16 interface = 0;
290 int val = 0; 277 int val = 0;
291 278
292 dev_dbg(&port->dev, "%s", __func__); 279 dev_dbg(&port->dev, "%s\n", __func__);
293 280
294 portdata = usb_get_serial_port_data(port); 281 portdata = usb_get_serial_port_data(port);
295 282
@@ -332,7 +319,7 @@ static int sierra_send_setup(struct usb_serial_port *port)
332static void sierra_set_termios(struct tty_struct *tty, 319static void sierra_set_termios(struct tty_struct *tty,
333 struct usb_serial_port *port, struct ktermios *old_termios) 320 struct usb_serial_port *port, struct ktermios *old_termios)
334{ 321{
335 dev_dbg(&port->dev, "%s", __func__); 322 dev_dbg(&port->dev, "%s\n", __func__);
336 tty_termios_copy_hw(tty->termios, old_termios); 323 tty_termios_copy_hw(tty->termios, old_termios);
337 sierra_send_setup(port); 324 sierra_send_setup(port);
338} 325}
@@ -343,7 +330,7 @@ static int sierra_tiocmget(struct tty_struct *tty, struct file *file)
343 unsigned int value; 330 unsigned int value;
344 struct sierra_port_private *portdata; 331 struct sierra_port_private *portdata;
345 332
346 dev_dbg(&port->dev, "%s", __func__); 333 dev_dbg(&port->dev, "%s\n", __func__);
347 portdata = usb_get_serial_port_data(port); 334 portdata = usb_get_serial_port_data(port);
348 335
349 value = ((portdata->rts_state) ? TIOCM_RTS : 0) | 336 value = ((portdata->rts_state) ? TIOCM_RTS : 0) |
@@ -394,14 +381,14 @@ static void sierra_outdat_callback(struct urb *urb)
394 int status = urb->status; 381 int status = urb->status;
395 unsigned long flags; 382 unsigned long flags;
396 383
397 dev_dbg(&port->dev, "%s - port %d", __func__, port->number); 384 dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
398 385
399 /* free up the transfer buffer, as usb_free_urb() does not do this */ 386 /* free up the transfer buffer, as usb_free_urb() does not do this */
400 kfree(urb->transfer_buffer); 387 kfree(urb->transfer_buffer);
401 388
402 if (status) 389 if (status)
403 dev_dbg(&port->dev, "%s - nonzero write bulk status " 390 dev_dbg(&port->dev, "%s - nonzero write bulk status "
404 "received: %d", __func__, status); 391 "received: %d\n", __func__, status);
405 392
406 spin_lock_irqsave(&portdata->lock, flags); 393 spin_lock_irqsave(&portdata->lock, flags);
407 --portdata->outstanding_urbs; 394 --portdata->outstanding_urbs;
@@ -419,50 +406,61 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
419 unsigned long flags; 406 unsigned long flags;
420 unsigned char *buffer; 407 unsigned char *buffer;
421 struct urb *urb; 408 struct urb *urb;
422 int status; 409 size_t writesize = min((size_t)count, (size_t)MAX_TRANSFER);
410 int retval = 0;
411
412 /* verify that we actually have some data to write */
413 if (count == 0)
414 return 0;
423 415
424 portdata = usb_get_serial_port_data(port); 416 portdata = usb_get_serial_port_data(port);
425 417
426 dev_dbg(&port->dev, "%s: write (%d chars)", __func__, count); 418 dev_dbg(&port->dev, "%s: write (%zd bytes)\n", __func__, writesize);
427 419
428 spin_lock_irqsave(&portdata->lock, flags); 420 spin_lock_irqsave(&portdata->lock, flags);
421 dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__,
422 portdata->outstanding_urbs);
429 if (portdata->outstanding_urbs > N_OUT_URB) { 423 if (portdata->outstanding_urbs > N_OUT_URB) {
430 spin_unlock_irqrestore(&portdata->lock, flags); 424 spin_unlock_irqrestore(&portdata->lock, flags);
431 dev_dbg(&port->dev, "%s - write limit hit\n", __func__); 425 dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
432 return 0; 426 return 0;
433 } 427 }
434 portdata->outstanding_urbs++; 428 portdata->outstanding_urbs++;
429 dev_dbg(&port->dev, "%s - 1, outstanding_urbs: %d\n", __func__,
430 portdata->outstanding_urbs);
435 spin_unlock_irqrestore(&portdata->lock, flags); 431 spin_unlock_irqrestore(&portdata->lock, flags);
436 432
437 buffer = kmalloc(count, GFP_ATOMIC); 433 buffer = kmalloc(writesize, GFP_ATOMIC);
438 if (!buffer) { 434 if (!buffer) {
439 dev_err(&port->dev, "out of memory\n"); 435 dev_err(&port->dev, "out of memory\n");
440 count = -ENOMEM; 436 retval = -ENOMEM;
441 goto error_no_buffer; 437 goto error_no_buffer;
442 } 438 }
443 439
444 urb = usb_alloc_urb(0, GFP_ATOMIC); 440 urb = usb_alloc_urb(0, GFP_ATOMIC);
445 if (!urb) { 441 if (!urb) {
446 dev_err(&port->dev, "no more free urbs\n"); 442 dev_err(&port->dev, "no more free urbs\n");
447 count = -ENOMEM; 443 retval = -ENOMEM;
448 goto error_no_urb; 444 goto error_no_urb;
449 } 445 }
450 446
451 memcpy(buffer, buf, count); 447 memcpy(buffer, buf, writesize);
452 448
453 usb_serial_debug_data(debug, &port->dev, __func__, count, buffer); 449 usb_serial_debug_data(debug, &port->dev, __func__, writesize, buffer);
454 450
455 usb_fill_bulk_urb(urb, serial->dev, 451 usb_fill_bulk_urb(urb, serial->dev,
456 usb_sndbulkpipe(serial->dev, 452 usb_sndbulkpipe(serial->dev,
457 port->bulk_out_endpointAddress), 453 port->bulk_out_endpointAddress),
458 buffer, count, sierra_outdat_callback, port); 454 buffer, writesize, sierra_outdat_callback, port);
455
456 /* Handle the need to send a zero length packet */
457 urb->transfer_flags |= URB_ZERO_PACKET;
459 458
460 /* send it down the pipe */ 459 /* send it down the pipe */
461 status = usb_submit_urb(urb, GFP_ATOMIC); 460 retval = usb_submit_urb(urb, GFP_ATOMIC);
462 if (status) { 461 if (retval) {
463 dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed " 462 dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed "
464 "with status = %d\n", __func__, status); 463 "with status = %d\n", __func__, retval);
465 count = status;
466 goto error; 464 goto error;
467 } 465 }
468 466
@@ -470,7 +468,7 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
470 * really free it when it is finished with it */ 468 * really free it when it is finished with it */
471 usb_free_urb(urb); 469 usb_free_urb(urb);
472 470
473 return count; 471 return writesize;
474error: 472error:
475 usb_free_urb(urb); 473 usb_free_urb(urb);
476error_no_urb: 474error_no_urb:
@@ -478,8 +476,10 @@ error_no_urb:
478error_no_buffer: 476error_no_buffer:
479 spin_lock_irqsave(&portdata->lock, flags); 477 spin_lock_irqsave(&portdata->lock, flags);
480 --portdata->outstanding_urbs; 478 --portdata->outstanding_urbs;
479 dev_dbg(&port->dev, "%s - 2. outstanding_urbs: %d\n", __func__,
480 portdata->outstanding_urbs);
481 spin_unlock_irqrestore(&portdata->lock, flags); 481 spin_unlock_irqrestore(&portdata->lock, flags);
482 return count; 482 return retval;
483} 483}
484 484
485static void sierra_indat_callback(struct urb *urb) 485static void sierra_indat_callback(struct urb *urb)
@@ -491,33 +491,39 @@ static void sierra_indat_callback(struct urb *urb)
491 unsigned char *data = urb->transfer_buffer; 491 unsigned char *data = urb->transfer_buffer;
492 int status = urb->status; 492 int status = urb->status;
493 493
494 dbg("%s: %p", __func__, urb);
495
496 endpoint = usb_pipeendpoint(urb->pipe); 494 endpoint = usb_pipeendpoint(urb->pipe);
497 port = urb->context; 495 port = urb->context;
496
497 dev_dbg(&port->dev, "%s: %p\n", __func__, urb);
498 498
499 if (status) { 499 if (status) {
500 dev_dbg(&port->dev, "%s: nonzero status: %d on" 500 dev_dbg(&port->dev, "%s: nonzero status: %d on"
501 " endpoint %02x.", __func__, status, endpoint); 501 " endpoint %02x\n", __func__, status, endpoint);
502 } else { 502 } else {
503 if (urb->actual_length) { 503 if (urb->actual_length) {
504 tty = tty_port_tty_get(&port->port); 504 tty = tty_port_tty_get(&port->port);
505
505 tty_buffer_request_room(tty, urb->actual_length); 506 tty_buffer_request_room(tty, urb->actual_length);
506 tty_insert_flip_string(tty, data, urb->actual_length); 507 tty_insert_flip_string(tty, data, urb->actual_length);
507 tty_flip_buffer_push(tty); 508 tty_flip_buffer_push(tty);
509
508 tty_kref_put(tty); 510 tty_kref_put(tty);
509 } else 511 usb_serial_debug_data(debug, &port->dev, __func__,
512 urb->actual_length, data);
513 } else {
510 dev_dbg(&port->dev, "%s: empty read urb" 514 dev_dbg(&port->dev, "%s: empty read urb"
511 " received", __func__); 515 " received\n", __func__);
512
513 /* Resubmit urb so we continue receiving */
514 if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
515 err = usb_submit_urb(urb, GFP_ATOMIC);
516 if (err)
517 dev_err(&port->dev, "resubmit read urb failed."
518 "(%d)\n", err);
519 } 516 }
520 } 517 }
518
519 /* Resubmit urb so we continue receiving */
520 if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
521 err = usb_submit_urb(urb, GFP_ATOMIC);
522 if (err)
523 dev_err(&port->dev, "resubmit read urb failed."
524 "(%d)\n", err);
525 }
526
521 return; 527 return;
522} 528}
523 529
@@ -529,8 +535,7 @@ static void sierra_instat_callback(struct urb *urb)
529 struct sierra_port_private *portdata = usb_get_serial_port_data(port); 535 struct sierra_port_private *portdata = usb_get_serial_port_data(port);
530 struct usb_serial *serial = port->serial; 536 struct usb_serial *serial = port->serial;
531 537
532 dev_dbg(&port->dev, "%s", __func__); 538 dev_dbg(&port->dev, "%s: urb %p port %p has data %p\n", __func__,
533 dev_dbg(&port->dev, "%s: urb %p port %p has data %p", __func__,
534 urb, port, portdata); 539 urb, port, portdata);
535 540
536 if (status == 0) { 541 if (status == 0) {
@@ -550,7 +555,7 @@ static void sierra_instat_callback(struct urb *urb)
550 sizeof(struct usb_ctrlrequest)); 555 sizeof(struct usb_ctrlrequest));
551 struct tty_struct *tty; 556 struct tty_struct *tty;
552 557
553 dev_dbg(&port->dev, "%s: signal x%x", __func__, 558 dev_dbg(&port->dev, "%s: signal x%x\n", __func__,
554 signals); 559 signals);
555 560
556 old_dcd_state = portdata->dcd_state; 561 old_dcd_state = portdata->dcd_state;
@@ -565,20 +570,20 @@ static void sierra_instat_callback(struct urb *urb)
565 tty_hangup(tty); 570 tty_hangup(tty);
566 tty_kref_put(tty); 571 tty_kref_put(tty);
567 } else { 572 } else {
568 dev_dbg(&port->dev, "%s: type %x req %x", 573 dev_dbg(&port->dev, "%s: type %x req %x\n",
569 __func__, req_pkt->bRequestType, 574 __func__, req_pkt->bRequestType,
570 req_pkt->bRequest); 575 req_pkt->bRequest);
571 } 576 }
572 } else 577 } else
573 dev_dbg(&port->dev, "%s: error %d", __func__, status); 578 dev_dbg(&port->dev, "%s: error %d\n", __func__, status);
574 579
575 /* Resubmit urb so we continue receiving IRQ data */ 580 /* Resubmit urb so we continue receiving IRQ data */
576 if (status != -ESHUTDOWN) { 581 if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) {
577 urb->dev = serial->dev; 582 urb->dev = serial->dev;
578 err = usb_submit_urb(urb, GFP_ATOMIC); 583 err = usb_submit_urb(urb, GFP_ATOMIC);
579 if (err) 584 if (err)
580 dev_dbg(&port->dev, "%s: resubmit intr urb " 585 dev_err(&port->dev, "%s: resubmit intr urb "
581 "failed. (%d)", __func__, err); 586 "failed. (%d)\n", __func__, err);
582 } 587 }
583} 588}
584 589
@@ -588,7 +593,7 @@ static int sierra_write_room(struct tty_struct *tty)
588 struct sierra_port_private *portdata = usb_get_serial_port_data(port); 593 struct sierra_port_private *portdata = usb_get_serial_port_data(port);
589 unsigned long flags; 594 unsigned long flags;
590 595
591 dev_dbg(&port->dev, "%s - port %d", __func__, port->number); 596 dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
592 597
593 /* try to give a good number back based on if we have any free urbs at 598 /* try to give a good number back based on if we have any free urbs at
594 * this point in time */ 599 * this point in time */
@@ -729,7 +734,7 @@ static int sierra_open(struct tty_struct *tty,
729 734
730 portdata = usb_get_serial_port_data(port); 735 portdata = usb_get_serial_port_data(port);
731 736
732 dev_dbg(&port->dev, "%s", __func__); 737 dev_dbg(&port->dev, "%s\n", __func__);
733 738
734 /* Set some sane defaults */ 739 /* Set some sane defaults */
735 portdata->rts_state = 1; 740 portdata->rts_state = 1;
@@ -782,7 +787,7 @@ static int sierra_startup(struct usb_serial *serial)
782 struct sierra_port_private *portdata; 787 struct sierra_port_private *portdata;
783 int i; 788 int i;
784 789
785 dev_dbg(&serial->dev->dev, "%s", __func__); 790 dev_dbg(&serial->dev->dev, "%s\n", __func__);
786 791
787 /* Set Device mode to D0 */ 792 /* Set Device mode to D0 */
788 sierra_set_power_state(serial->dev, 0x0000); 793 sierra_set_power_state(serial->dev, 0x0000);
@@ -797,7 +802,7 @@ static int sierra_startup(struct usb_serial *serial)
797 portdata = kzalloc(sizeof(*portdata), GFP_KERNEL); 802 portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
798 if (!portdata) { 803 if (!portdata) {
799 dev_dbg(&port->dev, "%s: kmalloc for " 804 dev_dbg(&port->dev, "%s: kmalloc for "
800 "sierra_port_private (%d) failed!.", 805 "sierra_port_private (%d) failed!.\n",
801 __func__, i); 806 __func__, i);
802 return -ENOMEM; 807 return -ENOMEM;
803 } 808 }
@@ -809,13 +814,13 @@ static int sierra_startup(struct usb_serial *serial)
809 return 0; 814 return 0;
810} 815}
811 816
812static void sierra_shutdown(struct usb_serial *serial) 817static void sierra_disconnect(struct usb_serial *serial)
813{ 818{
814 int i; 819 int i;
815 struct usb_serial_port *port; 820 struct usb_serial_port *port;
816 struct sierra_port_private *portdata; 821 struct sierra_port_private *portdata;
817 822
818 dev_dbg(&serial->dev->dev, "%s", __func__); 823 dev_dbg(&serial->dev->dev, "%s\n", __func__);
819 824
820 for (i = 0; i < serial->num_ports; ++i) { 825 for (i = 0; i < serial->num_ports; ++i) {
821 port = serial->port[i]; 826 port = serial->port[i];
@@ -848,7 +853,7 @@ static struct usb_serial_driver sierra_device = {
848 .tiocmget = sierra_tiocmget, 853 .tiocmget = sierra_tiocmget,
849 .tiocmset = sierra_tiocmset, 854 .tiocmset = sierra_tiocmset,
850 .attach = sierra_startup, 855 .attach = sierra_startup,
851 .shutdown = sierra_shutdown, 856 .disconnect = sierra_disconnect,
852 .read_int_callback = sierra_instat_callback, 857 .read_int_callback = sierra_instat_callback,
853}; 858};
854 859
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 8f7ed8f13996..3c249d8e8b8e 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -356,7 +356,7 @@ cleanup:
356} 356}
357 357
358/* call when the device plug out. free all the memory alloced by probe */ 358/* call when the device plug out. free all the memory alloced by probe */
359static void spcp8x5_shutdown(struct usb_serial *serial) 359static void spcp8x5_release(struct usb_serial *serial)
360{ 360{
361 int i; 361 int i;
362 struct spcp8x5_private *priv; 362 struct spcp8x5_private *priv;
@@ -366,7 +366,6 @@ static void spcp8x5_shutdown(struct usb_serial *serial)
366 if (priv) { 366 if (priv) {
367 free_ringbuf(priv->buf); 367 free_ringbuf(priv->buf);
368 kfree(priv); 368 kfree(priv);
369 usb_set_serial_port_data(serial->port[i] , NULL);
370 } 369 }
371 } 370 }
372} 371}
@@ -1020,7 +1019,7 @@ static struct usb_serial_driver spcp8x5_device = {
1020 .write_bulk_callback = spcp8x5_write_bulk_callback, 1019 .write_bulk_callback = spcp8x5_write_bulk_callback,
1021 .chars_in_buffer = spcp8x5_chars_in_buffer, 1020 .chars_in_buffer = spcp8x5_chars_in_buffer,
1022 .attach = spcp8x5_startup, 1021 .attach = spcp8x5_startup,
1023 .shutdown = spcp8x5_shutdown, 1022 .release = spcp8x5_release,
1024}; 1023};
1025 1024
1026static int __init spcp8x5_init(void) 1025static int __init spcp8x5_init(void)
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 8b07ebc6baeb..6157fac9366b 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -267,7 +267,7 @@ error:
267 return retval; 267 return retval;
268} 268}
269 269
270static void symbol_shutdown(struct usb_serial *serial) 270static void symbol_disconnect(struct usb_serial *serial)
271{ 271{
272 struct symbol_private *priv = usb_get_serial_data(serial); 272 struct symbol_private *priv = usb_get_serial_data(serial);
273 273
@@ -275,9 +275,16 @@ static void symbol_shutdown(struct usb_serial *serial)
275 275
276 usb_kill_urb(priv->int_urb); 276 usb_kill_urb(priv->int_urb);
277 usb_free_urb(priv->int_urb); 277 usb_free_urb(priv->int_urb);
278}
279
280static void symbol_release(struct usb_serial *serial)
281{
282 struct symbol_private *priv = usb_get_serial_data(serial);
283
284 dbg("%s", __func__);
285
278 kfree(priv->int_buffer); 286 kfree(priv->int_buffer);
279 kfree(priv); 287 kfree(priv);
280 usb_set_serial_data(serial, NULL);
281} 288}
282 289
283static struct usb_driver symbol_driver = { 290static struct usb_driver symbol_driver = {
@@ -299,7 +306,8 @@ static struct usb_serial_driver symbol_device = {
299 .attach = symbol_startup, 306 .attach = symbol_startup,
300 .open = symbol_open, 307 .open = symbol_open,
301 .close = symbol_close, 308 .close = symbol_close,
302 .shutdown = symbol_shutdown, 309 .disconnect = symbol_disconnect,
310 .release = symbol_release,
303 .throttle = symbol_throttle, 311 .throttle = symbol_throttle,
304 .unthrottle = symbol_unthrottle, 312 .unthrottle = symbol_unthrottle,
305}; 313};
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 42cb04c403be..991d8232e376 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -97,7 +97,7 @@ struct ti_device {
97/* Function Declarations */ 97/* Function Declarations */
98 98
99static int ti_startup(struct usb_serial *serial); 99static int ti_startup(struct usb_serial *serial);
100static void ti_shutdown(struct usb_serial *serial); 100static void ti_release(struct usb_serial *serial);
101static int ti_open(struct tty_struct *tty, struct usb_serial_port *port, 101static int ti_open(struct tty_struct *tty, struct usb_serial_port *port,
102 struct file *file); 102 struct file *file);
103static void ti_close(struct usb_serial_port *port); 103static void ti_close(struct usb_serial_port *port);
@@ -230,7 +230,7 @@ static struct usb_serial_driver ti_1port_device = {
230 .id_table = ti_id_table_3410, 230 .id_table = ti_id_table_3410,
231 .num_ports = 1, 231 .num_ports = 1,
232 .attach = ti_startup, 232 .attach = ti_startup,
233 .shutdown = ti_shutdown, 233 .release = ti_release,
234 .open = ti_open, 234 .open = ti_open,
235 .close = ti_close, 235 .close = ti_close,
236 .write = ti_write, 236 .write = ti_write,
@@ -258,7 +258,7 @@ static struct usb_serial_driver ti_2port_device = {
258 .id_table = ti_id_table_5052, 258 .id_table = ti_id_table_5052,
259 .num_ports = 2, 259 .num_ports = 2,
260 .attach = ti_startup, 260 .attach = ti_startup,
261 .shutdown = ti_shutdown, 261 .release = ti_release,
262 .open = ti_open, 262 .open = ti_open,
263 .close = ti_close, 263 .close = ti_close,
264 .write = ti_write, 264 .write = ti_write,
@@ -473,7 +473,7 @@ free_tdev:
473} 473}
474 474
475 475
476static void ti_shutdown(struct usb_serial *serial) 476static void ti_release(struct usb_serial *serial)
477{ 477{
478 int i; 478 int i;
479 struct ti_device *tdev = usb_get_serial_data(serial); 479 struct ti_device *tdev = usb_get_serial_data(serial);
@@ -486,12 +486,10 @@ static void ti_shutdown(struct usb_serial *serial)
486 if (tport) { 486 if (tport) {
487 ti_buf_free(tport->tp_write_buf); 487 ti_buf_free(tport->tp_write_buf);
488 kfree(tport); 488 kfree(tport);
489 usb_set_serial_port_data(serial->port[i], NULL);
490 } 489 }
491 } 490 }
492 491
493 kfree(tdev); 492 kfree(tdev);
494 usb_set_serial_data(serial, NULL);
495} 493}
496 494
497 495
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 1967a7edc10c..d595aa5586a7 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -141,6 +141,14 @@ static void destroy_serial(struct kref *kref)
141 if (serial->minor != SERIAL_TTY_NO_MINOR) 141 if (serial->minor != SERIAL_TTY_NO_MINOR)
142 return_serial(serial); 142 return_serial(serial);
143 143
144 serial->type->release(serial);
145
146 for (i = 0; i < serial->num_ports; ++i) {
147 port = serial->port[i];
148 if (port)
149 put_device(&port->dev);
150 }
151
144 /* If this is a "fake" port, we have to clean it up here, as it will 152 /* If this is a "fake" port, we have to clean it up here, as it will
145 * not get cleaned up in port_release() as it was never registered with 153 * not get cleaned up in port_release() as it was never registered with
146 * the driver core */ 154 * the driver core */
@@ -148,9 +156,8 @@ static void destroy_serial(struct kref *kref)
148 for (i = serial->num_ports; 156 for (i = serial->num_ports;
149 i < serial->num_port_pointers; ++i) { 157 i < serial->num_port_pointers; ++i) {
150 port = serial->port[i]; 158 port = serial->port[i];
151 if (!port) 159 if (port)
152 continue; 160 port_free(port);
153 port_free(port);
154 } 161 }
155 } 162 }
156 163
@@ -1046,10 +1053,15 @@ int usb_serial_probe(struct usb_interface *interface,
1046 1053
1047 dev_set_name(&port->dev, "ttyUSB%d", port->number); 1054 dev_set_name(&port->dev, "ttyUSB%d", port->number);
1048 dbg ("%s - registering %s", __func__, dev_name(&port->dev)); 1055 dbg ("%s - registering %s", __func__, dev_name(&port->dev));
1056 port->dev_state = PORT_REGISTERING;
1049 retval = device_register(&port->dev); 1057 retval = device_register(&port->dev);
1050 if (retval) 1058 if (retval) {
1051 dev_err(&port->dev, "Error registering port device, " 1059 dev_err(&port->dev, "Error registering port device, "
1052 "continuing\n"); 1060 "continuing\n");
1061 port->dev_state = PORT_UNREGISTERED;
1062 } else {
1063 port->dev_state = PORT_REGISTERED;
1064 }
1053 } 1065 }
1054 1066
1055 usb_serial_console_init(debug, minor); 1067 usb_serial_console_init(debug, minor);
@@ -1113,10 +1125,6 @@ void usb_serial_disconnect(struct usb_interface *interface)
1113 serial->disconnected = 1; 1125 serial->disconnected = 1;
1114 mutex_unlock(&serial->disc_mutex); 1126 mutex_unlock(&serial->disc_mutex);
1115 1127
1116 /* Unfortunately, many of the sub-drivers expect the port structures
1117 * to exist when their shutdown method is called, so we have to go
1118 * through this awkward two-step unregistration procedure.
1119 */
1120 for (i = 0; i < serial->num_ports; ++i) { 1128 for (i = 0; i < serial->num_ports; ++i) {
1121 port = serial->port[i]; 1129 port = serial->port[i];
1122 if (port) { 1130 if (port) {
@@ -1130,17 +1138,25 @@ void usb_serial_disconnect(struct usb_interface *interface)
1130 } 1138 }
1131 kill_traffic(port); 1139 kill_traffic(port);
1132 cancel_work_sync(&port->work); 1140 cancel_work_sync(&port->work);
1133 device_del(&port->dev); 1141 if (port->dev_state == PORT_REGISTERED) {
1134 } 1142
1135 } 1143 /* Make sure the port is bound so that the
1136 serial->type->shutdown(serial); 1144 * driver's port_remove method is called.
1137 for (i = 0; i < serial->num_ports; ++i) { 1145 */
1138 port = serial->port[i]; 1146 if (!port->dev.driver) {
1139 if (port) { 1147 int rc;
1140 put_device(&port->dev); 1148
1141 serial->port[i] = NULL; 1149 port->dev.driver =
1150 &serial->type->driver;
1151 rc = device_bind_driver(&port->dev);
1152 }
1153 port->dev_state = PORT_UNREGISTERING;
1154 device_del(&port->dev);
1155 port->dev_state = PORT_UNREGISTERED;
1156 }
1142 } 1157 }
1143 } 1158 }
1159 serial->type->disconnect(serial);
1144 1160
1145 /* let the last holder of this object 1161 /* let the last holder of this object
1146 * cause it to be cleaned up */ 1162 * cause it to be cleaned up */
@@ -1318,7 +1334,8 @@ static void fixup_generic(struct usb_serial_driver *device)
1318 set_to_generic_if_null(device, chars_in_buffer); 1334 set_to_generic_if_null(device, chars_in_buffer);
1319 set_to_generic_if_null(device, read_bulk_callback); 1335 set_to_generic_if_null(device, read_bulk_callback);
1320 set_to_generic_if_null(device, write_bulk_callback); 1336 set_to_generic_if_null(device, write_bulk_callback);
1321 set_to_generic_if_null(device, shutdown); 1337 set_to_generic_if_null(device, disconnect);
1338 set_to_generic_if_null(device, release);
1322} 1339}
1323 1340
1324int usb_serial_register(struct usb_serial_driver *driver) 1341int usb_serial_register(struct usb_serial_driver *driver)
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 6c9cbb59552a..614800972dc3 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -15,7 +15,19 @@
15#include <linux/usb.h> 15#include <linux/usb.h>
16#include <linux/usb/serial.h> 16#include <linux/usb/serial.h>
17 17
18#define URB_DEBUG_MAX_IN_FLIGHT_URBS 4000
18#define USB_DEBUG_MAX_PACKET_SIZE 8 19#define USB_DEBUG_MAX_PACKET_SIZE 8
20#define USB_DEBUG_BRK_SIZE 8
21static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
22 0x00,
23 0xff,
24 0x01,
25 0xfe,
26 0x00,
27 0xfe,
28 0x01,
29 0xff,
30};
19 31
20static struct usb_device_id id_table [] = { 32static struct usb_device_id id_table [] = {
21 { USB_DEVICE(0x0525, 0x127a) }, 33 { USB_DEVICE(0x0525, 0x127a) },
@@ -38,6 +50,32 @@ static int usb_debug_open(struct tty_struct *tty, struct usb_serial_port *port,
38 return usb_serial_generic_open(tty, port, filp); 50 return usb_serial_generic_open(tty, port, filp);
39} 51}
40 52
53/* This HW really does not support a serial break, so one will be
54 * emulated when ever the break state is set to true.
55 */
56static void usb_debug_break_ctl(struct tty_struct *tty, int break_state)
57{
58 struct usb_serial_port *port = tty->driver_data;
59 if (!break_state)
60 return;
61 usb_serial_generic_write(tty, port, USB_DEBUG_BRK, USB_DEBUG_BRK_SIZE);
62}
63
64static void usb_debug_read_bulk_callback(struct urb *urb)
65{
66 struct usb_serial_port *port = urb->context;
67
68 if (urb->actual_length == USB_DEBUG_BRK_SIZE &&
69 memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
70 USB_DEBUG_BRK_SIZE) == 0) {
71 usb_serial_handle_break(port);
72 usb_serial_generic_resubmit_read_urb(port, GFP_ATOMIC);
73 return;
74 }
75
76 usb_serial_generic_read_bulk_callback(urb);
77}
78
41static struct usb_serial_driver debug_device = { 79static struct usb_serial_driver debug_device = {
42 .driver = { 80 .driver = {
43 .owner = THIS_MODULE, 81 .owner = THIS_MODULE,
@@ -46,6 +84,9 @@ static struct usb_serial_driver debug_device = {
46 .id_table = id_table, 84 .id_table = id_table,
47 .num_ports = 1, 85 .num_ports = 1,
48 .open = usb_debug_open, 86 .open = usb_debug_open,
87 .max_in_flight_urbs = URB_DEBUG_MAX_IN_FLIGHT_URBS,
88 .break_ctl = usb_debug_break_ctl,
89 .read_bulk_callback = usb_debug_read_bulk_callback,
49}; 90};
50 91
51static int __init debug_init(void) 92static int __init debug_init(void)
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index b15f1c0e1d4a..f5d0f64dcc52 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -47,7 +47,7 @@ static void visor_unthrottle(struct tty_struct *tty);
47static int visor_probe(struct usb_serial *serial, 47static int visor_probe(struct usb_serial *serial,
48 const struct usb_device_id *id); 48 const struct usb_device_id *id);
49static int visor_calc_num_ports(struct usb_serial *serial); 49static int visor_calc_num_ports(struct usb_serial *serial);
50static void visor_shutdown(struct usb_serial *serial); 50static void visor_release(struct usb_serial *serial);
51static void visor_write_bulk_callback(struct urb *urb); 51static void visor_write_bulk_callback(struct urb *urb);
52static void visor_read_bulk_callback(struct urb *urb); 52static void visor_read_bulk_callback(struct urb *urb);
53static void visor_read_int_callback(struct urb *urb); 53static void visor_read_int_callback(struct urb *urb);
@@ -202,7 +202,7 @@ static struct usb_serial_driver handspring_device = {
202 .attach = treo_attach, 202 .attach = treo_attach,
203 .probe = visor_probe, 203 .probe = visor_probe,
204 .calc_num_ports = visor_calc_num_ports, 204 .calc_num_ports = visor_calc_num_ports,
205 .shutdown = visor_shutdown, 205 .release = visor_release,
206 .write = visor_write, 206 .write = visor_write,
207 .write_room = visor_write_room, 207 .write_room = visor_write_room,
208 .write_bulk_callback = visor_write_bulk_callback, 208 .write_bulk_callback = visor_write_bulk_callback,
@@ -227,7 +227,7 @@ static struct usb_serial_driver clie_5_device = {
227 .attach = clie_5_attach, 227 .attach = clie_5_attach,
228 .probe = visor_probe, 228 .probe = visor_probe,
229 .calc_num_ports = visor_calc_num_ports, 229 .calc_num_ports = visor_calc_num_ports,
230 .shutdown = visor_shutdown, 230 .release = visor_release,
231 .write = visor_write, 231 .write = visor_write,
232 .write_room = visor_write_room, 232 .write_room = visor_write_room,
233 .write_bulk_callback = visor_write_bulk_callback, 233 .write_bulk_callback = visor_write_bulk_callback,
@@ -918,7 +918,7 @@ static int clie_5_attach(struct usb_serial *serial)
918 return generic_startup(serial); 918 return generic_startup(serial);
919} 919}
920 920
921static void visor_shutdown(struct usb_serial *serial) 921static void visor_release(struct usb_serial *serial)
922{ 922{
923 struct visor_private *priv; 923 struct visor_private *priv;
924 int i; 924 int i;
@@ -927,10 +927,7 @@ static void visor_shutdown(struct usb_serial *serial)
927 927
928 for (i = 0; i < serial->num_ports; i++) { 928 for (i = 0; i < serial->num_ports; i++) {
929 priv = usb_get_serial_port_data(serial->port[i]); 929 priv = usb_get_serial_port_data(serial->port[i]);
930 if (priv) { 930 kfree(priv);
931 usb_set_serial_port_data(serial->port[i], NULL);
932 kfree(priv);
933 }
934 } 931 }
935} 932}
936 933
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 7c7295d09f34..8d126dd7a02e 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -144,7 +144,7 @@ static int whiteheat_firmware_attach(struct usb_serial *serial);
144 144
145/* function prototypes for the Connect Tech WhiteHEAT serial converter */ 145/* function prototypes for the Connect Tech WhiteHEAT serial converter */
146static int whiteheat_attach(struct usb_serial *serial); 146static int whiteheat_attach(struct usb_serial *serial);
147static void whiteheat_shutdown(struct usb_serial *serial); 147static void whiteheat_release(struct usb_serial *serial);
148static int whiteheat_open(struct tty_struct *tty, 148static int whiteheat_open(struct tty_struct *tty,
149 struct usb_serial_port *port, struct file *filp); 149 struct usb_serial_port *port, struct file *filp);
150static void whiteheat_close(struct usb_serial_port *port); 150static void whiteheat_close(struct usb_serial_port *port);
@@ -189,7 +189,7 @@ static struct usb_serial_driver whiteheat_device = {
189 .id_table = id_table_std, 189 .id_table = id_table_std,
190 .num_ports = 4, 190 .num_ports = 4,
191 .attach = whiteheat_attach, 191 .attach = whiteheat_attach,
192 .shutdown = whiteheat_shutdown, 192 .release = whiteheat_release,
193 .open = whiteheat_open, 193 .open = whiteheat_open,
194 .close = whiteheat_close, 194 .close = whiteheat_close,
195 .write = whiteheat_write, 195 .write = whiteheat_write,
@@ -617,7 +617,7 @@ no_command_buffer:
617} 617}
618 618
619 619
620static void whiteheat_shutdown(struct usb_serial *serial) 620static void whiteheat_release(struct usb_serial *serial)
621{ 621{
622 struct usb_serial_port *command_port; 622 struct usb_serial_port *command_port;
623 struct usb_serial_port *port; 623 struct usb_serial_port *port;
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 2dd9bd4bff56..ec17c96371af 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -52,7 +52,7 @@ int usb_stor_euscsi_init(struct us_data *us)
52 us->iobuf[0] = 0x1; 52 us->iobuf[0] = 0x1;
53 result = usb_stor_control_msg(us, us->send_ctrl_pipe, 53 result = usb_stor_control_msg(us, us->send_ctrl_pipe,
54 0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR, 54 0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR,
55 0x01, 0x0, us->iobuf, 0x1, 5*HZ); 55 0x01, 0x0, us->iobuf, 0x1, 5000);
56 US_DEBUGP("-- result is %d\n", result); 56 US_DEBUGP("-- result is %d\n", result);
57 57
58 return 0; 58 return 0;
@@ -80,14 +80,16 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
80 80
81 res = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, 81 res = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb,
82 US_BULK_CB_WRAP_LEN, &partial); 82 US_BULK_CB_WRAP_LEN, &partial);
83 if(res) 83 if (res)
84 return res; 84 return -EIO;
85 85
86 US_DEBUGP("Getting status packet...\n"); 86 US_DEBUGP("Getting status packet...\n");
87 res = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, 87 res = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs,
88 US_BULK_CS_WRAP_LEN, &partial); 88 US_BULK_CS_WRAP_LEN, &partial);
89 if (res)
90 return -EIO;
89 91
90 return (res ? -1 : 0); 92 return 0;
91} 93}
92 94
93/* This places the HUAWEI E220 devices in multi-port mode */ 95/* This places the HUAWEI E220 devices in multi-port mode */
@@ -99,6 +101,6 @@ int usb_stor_huawei_e220_init(struct us_data *us)
99 USB_REQ_SET_FEATURE, 101 USB_REQ_SET_FEATURE,
100 USB_TYPE_STANDARD | USB_RECIP_DEVICE, 102 USB_TYPE_STANDARD | USB_RECIP_DEVICE,
101 0x01, 0x0, NULL, 0x0, 1000); 103 0x01, 0x0, NULL, 0x0, 1000);
102 US_DEBUGP("usb_control_msg performing result is %d\n", result); 104 US_DEBUGP("Huawei mode set result is %d\n", result);
103 return (result ? 0 : -1); 105 return (result ? 0 : -ENODEV);
104} 106}
diff --git a/drivers/usb/storage/option_ms.c b/drivers/usb/storage/option_ms.c
index 353f922939a4..d41cc0a970f7 100644
--- a/drivers/usb/storage/option_ms.c
+++ b/drivers/usb/storage/option_ms.c
@@ -37,7 +37,7 @@ MODULE_PARM_DESC(option_zero_cd, "ZeroCD mode (1=Force Modem (default),"
37 37
38#define RESPONSE_LEN 1024 38#define RESPONSE_LEN 1024
39 39
40static int option_rezero(struct us_data *us, int ep_in, int ep_out) 40static int option_rezero(struct us_data *us)
41{ 41{
42 const unsigned char rezero_msg[] = { 42 const unsigned char rezero_msg[] = {
43 0x55, 0x53, 0x42, 0x43, 0x78, 0x56, 0x34, 0x12, 43 0x55, 0x53, 0x42, 0x43, 0x78, 0x56, 0x34, 0x12,
@@ -54,10 +54,10 @@ static int option_rezero(struct us_data *us, int ep_in, int ep_out)
54 if (buffer == NULL) 54 if (buffer == NULL)
55 return USB_STOR_TRANSPORT_ERROR; 55 return USB_STOR_TRANSPORT_ERROR;
56 56
57 memcpy(buffer, rezero_msg, sizeof (rezero_msg)); 57 memcpy(buffer, rezero_msg, sizeof(rezero_msg));
58 result = usb_stor_bulk_transfer_buf(us, 58 result = usb_stor_bulk_transfer_buf(us,
59 usb_sndbulkpipe(us->pusb_dev, ep_out), 59 us->send_bulk_pipe,
60 buffer, sizeof (rezero_msg), NULL); 60 buffer, sizeof(rezero_msg), NULL);
61 if (result != USB_STOR_XFER_GOOD) { 61 if (result != USB_STOR_XFER_GOOD) {
62 result = USB_STOR_XFER_ERROR; 62 result = USB_STOR_XFER_ERROR;
63 goto out; 63 goto out;
@@ -66,9 +66,15 @@ static int option_rezero(struct us_data *us, int ep_in, int ep_out)
66 /* Some of the devices need to be asked for a response, but we don't 66 /* Some of the devices need to be asked for a response, but we don't
67 * care what that response is. 67 * care what that response is.
68 */ 68 */
69 result = usb_stor_bulk_transfer_buf(us, 69 usb_stor_bulk_transfer_buf(us,
70 usb_sndbulkpipe(us->pusb_dev, ep_out), 70 us->recv_bulk_pipe,
71 buffer, RESPONSE_LEN, NULL); 71 buffer, RESPONSE_LEN, NULL);
72
73 /* Read the CSW */
74 usb_stor_bulk_transfer_buf(us,
75 us->recv_bulk_pipe,
76 buffer, 13, NULL);
77
72 result = USB_STOR_XFER_GOOD; 78 result = USB_STOR_XFER_GOOD;
73 79
74out: 80out:
@@ -76,63 +82,75 @@ out:
76 return result; 82 return result;
77} 83}
78 84
79int option_ms_init(struct us_data *us) 85static int option_inquiry(struct us_data *us)
80{ 86{
81 struct usb_device *udev; 87 const unsigned char inquiry_msg[] = {
82 struct usb_interface *intf; 88 0x55, 0x53, 0x42, 0x43, 0x12, 0x34, 0x56, 0x78,
83 struct usb_host_interface *iface_desc; 89 0x24, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x12,
84 struct usb_endpoint_descriptor *endpoint = NULL; 90 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00,
85 u8 ep_in = 0, ep_out = 0; 91 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
86 int ep_in_size = 0, ep_out_size = 0; 92 };
87 int i, result; 93 char *buffer;
88 94 int result;
89 udev = us->pusb_dev;
90 intf = us->pusb_intf;
91
92 /* Ensure it's really a ZeroCD device; devices that are already
93 * in modem mode return 0xFF for class, subclass, and protocol.
94 */
95 if (udev->descriptor.bDeviceClass != 0 ||
96 udev->descriptor.bDeviceSubClass != 0 ||
97 udev->descriptor.bDeviceProtocol != 0)
98 return USB_STOR_TRANSPORT_GOOD;
99 95
100 US_DEBUGP("Option MS: option_ms_init called\n"); 96 US_DEBUGP("Option MS: %s", "device inquiry for vendor name\n");
101 97
102 /* Find the right mass storage interface */ 98 buffer = kzalloc(0x24, GFP_KERNEL);
103 iface_desc = intf->cur_altsetting; 99 if (buffer == NULL)
104 if (iface_desc->desc.bInterfaceClass != 0x8 || 100 return USB_STOR_TRANSPORT_ERROR;
105 iface_desc->desc.bInterfaceSubClass != 0x6 ||
106 iface_desc->desc.bInterfaceProtocol != 0x50) {
107 US_DEBUGP("Option MS: mass storage interface not found, no action "
108 "required\n");
109 return USB_STOR_TRANSPORT_GOOD;
110 }
111 101
112 /* Find the mass storage bulk endpoints */ 102 memcpy(buffer, inquiry_msg, sizeof(inquiry_msg));
113 for (i = 0; i < iface_desc->desc.bNumEndpoints && (!ep_in_size || !ep_out_size); ++i) { 103 result = usb_stor_bulk_transfer_buf(us,
114 endpoint = &iface_desc->endpoint[i].desc; 104 us->send_bulk_pipe,
115 105 buffer, sizeof(inquiry_msg), NULL);
116 if (usb_endpoint_is_bulk_in(endpoint)) { 106 if (result != USB_STOR_XFER_GOOD) {
117 ep_in = usb_endpoint_num(endpoint); 107 result = USB_STOR_XFER_ERROR;
118 ep_in_size = le16_to_cpu(endpoint->wMaxPacketSize); 108 goto out;
119 } else if (usb_endpoint_is_bulk_out(endpoint)) {
120 ep_out = usb_endpoint_num(endpoint);
121 ep_out_size = le16_to_cpu(endpoint->wMaxPacketSize);
122 }
123 } 109 }
124 110
125 /* Can't find the mass storage endpoints */ 111 result = usb_stor_bulk_transfer_buf(us,
126 if (!ep_in_size || !ep_out_size) { 112 us->recv_bulk_pipe,
127 US_DEBUGP("Option MS: mass storage endpoints not found, no action " 113 buffer, 0x24, NULL);
128 "required\n"); 114 if (result != USB_STOR_XFER_GOOD) {
129 return USB_STOR_TRANSPORT_GOOD; 115 result = USB_STOR_XFER_ERROR;
116 goto out;
130 } 117 }
131 118
119 result = memcmp(buffer+8, "Option", 6);
120
121 /* Read the CSW */
122 usb_stor_bulk_transfer_buf(us,
123 us->recv_bulk_pipe,
124 buffer, 13, NULL);
125
126out:
127 kfree(buffer);
128 return result;
129}
130
131
132int option_ms_init(struct us_data *us)
133{
134 int result;
135
136 US_DEBUGP("Option MS: option_ms_init called\n");
137
138 /* Additional test for vendor information via INQUIRY,
139 * because some vendor/product IDs are ambiguous
140 */
141 result = option_inquiry(us);
142 if (result != 0) {
143 US_DEBUGP("Option MS: vendor is not Option or not determinable,"
144 " no action taken\n");
145 return 0;
146 } else
147 US_DEBUGP("Option MS: this is a genuine Option device,"
148 " proceeding\n");
149
132 /* Force Modem mode */ 150 /* Force Modem mode */
133 if (option_zero_cd == ZCD_FORCE_MODEM) { 151 if (option_zero_cd == ZCD_FORCE_MODEM) {
134 US_DEBUGP("Option MS: %s", "Forcing Modem Mode\n"); 152 US_DEBUGP("Option MS: %s", "Forcing Modem Mode\n");
135 result = option_rezero(us, ep_in, ep_out); 153 result = option_rezero(us);
136 if (result != USB_STOR_XFER_GOOD) 154 if (result != USB_STOR_XFER_GOOD)
137 US_DEBUGP("Option MS: Failed to switch to modem mode.\n"); 155 US_DEBUGP("Option MS: Failed to switch to modem mode.\n");
138 return -EIO; 156 return -EIO;
@@ -142,6 +160,6 @@ int option_ms_init(struct us_data *us)
142 " requests it\n"); 160 " requests it\n");
143 } 161 }
144 162
145 return USB_STOR_TRANSPORT_GOOD; 163 return 0;
146} 164}
147 165
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index 4359a2cb42df..4395c4100ec2 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -202,6 +202,6 @@ int sierra_ms_init(struct us_data *us)
202complete: 202complete:
203 result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst); 203 result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
204 204
205 return USB_STOR_TRANSPORT_GOOD; 205 return 0;
206} 206}
207 207
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 4b8b69045fe6..1b9c5dd0fb27 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1385,7 +1385,7 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
1385UNUSUAL_DEV( 0x1186, 0x3e04, 0x0000, 0x0000, 1385UNUSUAL_DEV( 0x1186, 0x3e04, 0x0000, 0x0000,
1386 "D-Link", 1386 "D-Link",
1387 "USB Mass Storage", 1387 "USB Mass Storage",
1388 US_SC_DEVICE, US_PR_DEVICE, option_ms_init, 0), 1388 US_SC_DEVICE, US_PR_DEVICE, option_ms_init, US_FL_IGNORE_DEVICE),
1389 1389
1390/* Reported by Kevin Lloyd <linux@sierrawireless.com> 1390/* Reported by Kevin Lloyd <linux@sierrawireless.com>
1391 * Entry is needed for the initializer function override, 1391 * Entry is needed for the initializer function override,
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 2b5a691064b7..932ffdbf86d9 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2104,6 +2104,7 @@ config FB_MB862XX_LIME
2104 bool "Lime GDC" 2104 bool "Lime GDC"
2105 depends on FB_MB862XX 2105 depends on FB_MB862XX
2106 depends on OF && !FB_MB862XX_PCI_GDC 2106 depends on OF && !FB_MB862XX_PCI_GDC
2107 depends on PPC
2107 select FB_FOREIGN_ENDIAN 2108 select FB_FOREIGN_ENDIAN
2108 select FB_LITTLE_ENDIAN 2109 select FB_LITTLE_ENDIAN
2109 ---help--- 2110 ---help---
diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c
index 6995fe1e86d4..0bcc59eb37fa 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/acornfb.c
@@ -859,43 +859,6 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
859 return 0; 859 return 0;
860} 860}
861 861
862/*
863 * Note that we are entered with the kernel locked.
864 */
865static int
866acornfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
867{
868 unsigned long off, start;
869 u32 len;
870
871 off = vma->vm_pgoff << PAGE_SHIFT;
872
873 start = info->fix.smem_start;
874 len = PAGE_ALIGN(start & ~PAGE_MASK) + info->fix.smem_len;
875 start &= PAGE_MASK;
876 if ((vma->vm_end - vma->vm_start + off) > len)
877 return -EINVAL;
878 off += start;
879 vma->vm_pgoff = off >> PAGE_SHIFT;
880
881 /* This is an IO map - tell maydump to skip this VMA */
882 vma->vm_flags |= VM_IO;
883
884 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
885
886 /*
887 * Don't alter the page protection flags; we want to keep the area
888 * cached for better performance. This does mean that we may miss
889 * some updates to the screen occasionally, but process switches
890 * should cause the caches and buffers to be flushed often enough.
891 */
892 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
893 vma->vm_end - vma->vm_start,
894 vma->vm_page_prot))
895 return -EAGAIN;
896 return 0;
897}
898
899static struct fb_ops acornfb_ops = { 862static struct fb_ops acornfb_ops = {
900 .owner = THIS_MODULE, 863 .owner = THIS_MODULE,
901 .fb_check_var = acornfb_check_var, 864 .fb_check_var = acornfb_check_var,
@@ -905,7 +868,6 @@ static struct fb_ops acornfb_ops = {
905 .fb_fillrect = cfb_fillrect, 868 .fb_fillrect = cfb_fillrect,
906 .fb_copyarea = cfb_copyarea, 869 .fb_copyarea = cfb_copyarea,
907 .fb_imageblit = cfb_imageblit, 870 .fb_imageblit = cfb_imageblit,
908 .fb_mmap = acornfb_mmap,
909}; 871};
910 872
911/* 873/*
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 2fb63f6ea2f1..5afd64482f55 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -345,7 +345,7 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
345 dev_dbg(dev, " bpp: %u\n", var->bits_per_pixel); 345 dev_dbg(dev, " bpp: %u\n", var->bits_per_pixel);
346 dev_dbg(dev, " clk: %lu KHz\n", clk_value_khz); 346 dev_dbg(dev, " clk: %lu KHz\n", clk_value_khz);
347 347
348 if ((PICOS2KHZ(var->pixclock) * var->bits_per_pixel / 8) > clk_value_khz) { 348 if (PICOS2KHZ(var->pixclock) > clk_value_khz) {
349 dev_err(dev, "%lu KHz pixel clock is too fast\n", PICOS2KHZ(var->pixclock)); 349 dev_err(dev, "%lu KHz pixel clock is too fast\n", PICOS2KHZ(var->pixclock));
350 return -EINVAL; 350 return -EINVAL;
351 } 351 }
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 97a1f095f327..515cf1978d19 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -213,7 +213,6 @@ static void radeon_pm_disable_dynamic_mode(struct radeonfb_info *rinfo)
213 PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb | 213 PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb |
214 PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb | 214 PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb |
215 PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb | 215 PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb |
216 PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb |
217 PIXCLKS_CNTL__R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); 216 PIXCLKS_CNTL__R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
218 OUTPLL(pllPIXCLKS_CNTL, tmp); 217 OUTPLL(pllPIXCLKS_CNTL, tmp);
219 218
@@ -395,7 +394,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
395 PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb | 394 PIXCLKS_CNTL__R300_PIXCLK_TRANS_ALWAYS_ONb |
396 PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb | 395 PIXCLKS_CNTL__R300_PIXCLK_TVO_ALWAYS_ONb |
397 PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb | 396 PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb |
398 PIXCLKS_CNTL__R300_P2G2CLK_ALWAYS_ONb); 397 PIXCLKS_CNTL__R300_P2G2CLK_DAC_ALWAYS_ONb);
399 OUTPLL(pllPIXCLKS_CNTL, tmp); 398 OUTPLL(pllPIXCLKS_CNTL, tmp);
400 399
401 tmp = INPLL(pllMCLK_MISC); 400 tmp = INPLL(pllMCLK_MISC);
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 37e60b1d2ed9..e49ae5edcc00 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -323,7 +323,6 @@ static int bfin_bf54x_fb_release(struct fb_info *info, int user)
323 bfin_write_EPPI0_CONTROL(0); 323 bfin_write_EPPI0_CONTROL(0);
324 SSYNC(); 324 SSYNC();
325 disable_dma(CH_EPPI0); 325 disable_dma(CH_EPPI0);
326 memset(fbi->fb_buffer, 0, info->fix.smem_len);
327 } 326 }
328 327
329 spin_unlock(&fbi->lock); 328 spin_unlock(&fbi->lock);
@@ -530,7 +529,7 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
530 return IRQ_HANDLED; 529 return IRQ_HANDLED;
531} 530}
532 531
533static int __init bfin_bf54x_probe(struct platform_device *pdev) 532static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
534{ 533{
535 struct bfin_bf54xfb_info *info; 534 struct bfin_bf54xfb_info *info;
536 struct fb_info *fbinfo; 535 struct fb_info *fbinfo;
@@ -626,14 +625,12 @@ static int __init bfin_bf54x_probe(struct platform_device *pdev)
626 goto out3; 625 goto out3;
627 } 626 }
628 627
629 memset(info->fb_buffer, 0, fbinfo->fix.smem_len);
630
631 fbinfo->screen_base = (void *)info->fb_buffer; 628 fbinfo->screen_base = (void *)info->fb_buffer;
632 fbinfo->fix.smem_start = (int)info->fb_buffer; 629 fbinfo->fix.smem_start = (int)info->fb_buffer;
633 630
634 fbinfo->fbops = &bfin_bf54x_fb_ops; 631 fbinfo->fbops = &bfin_bf54x_fb_ops;
635 632
636 fbinfo->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL); 633 fbinfo->pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL);
637 if (!fbinfo->pseudo_palette) { 634 if (!fbinfo->pseudo_palette) {
638 printk(KERN_ERR DRIVER_NAME 635 printk(KERN_ERR DRIVER_NAME
639 "Fail to allocate pseudo_palette\n"); 636 "Fail to allocate pseudo_palette\n");
@@ -642,8 +639,6 @@ static int __init bfin_bf54x_probe(struct platform_device *pdev)
642 goto out4; 639 goto out4;
643 } 640 }
644 641
645 memset(fbinfo->pseudo_palette, 0, sizeof(u32) * 16);
646
647 if (fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) 642 if (fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0)
648 < 0) { 643 < 0) {
649 printk(KERN_ERR DRIVER_NAME 644 printk(KERN_ERR DRIVER_NAME
@@ -712,7 +707,7 @@ out1:
712 return ret; 707 return ret;
713} 708}
714 709
715static int bfin_bf54x_remove(struct platform_device *pdev) 710static int __devexit bfin_bf54x_remove(struct platform_device *pdev)
716{ 711{
717 712
718 struct fb_info *fbinfo = platform_get_drvdata(pdev); 713 struct fb_info *fbinfo = platform_get_drvdata(pdev);
@@ -781,7 +776,7 @@ static int bfin_bf54x_resume(struct platform_device *pdev)
781 776
782static struct platform_driver bfin_bf54x_driver = { 777static struct platform_driver bfin_bf54x_driver = {
783 .probe = bfin_bf54x_probe, 778 .probe = bfin_bf54x_probe,
784 .remove = bfin_bf54x_remove, 779 .remove = __devexit_p(bfin_bf54x_remove),
785 .suspend = bfin_bf54x_suspend, 780 .suspend = bfin_bf54x_suspend,
786 .resume = bfin_bf54x_resume, 781 .resume = bfin_bf54x_resume,
787 .driver = { 782 .driver = {
@@ -790,7 +785,7 @@ static struct platform_driver bfin_bf54x_driver = {
790 }, 785 },
791}; 786};
792 787
793static int __devinit bfin_bf54x_driver_init(void) 788static int __init bfin_bf54x_driver_init(void)
794{ 789{
795 return platform_driver_register(&bfin_bf54x_driver); 790 return platform_driver_register(&bfin_bf54x_driver);
796} 791}
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 90cfddabf1f7..5cc36cfbf07b 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -242,7 +242,6 @@ static int bfin_t350mcqb_fb_release(struct fb_info *info, int user)
242 SSYNC(); 242 SSYNC();
243 disable_dma(CH_PPI); 243 disable_dma(CH_PPI);
244 bfin_t350mcqb_stop_timers(); 244 bfin_t350mcqb_stop_timers();
245 memset(fbi->fb_buffer, 0, info->fix.smem_len);
246 } 245 }
247 246
248 spin_unlock(&fbi->lock); 247 spin_unlock(&fbi->lock);
@@ -527,8 +526,6 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
527 goto out3; 526 goto out3;
528 } 527 }
529 528
530 memset(info->fb_buffer, 0, fbinfo->fix.smem_len);
531
532 fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET; 529 fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
533 fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET; 530 fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
534 531
@@ -602,7 +599,7 @@ out1:
602 return ret; 599 return ret;
603} 600}
604 601
605static int bfin_t350mcqb_remove(struct platform_device *pdev) 602static int __devexit bfin_t350mcqb_remove(struct platform_device *pdev)
606{ 603{
607 604
608 struct fb_info *fbinfo = platform_get_drvdata(pdev); 605 struct fb_info *fbinfo = platform_get_drvdata(pdev);
@@ -637,9 +634,6 @@ static int bfin_t350mcqb_remove(struct platform_device *pdev)
637#ifdef CONFIG_PM 634#ifdef CONFIG_PM
638static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state) 635static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state)
639{ 636{
640 struct fb_info *fbinfo = platform_get_drvdata(pdev);
641 struct bfin_t350mcqbfb_info *info = fbinfo->par;
642
643 bfin_t350mcqb_disable_ppi(); 637 bfin_t350mcqb_disable_ppi();
644 disable_dma(CH_PPI); 638 disable_dma(CH_PPI);
645 bfin_write_PPI_STATUS(0xFFFF); 639 bfin_write_PPI_STATUS(0xFFFF);
@@ -649,9 +643,6 @@ static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t stat
649 643
650static int bfin_t350mcqb_resume(struct platform_device *pdev) 644static int bfin_t350mcqb_resume(struct platform_device *pdev)
651{ 645{
652 struct fb_info *fbinfo = platform_get_drvdata(pdev);
653 struct bfin_t350mcqbfb_info *info = fbinfo->par;
654
655 enable_dma(CH_PPI); 646 enable_dma(CH_PPI);
656 bfin_t350mcqb_enable_ppi(); 647 bfin_t350mcqb_enable_ppi();
657 648
@@ -664,7 +655,7 @@ static int bfin_t350mcqb_resume(struct platform_device *pdev)
664 655
665static struct platform_driver bfin_t350mcqb_driver = { 656static struct platform_driver bfin_t350mcqb_driver = {
666 .probe = bfin_t350mcqb_probe, 657 .probe = bfin_t350mcqb_probe,
667 .remove = bfin_t350mcqb_remove, 658 .remove = __devexit_p(bfin_t350mcqb_remove),
668 .suspend = bfin_t350mcqb_suspend, 659 .suspend = bfin_t350mcqb_suspend,
669 .resume = bfin_t350mcqb_resume, 660 .resume = bfin_t350mcqb_resume,
670 .driver = { 661 .driver = {
@@ -673,7 +664,7 @@ static struct platform_driver bfin_t350mcqb_driver = {
673 }, 664 },
674}; 665};
675 666
676static int __devinit bfin_t350mcqb_driver_init(void) 667static int __init bfin_t350mcqb_driver_init(void)
677{ 668{
678 return platform_driver_register(&bfin_t350mcqb_driver); 669 return platform_driver_register(&bfin_t350mcqb_driver);
679} 670}
diff --git a/drivers/video/carminefb.c b/drivers/video/carminefb.c
index c7ff3c1a266a..0c02f8ec4bf3 100644
--- a/drivers/video/carminefb.c
+++ b/drivers/video/carminefb.c
@@ -562,7 +562,7 @@ static int __devinit alloc_carmine_fb(void __iomem *regs, void __iomem *smem_bas
562 if (ret < 0) 562 if (ret < 0)
563 goto err_free_fb; 563 goto err_free_fb;
564 564
565 if (fb_mode > ARRAY_SIZE(carmine_modedb)) 565 if (fb_mode >= ARRAY_SIZE(carmine_modedb))
566 fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE; 566 fb_mode = CARMINEFB_DEFAULT_VIDEO_MODE;
567 567
568 par->cur_mode = par->new_mode = ~0; 568 par->cur_mode = par->new_mode = ~0;
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index 777389c40988..57b9d276497e 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -414,7 +414,6 @@ chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
414 } 414 }
415 415
416 pci_set_drvdata(dp, p); 416 pci_set_drvdata(dp, p);
417 p->device = &dp->dev;
418 417
419 init_chips(p, addr); 418 init_chips(p, addr);
420 419
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 8dea2bc92705..eb12182b2059 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -280,6 +280,9 @@ static int __init efifb_probe(struct platform_device *dev)
280 info->pseudo_palette = info->par; 280 info->pseudo_palette = info->par;
281 info->par = NULL; 281 info->par = NULL;
282 282
283 info->aperture_base = efifb_fix.smem_start;
284 info->aperture_size = size_total;
285
283 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); 286 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
284 if (!info->screen_base) { 287 if (!info->screen_base) {
285 printk(KERN_ERR "efifb: abort, cannot ioremap video memory " 288 printk(KERN_ERR "efifb: abort, cannot ioremap video memory "
@@ -337,7 +340,7 @@ static int __init efifb_probe(struct platform_device *dev)
337 info->fbops = &efifb_ops; 340 info->fbops = &efifb_ops;
338 info->var = efifb_defined; 341 info->var = efifb_defined;
339 info->fix = efifb_fix; 342 info->fix = efifb_fix;
340 info->flags = FBINFO_FLAG_DEFAULT; 343 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE;
341 344
342 if ((err = fb_alloc_cmap(&info->cmap, 256, 0)) < 0) { 345 if ((err = fb_alloc_cmap(&info->cmap, 256, 0)) < 0) {
343 printk(KERN_ERR "efifb: cannot allocate colormap\n"); 346 printk(KERN_ERR "efifb: cannot allocate colormap\n");
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index d412a1ddc12f..f8a09bf8d0cd 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1462,6 +1462,16 @@ static int fb_check_foreignness(struct fb_info *fi)
1462 return 0; 1462 return 0;
1463} 1463}
1464 1464
1465static bool fb_do_apertures_overlap(struct fb_info *gen, struct fb_info *hw)
1466{
1467 /* is the generic aperture base the same as the HW one */
1468 if (gen->aperture_base == hw->aperture_base)
1469 return true;
1470 /* is the generic aperture base inside the hw base->hw base+size */
1471 if (gen->aperture_base > hw->aperture_base && gen->aperture_base <= hw->aperture_base + hw->aperture_size)
1472 return true;
1473 return false;
1474}
1465/** 1475/**
1466 * register_framebuffer - registers a frame buffer device 1476 * register_framebuffer - registers a frame buffer device
1467 * @fb_info: frame buffer info structure 1477 * @fb_info: frame buffer info structure
@@ -1485,6 +1495,23 @@ register_framebuffer(struct fb_info *fb_info)
1485 if (fb_check_foreignness(fb_info)) 1495 if (fb_check_foreignness(fb_info))
1486 return -ENOSYS; 1496 return -ENOSYS;
1487 1497
1498 /* check all firmware fbs and kick off if the base addr overlaps */
1499 for (i = 0 ; i < FB_MAX; i++) {
1500 if (!registered_fb[i])
1501 continue;
1502
1503 if (registered_fb[i]->flags & FBINFO_MISC_FIRMWARE) {
1504 if (fb_do_apertures_overlap(registered_fb[i], fb_info)) {
1505 printk(KERN_ERR "fb: conflicting fb hw usage "
1506 "%s vs %s - removing generic driver\n",
1507 fb_info->fix.id,
1508 registered_fb[i]->fix.id);
1509 unregister_framebuffer(registered_fb[i]);
1510 break;
1511 }
1512 }
1513 }
1514
1488 num_registered_fb++; 1515 num_registered_fb++;
1489 for (i = 0 ; i < FB_MAX; i++) 1516 for (i = 0 ; i < FB_MAX; i++)
1490 if (!registered_fb[i]) 1517 if (!registered_fb[i])
@@ -1586,6 +1613,10 @@ unregister_framebuffer(struct fb_info *fb_info)
1586 device_destroy(fb_class, MKDEV(FB_MAJOR, i)); 1613 device_destroy(fb_class, MKDEV(FB_MAJOR, i));
1587 event.info = fb_info; 1614 event.info = fb_info;
1588 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); 1615 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
1616
1617 /* this may free fb info */
1618 if (fb_info->fbops->fb_destroy)
1619 fb_info->fbops->fb_destroy(fb_info);
1589done: 1620done:
1590 return ret; 1621 return ret;
1591} 1622}
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index 3a81060137a2..15d200109446 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -395,17 +395,16 @@ int __init igafb_init(void)
395 /* We leak a reference here but as it cannot be unloaded this is 395 /* We leak a reference here but as it cannot be unloaded this is
396 fine. If you write unload code remember to free it in unload */ 396 fine. If you write unload code remember to free it in unload */
397 397
398 size = sizeof(struct fb_info) + sizeof(struct iga_par) + sizeof(u32)*16; 398 size = sizeof(struct iga_par) + sizeof(u32)*16;
399 399
400 info = kzalloc(size, GFP_ATOMIC); 400 info = framebuffer_alloc(size, &pdev->dev);
401 if (!info) { 401 if (!info) {
402 printk("igafb_init: can't alloc fb_info\n"); 402 printk("igafb_init: can't alloc fb_info\n");
403 pci_dev_put(pdev); 403 pci_dev_put(pdev);
404 return -ENOMEM; 404 return -ENOMEM;
405 } 405 }
406 406
407 par = (struct iga_par *) (info + 1); 407 par = info->par;
408
409 408
410 if ((addr = pdev->resource[0].start) == 0) { 409 if ((addr = pdev->resource[0].start) == 0) {
411 printk("igafb_init: no memory start\n"); 410 printk("igafb_init: no memory start\n");
@@ -526,7 +525,6 @@ int __init igafb_init(void)
526 info->var = default_var; 525 info->var = default_var;
527 info->fix = igafb_fix; 526 info->fix = igafb_fix;
528 info->pseudo_palette = (void *)(par + 1); 527 info->pseudo_palette = (void *)(par + 1);
529 info->device = &pdev->dev;
530 528
531 if (!iga_init(info, par)) { 529 if (!iga_init(info, par)) {
532 iounmap((void *)par->io_base); 530 iounmap((void *)par->io_base);
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index ace14fe02fc4..0cafd642fbc0 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -1365,6 +1365,11 @@ static int intelfb_set_par(struct fb_info *info)
1365 DBG_MSG("intelfb_set_par (%dx%d-%d)\n", info->var.xres, 1365 DBG_MSG("intelfb_set_par (%dx%d-%d)\n", info->var.xres,
1366 info->var.yres, info->var.bits_per_pixel); 1366 info->var.yres, info->var.bits_per_pixel);
1367 1367
1368 /*
1369 * Disable VCO prior to timing register change.
1370 */
1371 OUTREG(DPLL_A, INREG(DPLL_A) & ~DPLL_VCO_ENABLE);
1372
1368 intelfb_blank(FB_BLANK_POWERDOWN, info); 1373 intelfb_blank(FB_BLANK_POWERDOWN, info);
1369 1374
1370 if (ACCEL(dinfo, info)) 1375 if (ACCEL(dinfo, info))
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index b91251d1fe41..3b437813584c 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -37,22 +37,24 @@ extra-y += $(call logo-cfiles,_clut224,ppm)
37# Gray 256 37# Gray 256
38extra-y += $(call logo-cfiles,_gray256,pgm) 38extra-y += $(call logo-cfiles,_gray256,pgm)
39 39
40pnmtologo := scripts/pnmtologo
41
40# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..." 42# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
41quiet_cmd_logo = LOGO $@ 43quiet_cmd_logo = LOGO $@
42 cmd_logo = scripts/pnmtologo \ 44 cmd_logo = $(pnmtologo) \
43 -t $(patsubst $*_%,%,$(notdir $(basename $<))) \ 45 -t $(patsubst $*_%,%,$(notdir $(basename $<))) \
44 -n $(notdir $(basename $<)) -o $@ $< 46 -n $(notdir $(basename $<)) -o $@ $<
45 47
46$(obj)/%_mono.c: $(src)/%_mono.pbm FORCE 48$(obj)/%_mono.c: $(src)/%_mono.pbm $(pnmtologo) FORCE
47 $(call if_changed,logo) 49 $(call if_changed,logo)
48 50
49$(obj)/%_vga16.c: $(src)/%_vga16.ppm FORCE 51$(obj)/%_vga16.c: $(src)/%_vga16.ppm $(pnmtologo) FORCE
50 $(call if_changed,logo) 52 $(call if_changed,logo)
51 53
52$(obj)/%_clut224.c: $(src)/%_clut224.ppm FORCE 54$(obj)/%_clut224.c: $(src)/%_clut224.ppm $(pnmtologo) FORCE
53 $(call if_changed,logo) 55 $(call if_changed,logo)
54 56
55$(obj)/%_gray256.c: $(src)/%_gray256.pgm FORCE 57$(obj)/%_gray256.c: $(src)/%_gray256.pgm $(pnmtologo) FORCE
56 $(call if_changed,logo) 58 $(call if_changed,logo)
57 59
58# Files generated that shall be removed upon make clean 60# Files generated that shall be removed upon make clean
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 2e85a2b52d05..ea7a8ccc830c 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -21,21 +21,6 @@
21#include <asm/bootinfo.h> 21#include <asm/bootinfo.h>
22#endif 22#endif
23 23
24extern const struct linux_logo logo_linux_mono;
25extern const struct linux_logo logo_linux_vga16;
26extern const struct linux_logo logo_linux_clut224;
27extern const struct linux_logo logo_blackfin_vga16;
28extern const struct linux_logo logo_blackfin_clut224;
29extern const struct linux_logo logo_dec_clut224;
30extern const struct linux_logo logo_mac_clut224;
31extern const struct linux_logo logo_parisc_clut224;
32extern const struct linux_logo logo_sgi_clut224;
33extern const struct linux_logo logo_sun_clut224;
34extern const struct linux_logo logo_superh_mono;
35extern const struct linux_logo logo_superh_vga16;
36extern const struct linux_logo logo_superh_clut224;
37extern const struct linux_logo logo_m32r_clut224;
38
39static int nologo; 24static int nologo;
40module_param(nologo, bool, 0); 25module_param(nologo, bool, 0);
41MODULE_PARM_DESC(nologo, "Disables startup logo"); 26MODULE_PARM_DESC(nologo, "Disables startup logo");
diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfb.c
index fb64234a3825..a28e3cfbbf70 100644
--- a/drivers/video/mb862xx/mb862xxfb.c
+++ b/drivers/video/mb862xx/mb862xxfb.c
@@ -19,7 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#if defined(CONFIG_PPC_OF) 22#if defined(CONFIG_OF)
23#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#endif 24#endif
25#include "mb862xxfb.h" 25#include "mb862xxfb.h"
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 16186240c5f2..34e4e7995169 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -264,6 +264,14 @@ static const struct fb_videomode modedb[] = {
264 /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */ 264 /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */
265 NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3, 265 NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3,
266 0, FB_VMODE_NONINTERLACED 266 0, FB_VMODE_NONINTERLACED
267 }, {
268 /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
269 NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5,
270 0, FB_VMODE_INTERLACED
271 }, {
272 /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
273 NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5,
274 0, FB_VMODE_INTERLACED
267 }, 275 },
268}; 276};
269 277
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index e1d9eeb1aeaf..4d8c54c23dd7 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -378,7 +378,6 @@ static void __init offb_init_fb(const char *name, const char *full_name,
378 struct fb_fix_screeninfo *fix; 378 struct fb_fix_screeninfo *fix;
379 struct fb_var_screeninfo *var; 379 struct fb_var_screeninfo *var;
380 struct fb_info *info; 380 struct fb_info *info;
381 int size;
382 381
383 if (!request_mem_region(res_start, res_size, "offb")) 382 if (!request_mem_region(res_start, res_size, "offb"))
384 return; 383 return;
@@ -393,15 +392,12 @@ static void __init offb_init_fb(const char *name, const char *full_name,
393 return; 392 return;
394 } 393 }
395 394
396 size = sizeof(struct fb_info) + sizeof(u32) * 16; 395 info = framebuffer_alloc(sizeof(u32) * 16, NULL);
397
398 info = kmalloc(size, GFP_ATOMIC);
399 396
400 if (info == 0) { 397 if (info == 0) {
401 release_mem_region(res_start, res_size); 398 release_mem_region(res_start, res_size);
402 return; 399 return;
403 } 400 }
404 memset(info, 0, size);
405 401
406 fix = &info->fix; 402 fix = &info->fix;
407 var = &info->var; 403 var = &info->var;
@@ -497,7 +493,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
497 iounmap(par->cmap_adr); 493 iounmap(par->cmap_adr);
498 par->cmap_adr = NULL; 494 par->cmap_adr = NULL;
499 iounmap(info->screen_base); 495 iounmap(info->screen_base);
500 kfree(info); 496 framebuffer_release(info);
501 release_mem_region(res_start, res_size); 497 release_mem_region(res_start, res_size);
502 return; 498 return;
503 } 499 }
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index c6dd924976a4..36436ee6c1a4 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -1748,7 +1748,7 @@ static void __devexit pm2fb_remove(struct pci_dev *pdev)
1748 pci_set_drvdata(pdev, NULL); 1748 pci_set_drvdata(pdev, NULL);
1749 fb_dealloc_cmap(&info->cmap); 1749 fb_dealloc_cmap(&info->cmap);
1750 kfree(info->pixmap.addr); 1750 kfree(info->pixmap.addr);
1751 kfree(info); 1751 framebuffer_release(info);
1752} 1752}
1753 1753
1754static struct pci_device_id pm2fb_id_table[] = { 1754static struct pci_device_id pm2fb_id_table[] = {
diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
index 0726aecf3b7e..0deb0a8867b7 100644
--- a/drivers/video/s1d13xxxfb.c
+++ b/drivers/video/s1d13xxxfb.c
@@ -2,6 +2,7 @@
2 * 2 *
3 * (c) 2004 Simtec Electronics 3 * (c) 2004 Simtec Electronics
4 * (c) 2005 Thibaut VARENE <varenet@parisc-linux.org> 4 * (c) 2005 Thibaut VARENE <varenet@parisc-linux.org>
5 * (c) 2009 Kristoffer Ericson <kristoffer.ericson@gmail.com>
5 * 6 *
6 * Driver for Epson S1D13xxx series framebuffer chips 7 * Driver for Epson S1D13xxx series framebuffer chips
7 * 8 *
@@ -10,18 +11,10 @@
10 * linux/drivers/video/epson1355fb.c 11 * linux/drivers/video/epson1355fb.c
11 * linux/drivers/video/epson/s1d13xxxfb.c (2.4 driver by Epson) 12 * linux/drivers/video/epson/s1d13xxxfb.c (2.4 driver by Epson)
12 * 13 *
13 * Note, currently only tested on S1D13806 with 16bit CRT.
14 * As such, this driver might still contain some hardcoded bits relating to
15 * S1D13806.
16 * Making it work on other S1D13XXX chips should merely be a matter of adding
17 * a few switch()s, some missing glue here and there maybe, and split header
18 * files.
19 *
20 * TODO: - handle dual screen display (CRT and LCD at the same time). 14 * TODO: - handle dual screen display (CRT and LCD at the same time).
21 * - check_var(), mode change, etc. 15 * - check_var(), mode change, etc.
22 * - PM untested. 16 * - probably not SMP safe :)
23 * - Accelerated interfaces. 17 * - support all bitblt operations on all cards
24 * - Probably not SMP safe :)
25 * 18 *
26 * This file is subject to the terms and conditions of the GNU General Public 19 * This file is subject to the terms and conditions of the GNU General Public
27 * License. See the file COPYING in the main directory of this archive for 20 * License. See the file COPYING in the main directory of this archive for
@@ -31,19 +24,24 @@
31#include <linux/module.h> 24#include <linux/module.h>
32#include <linux/platform_device.h> 25#include <linux/platform_device.h>
33#include <linux/delay.h> 26#include <linux/delay.h>
34
35#include <linux/types.h> 27#include <linux/types.h>
36#include <linux/errno.h> 28#include <linux/errno.h>
37#include <linux/mm.h> 29#include <linux/mm.h>
38#include <linux/mman.h> 30#include <linux/mman.h>
39#include <linux/fb.h> 31#include <linux/fb.h>
32#include <linux/spinlock_types.h>
33#include <linux/spinlock.h>
40 34
41#include <asm/io.h> 35#include <asm/io.h>
42 36
43#include <video/s1d13xxxfb.h> 37#include <video/s1d13xxxfb.h>
44 38
45#define PFX "s1d13xxxfb: " 39#define PFX "s1d13xxxfb: "
40#define BLIT "s1d13xxxfb_bitblt: "
46 41
42/*
43 * set this to enable debugging on general functions
44 */
47#if 0 45#if 0
48#define dbg(fmt, args...) do { printk(KERN_INFO fmt, ## args); } while(0) 46#define dbg(fmt, args...) do { printk(KERN_INFO fmt, ## args); } while(0)
49#else 47#else
@@ -51,7 +49,21 @@
51#endif 49#endif
52 50
53/* 51/*
54 * List of card production ids 52 * set this to enable debugging on 2D acceleration
53 */
54#if 0
55#define dbg_blit(fmt, args...) do { printk(KERN_INFO BLIT fmt, ## args); } while (0)
56#else
57#define dbg_blit(fmt, args...) do { } while (0)
58#endif
59
60/*
61 * we make sure only one bitblt operation is running
62 */
63static DEFINE_SPINLOCK(s1d13xxxfb_bitblt_lock);
64
65/*
66 * list of card production ids
55 */ 67 */
56static const int s1d13xxxfb_prod_ids[] = { 68static const int s1d13xxxfb_prod_ids[] = {
57 S1D13505_PROD_ID, 69 S1D13505_PROD_ID,
@@ -69,7 +81,7 @@ static const char *s1d13xxxfb_prod_names[] = {
69}; 81};
70 82
71/* 83/*
72 * Here we define the default struct fb_fix_screeninfo 84 * here we define the default struct fb_fix_screeninfo
73 */ 85 */
74static struct fb_fix_screeninfo __devinitdata s1d13xxxfb_fix = { 86static struct fb_fix_screeninfo __devinitdata s1d13xxxfb_fix = {
75 .id = S1D_FBID, 87 .id = S1D_FBID,
@@ -145,8 +157,10 @@ crt_enable(struct s1d13xxxfb_par *par, int enable)
145 s1d13xxxfb_writereg(par, S1DREG_COM_DISP_MODE, mode); 157 s1d13xxxfb_writereg(par, S1DREG_COM_DISP_MODE, mode);
146} 158}
147 159
148/* framebuffer control routines */
149 160
161/*************************************************************
162 framebuffer control functions
163 *************************************************************/
150static inline void 164static inline void
151s1d13xxxfb_setup_pseudocolour(struct fb_info *info) 165s1d13xxxfb_setup_pseudocolour(struct fb_info *info)
152{ 166{
@@ -242,13 +256,13 @@ s1d13xxxfb_set_par(struct fb_info *info)
242} 256}
243 257
244/** 258/**
245 * s1d13xxxfb_setcolreg - sets a color register. 259 * s1d13xxxfb_setcolreg - sets a color register.
246 * @regno: Which register in the CLUT we are programming 260 * @regno: Which register in the CLUT we are programming
247 * @red: The red value which can be up to 16 bits wide 261 * @red: The red value which can be up to 16 bits wide
248 * @green: The green value which can be up to 16 bits wide 262 * @green: The green value which can be up to 16 bits wide
249 * @blue: The blue value which can be up to 16 bits wide. 263 * @blue: The blue value which can be up to 16 bits wide.
250 * @transp: If supported the alpha value which can be up to 16 bits wide. 264 * @transp: If supported the alpha value which can be up to 16 bits wide.
251 * @info: frame buffer info structure 265 * @info: frame buffer info structure
252 * 266 *
253 * Returns negative errno on error, or zero on success. 267 * Returns negative errno on error, or zero on success.
254 */ 268 */
@@ -351,15 +365,15 @@ s1d13xxxfb_blank(int blank_mode, struct fb_info *info)
351} 365}
352 366
353/** 367/**
354 * s1d13xxxfb_pan_display - Pans the display. 368 * s1d13xxxfb_pan_display - Pans the display.
355 * @var: frame buffer variable screen structure 369 * @var: frame buffer variable screen structure
356 * @info: frame buffer structure that represents a single frame buffer 370 * @info: frame buffer structure that represents a single frame buffer
357 * 371 *
358 * Pan (or wrap, depending on the `vmode' field) the display using the 372 * Pan (or wrap, depending on the `vmode' field) the display using the
359 * `yoffset' field of the `var' structure (`xoffset' not yet supported). 373 * `yoffset' field of the `var' structure (`xoffset' not yet supported).
360 * If the values don't fit, return -EINVAL. 374 * If the values don't fit, return -EINVAL.
361 * 375 *
362 * Returns negative errno on error, or zero on success. 376 * Returns negative errno on error, or zero on success.
363 */ 377 */
364static int 378static int
365s1d13xxxfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) 379s1d13xxxfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
@@ -390,8 +404,259 @@ s1d13xxxfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
390 return 0; 404 return 0;
391} 405}
392 406
393/* framebuffer information structures */ 407/************************************************************
408 functions to handle bitblt acceleration
409 ************************************************************/
410
411/**
412 * bltbit_wait_bitset - waits for change in register value
413 * @info : framebuffer structure
414 * @bit : value expected in register
415 * @timeout : ...
416 *
417 * waits until value changes INTO bit
418 */
419static u8
420bltbit_wait_bitset(struct fb_info *info, u8 bit, int timeout)
421{
422 while (!(s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0) & bit)) {
423 udelay(10);
424 if (!--timeout) {
425 dbg_blit("wait_bitset timeout\n");
426 break;
427 }
428 }
429
430 return timeout;
431}
432
433/**
434 * bltbit_wait_bitclear - waits for change in register value
435 * @info : frambuffer structure
436 * @bit : value currently in register
437 * @timeout : ...
438 *
439 * waits until value changes FROM bit
440 *
441 */
442static u8
443bltbit_wait_bitclear(struct fb_info *info, u8 bit, int timeout)
444{
445 while (s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0) & bit) {
446 udelay(10);
447 if (!--timeout) {
448 dbg_blit("wait_bitclear timeout\n");
449 break;
450 }
451 }
452
453 return timeout;
454}
455
456/**
457 * bltbit_fifo_status - checks the current status of the fifo
458 * @info : framebuffer structure
459 *
460 * returns number of free words in buffer
461 */
462static u8
463bltbit_fifo_status(struct fb_info *info)
464{
465 u8 status;
394 466
467 status = s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0);
468
469 /* its empty so room for 16 words */
470 if (status & BBLT_FIFO_EMPTY)
471 return 16;
472
473 /* its full so we dont want to add */
474 if (status & BBLT_FIFO_FULL)
475 return 0;
476
477 /* its atleast half full but we can add one atleast */
478 if (status & BBLT_FIFO_NOT_FULL)
479 return 1;
480
481 return 0;
482}
483
484/*
485 * s1d13xxxfb_bitblt_copyarea - accelerated copyarea function
486 * @info : framebuffer structure
487 * @area : fb_copyarea structure
488 *
489 * supports (atleast) S1D13506
490 *
491 */
492static void
493s1d13xxxfb_bitblt_copyarea(struct fb_info *info, const struct fb_copyarea *area)
494{
495 u32 dst, src;
496 u32 stride;
497 u16 reverse = 0;
498 u16 sx = area->sx, sy = area->sy;
499 u16 dx = area->dx, dy = area->dy;
500 u16 width = area->width, height = area->height;
501 u16 bpp;
502
503 spin_lock(&s1d13xxxfb_bitblt_lock);
504
505 /* bytes per xres line */
506 bpp = (info->var.bits_per_pixel >> 3);
507 stride = bpp * info->var.xres;
508
509 /* reverse, calculate the last pixel in rectangle */
510 if ((dy > sy) || ((dy == sy) && (dx >= sx))) {
511 dst = (((dy + height - 1) * stride) + (bpp * (dx + width - 1)));
512 src = (((sy + height - 1) * stride) + (bpp * (sx + width - 1)));
513 reverse = 1;
514 /* not reverse, calculate the first pixel in rectangle */
515 } else { /* (y * xres) + (bpp * x) */
516 dst = (dy * stride) + (bpp * dx);
517 src = (sy * stride) + (bpp * sx);
518 }
519
520 /* set source adress */
521 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START0, (src & 0xff));
522 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START1, (src >> 8) & 0x00ff);
523 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START2, (src >> 16) & 0x00ff);
524
525 /* set destination adress */
526 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START0, (dst & 0xff));
527 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START1, (dst >> 8) & 0x00ff);
528 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START2, (dst >> 16) & 0x00ff);
529
530 /* program height and width */
531 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH0, (width & 0xff) - 1);
532 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH1, (width >> 8));
533
534 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT0, (height & 0xff) - 1);
535 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT1, (height >> 8));
536
537 /* negative direction ROP */
538 if (reverse == 1) {
539 dbg_blit("(copyarea) negative rop\n");
540 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, 0x03);
541 } else /* positive direction ROP */ {
542 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, 0x02);
543 dbg_blit("(copyarea) positive rop\n");
544 }
545
546 /* set for rectangel mode and not linear */
547 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x0);
548
549 /* setup the bpp 1 = 16bpp, 0 = 8bpp*/
550 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL1, (bpp >> 1));
551
552 /* set words per xres */
553 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF0, (stride >> 1) & 0xff);
554 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF1, (stride >> 9));
555
556 dbg_blit("(copyarea) dx=%d, dy=%d\n", dx, dy);
557 dbg_blit("(copyarea) sx=%d, sy=%d\n", sx, sy);
558 dbg_blit("(copyarea) width=%d, height=%d\n", width - 1, height - 1);
559 dbg_blit("(copyarea) stride=%d\n", stride);
560 dbg_blit("(copyarea) bpp=%d=0x0%d, mem_offset1=%d, mem_offset2=%d\n", bpp, (bpp >> 1),
561 (stride >> 1) & 0xff, stride >> 9);
562
563 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CC_EXP, 0x0c);
564
565 /* initialize the engine */
566 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x80);
567
568 /* wait to complete */
569 bltbit_wait_bitclear(info, 0x80, 8000);
570
571 spin_unlock(&s1d13xxxfb_bitblt_lock);
572}
573
574/**
575 *
576 * s1d13xxxfb_bitblt_solidfill - accelerated solidfill function
577 * @info : framebuffer structure
578 * @rect : fb_fillrect structure
579 *
580 * supports (atleast 13506)
581 *
582 **/
583static void
584s1d13xxxfb_bitblt_solidfill(struct fb_info *info, const struct fb_fillrect *rect)
585{
586 u32 screen_stride, dest;
587 u32 fg;
588 u16 bpp = (info->var.bits_per_pixel >> 3);
589
590 /* grab spinlock */
591 spin_lock(&s1d13xxxfb_bitblt_lock);
592
593 /* bytes per x width */
594 screen_stride = (bpp * info->var.xres);
595
596 /* bytes to starting point */
597 dest = ((rect->dy * screen_stride) + (bpp * rect->dx));
598
599 dbg_blit("(solidfill) dx=%d, dy=%d, stride=%d, dest=%d\n"
600 "(solidfill) : rect_width=%d, rect_height=%d\n",
601 rect->dx, rect->dy, screen_stride, dest,
602 rect->width - 1, rect->height - 1);
603
604 dbg_blit("(solidfill) : xres=%d, yres=%d, bpp=%d\n",
605 info->var.xres, info->var.yres,
606 info->var.bits_per_pixel);
607 dbg_blit("(solidfill) : rop=%d\n", rect->rop);
608
609 /* We split the destination into the three registers */
610 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START0, (dest & 0x00ff));
611 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START1, ((dest >> 8) & 0x00ff));
612 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START2, ((dest >> 16) & 0x00ff));
613
614 /* give information regarding rectangel width */
615 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH0, ((rect->width) & 0x00ff) - 1);
616 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH1, (rect->width >> 8));
617
618 /* give information regarding rectangel height */
619 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT0, ((rect->height) & 0x00ff) - 1);
620 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT1, (rect->height >> 8));
621
622 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
623 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
624 fg = ((u32 *)info->pseudo_palette)[rect->color];
625 dbg_blit("(solidfill) truecolor/directcolor\n");
626 dbg_blit("(solidfill) pseudo_palette[%d] = %d\n", rect->color, fg);
627 } else {
628 fg = rect->color;
629 dbg_blit("(solidfill) color = %d\n", rect->color);
630 }
631
632 /* set foreground color */
633 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_FGC0, (fg & 0xff));
634 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_FGC1, (fg >> 8) & 0xff);
635
636 /* set rectangual region of memory (rectangle and not linear) */
637 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x0);
638
639 /* set operation mode SOLID_FILL */
640 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, BBLT_SOLID_FILL);
641
642 /* set bits per pixel (1 = 16bpp, 0 = 8bpp) */
643 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL1, (info->var.bits_per_pixel >> 4));
644
645 /* set the memory offset for the bblt in word sizes */
646 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF0, (screen_stride >> 1) & 0x00ff);
647 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF1, (screen_stride >> 9));
648
649 /* and away we go.... */
650 s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x80);
651
652 /* wait until its done */
653 bltbit_wait_bitclear(info, 0x80, 8000);
654
655 /* let others play */
656 spin_unlock(&s1d13xxxfb_bitblt_lock);
657}
658
659/* framebuffer information structures */
395static struct fb_ops s1d13xxxfb_fbops = { 660static struct fb_ops s1d13xxxfb_fbops = {
396 .owner = THIS_MODULE, 661 .owner = THIS_MODULE,
397 .fb_set_par = s1d13xxxfb_set_par, 662 .fb_set_par = s1d13xxxfb_set_par,
@@ -400,7 +665,7 @@ static struct fb_ops s1d13xxxfb_fbops = {
400 665
401 .fb_pan_display = s1d13xxxfb_pan_display, 666 .fb_pan_display = s1d13xxxfb_pan_display,
402 667
403 /* to be replaced by any acceleration we can */ 668 /* gets replaced at chip detection time */
404 .fb_fillrect = cfb_fillrect, 669 .fb_fillrect = cfb_fillrect,
405 .fb_copyarea = cfb_copyarea, 670 .fb_copyarea = cfb_copyarea,
406 .fb_imageblit = cfb_imageblit, 671 .fb_imageblit = cfb_imageblit,
@@ -412,9 +677,9 @@ static int s1d13xxxfb_width_tab[2][4] __devinitdata = {
412}; 677};
413 678
414/** 679/**
415 * s1d13xxxfb_fetch_hw_state - Configure the framebuffer according to 680 * s1d13xxxfb_fetch_hw_state - Configure the framebuffer according to
416 * hardware setup. 681 * hardware setup.
417 * @info: frame buffer structure 682 * @info: frame buffer structure
418 * 683 *
419 * We setup the framebuffer structures according to the current 684 * We setup the framebuffer structures according to the current
420 * hardware setup. On some machines, the BIOS will have filled 685 * hardware setup. On some machines, the BIOS will have filled
@@ -569,7 +834,6 @@ s1d13xxxfb_probe(struct platform_device *pdev)
569 if (pdata && pdata->platform_init_video) 834 if (pdata && pdata->platform_init_video)
570 pdata->platform_init_video(); 835 pdata->platform_init_video();
571 836
572
573 if (pdev->num_resources != 2) { 837 if (pdev->num_resources != 2) {
574 dev_err(&pdev->dev, "invalid num_resources: %i\n", 838 dev_err(&pdev->dev, "invalid num_resources: %i\n",
575 pdev->num_resources); 839 pdev->num_resources);
@@ -655,16 +919,27 @@ s1d13xxxfb_probe(struct platform_device *pdev)
655 919
656 info->fix = s1d13xxxfb_fix; 920 info->fix = s1d13xxxfb_fix;
657 info->fix.mmio_start = pdev->resource[1].start; 921 info->fix.mmio_start = pdev->resource[1].start;
658 info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start +1; 922 info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start + 1;
659 info->fix.smem_start = pdev->resource[0].start; 923 info->fix.smem_start = pdev->resource[0].start;
660 info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start +1; 924 info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start + 1;
661 925
662 printk(KERN_INFO PFX "regs mapped at 0x%p, fb %d KiB mapped at 0x%p\n", 926 printk(KERN_INFO PFX "regs mapped at 0x%p, fb %d KiB mapped at 0x%p\n",
663 default_par->regs, info->fix.smem_len / 1024, info->screen_base); 927 default_par->regs, info->fix.smem_len / 1024, info->screen_base);
664 928
665 info->par = default_par; 929 info->par = default_par;
666 info->fbops = &s1d13xxxfb_fbops;
667 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; 930 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
931 info->fbops = &s1d13xxxfb_fbops;
932
933 switch(prod_id) {
934 case S1D13506_PROD_ID: /* activate acceleration */
935 s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
936 s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
937 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
938 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
939 break;
940 default:
941 break;
942 }
668 943
669 /* perform "manual" chip initialization, if needed */ 944 /* perform "manual" chip initialization, if needed */
670 if (pdata && pdata->initregs) 945 if (pdata && pdata->initregs)
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index d3a568e6b169..43680e545427 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -358,9 +358,16 @@ static int s3c_fb_set_par(struct fb_info *info)
358 writel(data, regs + VIDOSD_B(win_no)); 358 writel(data, regs + VIDOSD_B(win_no));
359 359
360 data = var->xres * var->yres; 360 data = var->xres * var->yres;
361
362 u32 osdc_data = 0;
363
364 osdc_data = VIDISD14C_ALPHA1_R(0xf) |
365 VIDISD14C_ALPHA1_G(0xf) |
366 VIDISD14C_ALPHA1_B(0xf);
367
361 if (s3c_fb_has_osd_d(win_no)) { 368 if (s3c_fb_has_osd_d(win_no)) {
362 writel(data, regs + VIDOSD_D(win_no)); 369 writel(data, regs + VIDOSD_D(win_no));
363 writel(0, regs + VIDOSD_C(win_no)); 370 writel(osdc_data, regs + VIDOSD_C(win_no));
364 } else 371 } else
365 writel(data, regs + VIDOSD_C(win_no)); 372 writel(data, regs + VIDOSD_C(win_no));
366 373
@@ -409,8 +416,12 @@ static int s3c_fb_set_par(struct fb_info *info)
409 data |= WINCON1_BPPMODE_19BPP_A1666; 416 data |= WINCON1_BPPMODE_19BPP_A1666;
410 else 417 else
411 data |= WINCON1_BPPMODE_18BPP_666; 418 data |= WINCON1_BPPMODE_18BPP_666;
412 } else if (var->transp.length != 0) 419 } else if (var->transp.length == 1)
413 data |= WINCON1_BPPMODE_25BPP_A1888; 420 data |= WINCON1_BPPMODE_25BPP_A1888
421 | WINCON1_BLD_PIX;
422 else if (var->transp.length == 4)
423 data |= WINCON1_BPPMODE_28BPP_A4888
424 | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
414 else 425 else
415 data |= WINCON0_BPPMODE_24BPP_888; 426 data |= WINCON0_BPPMODE_24BPP_888;
416 427
@@ -418,6 +429,20 @@ static int s3c_fb_set_par(struct fb_info *info)
418 break; 429 break;
419 } 430 }
420 431
432 /* It has no color key control register for window0 */
433 if (win_no > 0) {
434 u32 keycon0_data = 0, keycon1_data = 0;
435
436 keycon0_data = ~(WxKEYCON0_KEYBL_EN |
437 WxKEYCON0_KEYEN_F |
438 WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
439
440 keycon1_data = WxKEYCON1_COLVAL(0xffffff);
441
442 writel(keycon0_data, regs + WxKEYCONy(win_no-1, 0));
443 writel(keycon1_data, regs + WxKEYCONy(win_no-1, 1));
444 }
445
421 writel(data, regs + WINCON(win_no)); 446 writel(data, regs + WINCON(win_no));
422 writel(0x0, regs + WINxMAP(win_no)); 447 writel(0x0, regs + WINxMAP(win_no));
423 448
@@ -700,9 +725,12 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
700 */ 725 */
701static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win) 726static void s3c_fb_release_win(struct s3c_fb *sfb, struct s3c_fb_win *win)
702{ 727{
703 fb_dealloc_cmap(&win->fbinfo->cmap); 728 if (win->fbinfo) {
704 unregister_framebuffer(win->fbinfo); 729 unregister_framebuffer(win->fbinfo);
705 s3c_fb_free_memory(sfb, win); 730 fb_dealloc_cmap(&win->fbinfo->cmap);
731 s3c_fb_free_memory(sfb, win);
732 framebuffer_release(win->fbinfo);
733 }
706} 734}
707 735
708/** 736/**
@@ -753,7 +781,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
753 ret = s3c_fb_alloc_memory(sfb, win); 781 ret = s3c_fb_alloc_memory(sfb, win);
754 if (ret) { 782 if (ret) {
755 dev_err(sfb->dev, "failed to allocate display memory\n"); 783 dev_err(sfb->dev, "failed to allocate display memory\n");
756 goto err_framebuffer; 784 return ret;
757 } 785 }
758 786
759 /* setup the r/b/g positions for the window's palette */ 787 /* setup the r/b/g positions for the window's palette */
@@ -776,7 +804,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
776 ret = s3c_fb_check_var(&fbinfo->var, fbinfo); 804 ret = s3c_fb_check_var(&fbinfo->var, fbinfo);
777 if (ret < 0) { 805 if (ret < 0) {
778 dev_err(sfb->dev, "check_var failed on initial video params\n"); 806 dev_err(sfb->dev, "check_var failed on initial video params\n");
779 goto err_alloc_mem; 807 return ret;
780 } 808 }
781 809
782 /* create initial colour map */ 810 /* create initial colour map */
@@ -796,20 +824,13 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
796 ret = register_framebuffer(fbinfo); 824 ret = register_framebuffer(fbinfo);
797 if (ret < 0) { 825 if (ret < 0) {
798 dev_err(sfb->dev, "failed to register framebuffer\n"); 826 dev_err(sfb->dev, "failed to register framebuffer\n");
799 goto err_alloc_mem; 827 return ret;
800 } 828 }
801 829
802 *res = win; 830 *res = win;
803 dev_info(sfb->dev, "window %d: fb %s\n", win_no, fbinfo->fix.id); 831 dev_info(sfb->dev, "window %d: fb %s\n", win_no, fbinfo->fix.id);
804 832
805 return 0; 833 return 0;
806
807err_alloc_mem:
808 s3c_fb_free_memory(sfb, win);
809
810err_framebuffer:
811 unregister_framebuffer(fbinfo);
812 return ret;
813} 834}
814 835
815/** 836/**
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index b0b4513ba537..7da0027e2409 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -24,6 +24,7 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/cpufreq.h>
27 28
28#include <asm/io.h> 29#include <asm/io.h>
29#include <asm/div64.h> 30#include <asm/div64.h>
@@ -89,7 +90,7 @@ static void s3c2410fb_set_lcdaddr(struct fb_info *info)
89static unsigned int s3c2410fb_calc_pixclk(struct s3c2410fb_info *fbi, 90static unsigned int s3c2410fb_calc_pixclk(struct s3c2410fb_info *fbi,
90 unsigned long pixclk) 91 unsigned long pixclk)
91{ 92{
92 unsigned long clk = clk_get_rate(fbi->clk); 93 unsigned long clk = fbi->clk_rate;
93 unsigned long long div; 94 unsigned long long div;
94 95
95 /* pixclk is in picoseconds, our clock is in Hz 96 /* pixclk is in picoseconds, our clock is in Hz
@@ -758,6 +759,57 @@ static irqreturn_t s3c2410fb_irq(int irq, void *dev_id)
758 return IRQ_HANDLED; 759 return IRQ_HANDLED;
759} 760}
760 761
762#ifdef CONFIG_CPU_FREQ
763
764static int s3c2410fb_cpufreq_transition(struct notifier_block *nb,
765 unsigned long val, void *data)
766{
767 struct cpufreq_freqs *freqs = data;
768 struct s3c2410fb_info *info;
769 struct fb_info *fbinfo;
770 long delta_f;
771
772 info = container_of(nb, struct s3c2410fb_info, freq_transition);
773 fbinfo = platform_get_drvdata(to_platform_device(info->dev));
774
775 /* work out change, <0 for speed-up */
776 delta_f = info->clk_rate - clk_get_rate(info->clk);
777
778 if ((val == CPUFREQ_POSTCHANGE && delta_f > 0) ||
779 (val == CPUFREQ_PRECHANGE && delta_f < 0)) {
780 info->clk_rate = clk_get_rate(info->clk);
781 s3c2410fb_activate_var(fbinfo);
782 }
783
784 return 0;
785}
786
787static inline int s3c2410fb_cpufreq_register(struct s3c2410fb_info *info)
788{
789 info->freq_transition.notifier_call = s3c2410fb_cpufreq_transition;
790
791 return cpufreq_register_notifier(&info->freq_transition,
792 CPUFREQ_TRANSITION_NOTIFIER);
793}
794
795static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
796{
797 cpufreq_unregister_notifier(&info->freq_transition,
798 CPUFREQ_TRANSITION_NOTIFIER);
799}
800
801#else
802static inline int s3c2410fb_cpufreq_register(struct s3c2410fb_info *info)
803{
804 return 0;
805}
806
807static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
808{
809}
810#endif
811
812
761static char driver_name[] = "s3c2410fb"; 813static char driver_name[] = "s3c2410fb";
762 814
763static int __init s3c24xxfb_probe(struct platform_device *pdev, 815static int __init s3c24xxfb_probe(struct platform_device *pdev,
@@ -875,6 +927,8 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev,
875 927
876 msleep(1); 928 msleep(1);
877 929
930 info->clk_rate = clk_get_rate(info->clk);
931
878 /* find maximum required memory size for display */ 932 /* find maximum required memory size for display */
879 for (i = 0; i < mach_info->num_displays; i++) { 933 for (i = 0; i < mach_info->num_displays; i++) {
880 unsigned long smem_len = mach_info->displays[i].xres; 934 unsigned long smem_len = mach_info->displays[i].xres;
@@ -904,11 +958,17 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev,
904 958
905 s3c2410fb_check_var(&fbinfo->var, fbinfo); 959 s3c2410fb_check_var(&fbinfo->var, fbinfo);
906 960
961 ret = s3c2410fb_cpufreq_register(info);
962 if (ret < 0) {
963 dev_err(&pdev->dev, "Failed to register cpufreq\n");
964 goto free_video_memory;
965 }
966
907 ret = register_framebuffer(fbinfo); 967 ret = register_framebuffer(fbinfo);
908 if (ret < 0) { 968 if (ret < 0) {
909 printk(KERN_ERR "Failed to register framebuffer device: %d\n", 969 printk(KERN_ERR "Failed to register framebuffer device: %d\n",
910 ret); 970 ret);
911 goto free_video_memory; 971 goto free_cpufreq;
912 } 972 }
913 973
914 /* create device files */ 974 /* create device files */
@@ -922,6 +982,8 @@ static int __init s3c24xxfb_probe(struct platform_device *pdev,
922 982
923 return 0; 983 return 0;
924 984
985 free_cpufreq:
986 s3c2410fb_cpufreq_deregister(info);
925free_video_memory: 987free_video_memory:
926 s3c2410fb_unmap_video_memory(fbinfo); 988 s3c2410fb_unmap_video_memory(fbinfo);
927release_clock: 989release_clock:
@@ -961,6 +1023,7 @@ static int s3c2410fb_remove(struct platform_device *pdev)
961 int irq; 1023 int irq;
962 1024
963 unregister_framebuffer(fbinfo); 1025 unregister_framebuffer(fbinfo);
1026 s3c2410fb_cpufreq_deregister(info);
964 1027
965 s3c2410fb_lcd_enable(info, 0); 1028 s3c2410fb_lcd_enable(info, 0);
966 msleep(1); 1029 msleep(1);
diff --git a/drivers/video/s3c2410fb.h b/drivers/video/s3c2410fb.h
index 9a6ba3e9d1b8..47a17bd23011 100644
--- a/drivers/video/s3c2410fb.h
+++ b/drivers/video/s3c2410fb.h
@@ -29,8 +29,13 @@ struct s3c2410fb_info {
29 enum s3c_drv_type drv_type; 29 enum s3c_drv_type drv_type;
30 struct s3c2410fb_hw regs; 30 struct s3c2410fb_hw regs;
31 31
32 unsigned long clk_rate;
32 unsigned int palette_ready; 33 unsigned int palette_ready;
33 34
35#ifdef CONFIG_CPU_FREQ
36 struct notifier_block freq_transition;
37#endif
38
34 /* keep these registers in case we need to re-write palette */ 39 /* keep these registers in case we need to re-write palette */
35 u32 palette_buffer[256]; 40 u32 palette_buffer[256];
36 u32 pseudo_pal[16]; 41 u32 pseudo_pal[16];
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 7e17ee95a97a..7072d19080d5 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -5928,7 +5928,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5928 if(pci_enable_device(pdev)) { 5928 if(pci_enable_device(pdev)) {
5929 if(ivideo->nbridge) pci_dev_put(ivideo->nbridge); 5929 if(ivideo->nbridge) pci_dev_put(ivideo->nbridge);
5930 pci_set_drvdata(pdev, NULL); 5930 pci_set_drvdata(pdev, NULL);
5931 kfree(sis_fb_info); 5931 framebuffer_release(sis_fb_info);
5932 return -EIO; 5932 return -EIO;
5933 } 5933 }
5934 } 5934 }
@@ -6134,7 +6134,7 @@ error_3: vfree(ivideo->bios_abase);
6134 pci_set_drvdata(pdev, NULL); 6134 pci_set_drvdata(pdev, NULL);
6135 if(!ivideo->sisvga_enabled) 6135 if(!ivideo->sisvga_enabled)
6136 pci_disable_device(pdev); 6136 pci_disable_device(pdev);
6137 kfree(sis_fb_info); 6137 framebuffer_release(sis_fb_info);
6138 return ret; 6138 return ret;
6139 } 6139 }
6140 6140
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index eabaad765aeb..eec9dcb7f599 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -1380,7 +1380,7 @@ stifb_cleanup(void)
1380 if (info->screen_base) 1380 if (info->screen_base)
1381 iounmap(info->screen_base); 1381 iounmap(info->screen_base);
1382 fb_dealloc_cmap(&info->cmap); 1382 fb_dealloc_cmap(&info->cmap);
1383 kfree(info); 1383 framebuffer_release(info);
1384 } 1384 }
1385 sti->info = NULL; 1385 sti->info = NULL;
1386 } 1386 }
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index 643afbfe8277..45b883598bf0 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -116,17 +116,16 @@ struct tcx_par {
116 u32 flags; 116 u32 flags;
117#define TCX_FLAG_BLANKED 0x00000001 117#define TCX_FLAG_BLANKED 0x00000001
118 118
119 unsigned long physbase;
120 unsigned long which_io; 119 unsigned long which_io;
121 unsigned long fbsize;
122 120
123 struct sbus_mmap_map mmap_map[TCX_MMAP_ENTRIES]; 121 struct sbus_mmap_map mmap_map[TCX_MMAP_ENTRIES];
124 int lowdepth; 122 int lowdepth;
125}; 123};
126 124
127/* Reset control plane so that WID is 8-bit plane. */ 125/* Reset control plane so that WID is 8-bit plane. */
128static void __tcx_set_control_plane(struct tcx_par *par) 126static void __tcx_set_control_plane(struct fb_info *info)
129{ 127{
128 struct tcx_par *par = info->par;
130 u32 __iomem *p, *pend; 129 u32 __iomem *p, *pend;
131 130
132 if (par->lowdepth) 131 if (par->lowdepth)
@@ -135,7 +134,7 @@ static void __tcx_set_control_plane(struct tcx_par *par)
135 p = par->cplane; 134 p = par->cplane;
136 if (p == NULL) 135 if (p == NULL)
137 return; 136 return;
138 for (pend = p + par->fbsize; p < pend; p++) { 137 for (pend = p + info->fix.smem_len; p < pend; p++) {
139 u32 tmp = sbus_readl(p); 138 u32 tmp = sbus_readl(p);
140 139
141 tmp &= 0xffffff; 140 tmp &= 0xffffff;
@@ -149,7 +148,7 @@ static void tcx_reset(struct fb_info *info)
149 unsigned long flags; 148 unsigned long flags;
150 149
151 spin_lock_irqsave(&par->lock, flags); 150 spin_lock_irqsave(&par->lock, flags);
152 __tcx_set_control_plane(par); 151 __tcx_set_control_plane(info);
153 spin_unlock_irqrestore(&par->lock, flags); 152 spin_unlock_irqrestore(&par->lock, flags);
154} 153}
155 154
@@ -304,7 +303,7 @@ static int tcx_mmap(struct fb_info *info, struct vm_area_struct *vma)
304 struct tcx_par *par = (struct tcx_par *)info->par; 303 struct tcx_par *par = (struct tcx_par *)info->par;
305 304
306 return sbusfb_mmap_helper(par->mmap_map, 305 return sbusfb_mmap_helper(par->mmap_map,
307 par->physbase, par->fbsize, 306 info->fix.smem_start, info->fix.smem_len,
308 par->which_io, vma); 307 par->which_io, vma);
309} 308}
310 309
@@ -316,7 +315,7 @@ static int tcx_ioctl(struct fb_info *info, unsigned int cmd,
316 return sbusfb_ioctl_helper(cmd, arg, info, 315 return sbusfb_ioctl_helper(cmd, arg, info,
317 FBTYPE_TCXCOLOR, 316 FBTYPE_TCXCOLOR,
318 (par->lowdepth ? 8 : 24), 317 (par->lowdepth ? 8 : 24),
319 par->fbsize); 318 info->fix.smem_len);
320} 319}
321 320
322/* 321/*
@@ -358,10 +357,10 @@ static void tcx_unmap_regs(struct of_device *op, struct fb_info *info,
358 par->bt, sizeof(struct bt_regs)); 357 par->bt, sizeof(struct bt_regs));
359 if (par->cplane) 358 if (par->cplane)
360 of_iounmap(&op->resource[4], 359 of_iounmap(&op->resource[4],
361 par->cplane, par->fbsize * sizeof(u32)); 360 par->cplane, info->fix.smem_len * sizeof(u32));
362 if (info->screen_base) 361 if (info->screen_base)
363 of_iounmap(&op->resource[0], 362 of_iounmap(&op->resource[0],
364 info->screen_base, par->fbsize); 363 info->screen_base, info->fix.smem_len);
365} 364}
366 365
367static int __devinit tcx_probe(struct of_device *op, 366static int __devinit tcx_probe(struct of_device *op,
@@ -391,7 +390,7 @@ static int __devinit tcx_probe(struct of_device *op,
391 390
392 linebytes = of_getintprop_default(dp, "linebytes", 391 linebytes = of_getintprop_default(dp, "linebytes",
393 info->var.xres); 392 info->var.xres);
394 par->fbsize = PAGE_ALIGN(linebytes * info->var.yres); 393 info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
395 394
396 par->tec = of_ioremap(&op->resource[7], 0, 395 par->tec = of_ioremap(&op->resource[7], 0,
397 sizeof(struct tcx_tec), "tcx tec"); 396 sizeof(struct tcx_tec), "tcx tec");
@@ -400,7 +399,7 @@ static int __devinit tcx_probe(struct of_device *op,
400 par->bt = of_ioremap(&op->resource[8], 0, 399 par->bt = of_ioremap(&op->resource[8], 0,
401 sizeof(struct bt_regs), "tcx dac"); 400 sizeof(struct bt_regs), "tcx dac");
402 info->screen_base = of_ioremap(&op->resource[0], 0, 401 info->screen_base = of_ioremap(&op->resource[0], 0,
403 par->fbsize, "tcx ram"); 402 info->fix.smem_len, "tcx ram");
404 if (!par->tec || !par->thc || 403 if (!par->tec || !par->thc ||
405 !par->bt || !info->screen_base) 404 !par->bt || !info->screen_base)
406 goto out_unmap_regs; 405 goto out_unmap_regs;
@@ -408,7 +407,7 @@ static int __devinit tcx_probe(struct of_device *op,
408 memcpy(&par->mmap_map, &__tcx_mmap_map, sizeof(par->mmap_map)); 407 memcpy(&par->mmap_map, &__tcx_mmap_map, sizeof(par->mmap_map));
409 if (!par->lowdepth) { 408 if (!par->lowdepth) {
410 par->cplane = of_ioremap(&op->resource[4], 0, 409 par->cplane = of_ioremap(&op->resource[4], 0,
411 par->fbsize * sizeof(u32), 410 info->fix.smem_len * sizeof(u32),
412 "tcx cplane"); 411 "tcx cplane");
413 if (!par->cplane) 412 if (!par->cplane)
414 goto out_unmap_regs; 413 goto out_unmap_regs;
@@ -419,7 +418,7 @@ static int __devinit tcx_probe(struct of_device *op,
419 par->mmap_map[6].size = SBUS_MMAP_EMPTY; 418 par->mmap_map[6].size = SBUS_MMAP_EMPTY;
420 } 419 }
421 420
422 par->physbase = op->resource[0].start; 421 info->fix.smem_start = op->resource[0].start;
423 par->which_io = op->resource[0].flags & IORESOURCE_BITS; 422 par->which_io = op->resource[0].flags & IORESOURCE_BITS;
424 423
425 for (i = 0; i < TCX_MMAP_ENTRIES; i++) { 424 for (i = 0; i < TCX_MMAP_ENTRIES; i++) {
@@ -473,7 +472,7 @@ static int __devinit tcx_probe(struct of_device *op,
473 printk(KERN_INFO "%s: TCX at %lx:%lx, %s\n", 472 printk(KERN_INFO "%s: TCX at %lx:%lx, %s\n",
474 dp->full_name, 473 dp->full_name,
475 par->which_io, 474 par->which_io,
476 par->physbase, 475 info->fix.smem_start,
477 par->lowdepth ? "8-bit only" : "24-bit depth"); 476 par->lowdepth ? "8-bit only" : "24-bit depth");
478 477
479 return 0; 478 return 0;
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index d6856f43d241..bd37ee1f6a25 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -174,8 +174,17 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
174 return err; 174 return err;
175} 175}
176 176
177static void vesafb_destroy(struct fb_info *info)
178{
179 if (info->screen_base)
180 iounmap(info->screen_base);
181 release_mem_region(info->aperture_base, info->aperture_size);
182 framebuffer_release(info);
183}
184
177static struct fb_ops vesafb_ops = { 185static struct fb_ops vesafb_ops = {
178 .owner = THIS_MODULE, 186 .owner = THIS_MODULE,
187 .fb_destroy = vesafb_destroy,
179 .fb_setcolreg = vesafb_setcolreg, 188 .fb_setcolreg = vesafb_setcolreg,
180 .fb_pan_display = vesafb_pan_display, 189 .fb_pan_display = vesafb_pan_display,
181 .fb_fillrect = cfb_fillrect, 190 .fb_fillrect = cfb_fillrect,
@@ -286,6 +295,10 @@ static int __init vesafb_probe(struct platform_device *dev)
286 info->pseudo_palette = info->par; 295 info->pseudo_palette = info->par;
287 info->par = NULL; 296 info->par = NULL;
288 297
298 /* set vesafb aperture size for generic probing */
299 info->aperture_base = screen_info.lfb_base;
300 info->aperture_size = size_total;
301
289 info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len); 302 info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len);
290 if (!info->screen_base) { 303 if (!info->screen_base) {
291 printk(KERN_ERR 304 printk(KERN_ERR
@@ -437,7 +450,7 @@ static int __init vesafb_probe(struct platform_device *dev)
437 info->fbops = &vesafb_ops; 450 info->fbops = &vesafb_ops;
438 info->var = vesafb_defined; 451 info->var = vesafb_defined;
439 info->fix = vesafb_fix; 452 info->fix = vesafb_fix;
440 info->flags = FBINFO_FLAG_DEFAULT | 453 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
441 (ypan ? FBINFO_HWACCEL_YPAN : 0); 454 (ypan ? FBINFO_HWACCEL_YPAN : 0);
442 455
443 if (!ypan) 456 if (!ypan)
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 2493f05e9f61..15502d5e3641 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -384,7 +384,7 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
384 fb_size = XENFB_DEFAULT_FB_LEN; 384 fb_size = XENFB_DEFAULT_FB_LEN;
385 } 385 }
386 386
387 dev->dev.driver_data = info; 387 dev_set_drvdata(&dev->dev, info);
388 info->xbdev = dev; 388 info->xbdev = dev;
389 info->irq = -1; 389 info->irq = -1;
390 info->x1 = info->y1 = INT_MAX; 390 info->x1 = info->y1 = INT_MAX;
@@ -503,7 +503,7 @@ xenfb_make_preferred_console(void)
503 503
504static int xenfb_resume(struct xenbus_device *dev) 504static int xenfb_resume(struct xenbus_device *dev)
505{ 505{
506 struct xenfb_info *info = dev->dev.driver_data; 506 struct xenfb_info *info = dev_get_drvdata(&dev->dev);
507 507
508 xenfb_disconnect_backend(info); 508 xenfb_disconnect_backend(info);
509 xenfb_init_shared_page(info, info->fb_info); 509 xenfb_init_shared_page(info, info->fb_info);
@@ -512,7 +512,7 @@ static int xenfb_resume(struct xenbus_device *dev)
512 512
513static int xenfb_remove(struct xenbus_device *dev) 513static int xenfb_remove(struct xenbus_device *dev)
514{ 514{
515 struct xenfb_info *info = dev->dev.driver_data; 515 struct xenfb_info *info = dev_get_drvdata(&dev->dev);
516 516
517 xenfb_disconnect_backend(info); 517 xenfb_disconnect_backend(info);
518 if (info->fb_info) { 518 if (info->fb_info) {
@@ -621,7 +621,7 @@ static void xenfb_disconnect_backend(struct xenfb_info *info)
621static void xenfb_backend_changed(struct xenbus_device *dev, 621static void xenfb_backend_changed(struct xenbus_device *dev,
622 enum xenbus_state backend_state) 622 enum xenbus_state backend_state)
623{ 623{
624 struct xenfb_info *info = dev->dev.driver_data; 624 struct xenfb_info *info = dev_get_drvdata(&dev->dev);
625 int val; 625 int val;
626 626
627 switch (backend_state) { 627 switch (backend_state) {
diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig
new file mode 100644
index 000000000000..f6542211db48
--- /dev/null
+++ b/drivers/vlynq/Kconfig
@@ -0,0 +1,20 @@
1menu "TI VLYNQ"
2
3config VLYNQ
4 bool "TI VLYNQ bus support"
5 depends on AR7 && EXPERIMENTAL
6 help
7 Support for Texas Instruments(R) VLYNQ bus.
8 The VLYNQ bus is a high-speed, serial and packetized
9 data bus which allows external peripherals of a SoC
10 to appear into the system's main memory.
11
12 If unsure, say N
13
14config VLYNQ_DEBUG
15 bool "VLYNQ bus debug"
16 depends on VLYNQ && KERNEL_DEBUG
17 help
18 Turn on VLYNQ bus debugging.
19
20endmenu
diff --git a/drivers/vlynq/Makefile b/drivers/vlynq/Makefile
new file mode 100644
index 000000000000..b3f61149b599
--- /dev/null
+++ b/drivers/vlynq/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for kernel vlynq drivers
3#
4
5obj-$(CONFIG_VLYNQ) += vlynq.o
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
new file mode 100644
index 000000000000..7335433b067b
--- /dev/null
+++ b/drivers/vlynq/vlynq.c
@@ -0,0 +1,814 @@
1/*
2 * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 *
18 * Parts of the VLYNQ specification can be found here:
19 * http://www.ti.com/litv/pdf/sprue36a
20 */
21
22#include <linux/init.h>
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/string.h>
26#include <linux/device.h>
27#include <linux/module.h>
28#include <linux/errno.h>
29#include <linux/platform_device.h>
30#include <linux/interrupt.h>
31#include <linux/device.h>
32#include <linux/delay.h>
33#include <linux/io.h>
34
35#include <linux/vlynq.h>
36
37#define VLYNQ_CTRL_PM_ENABLE 0x80000000
38#define VLYNQ_CTRL_CLOCK_INT 0x00008000
39#define VLYNQ_CTRL_CLOCK_DIV(x) (((x) & 7) << 16)
40#define VLYNQ_CTRL_INT_LOCAL 0x00004000
41#define VLYNQ_CTRL_INT_ENABLE 0x00002000
42#define VLYNQ_CTRL_INT_VECTOR(x) (((x) & 0x1f) << 8)
43#define VLYNQ_CTRL_INT2CFG 0x00000080
44#define VLYNQ_CTRL_RESET 0x00000001
45
46#define VLYNQ_CTRL_CLOCK_MASK (0x7 << 16)
47
48#define VLYNQ_INT_OFFSET 0x00000014
49#define VLYNQ_REMOTE_OFFSET 0x00000080
50
51#define VLYNQ_STATUS_LINK 0x00000001
52#define VLYNQ_STATUS_LERROR 0x00000080
53#define VLYNQ_STATUS_RERROR 0x00000100
54
55#define VINT_ENABLE 0x00000100
56#define VINT_TYPE_EDGE 0x00000080
57#define VINT_LEVEL_LOW 0x00000040
58#define VINT_VECTOR(x) ((x) & 0x1f)
59#define VINT_OFFSET(irq) (8 * ((irq) % 4))
60
61#define VLYNQ_AUTONEGO_V2 0x00010000
62
63struct vlynq_regs {
64 u32 revision;
65 u32 control;
66 u32 status;
67 u32 int_prio;
68 u32 int_status;
69 u32 int_pending;
70 u32 int_ptr;
71 u32 tx_offset;
72 struct vlynq_mapping rx_mapping[4];
73 u32 chip;
74 u32 autonego;
75 u32 unused[6];
76 u32 int_device[8];
77};
78
79#ifdef VLYNQ_DEBUG
80static void vlynq_dump_regs(struct vlynq_device *dev)
81{
82 int i;
83
84 printk(KERN_DEBUG "VLYNQ local=%p remote=%p\n",
85 dev->local, dev->remote);
86 for (i = 0; i < 32; i++) {
87 printk(KERN_DEBUG "VLYNQ: local %d: %08x\n",
88 i + 1, ((u32 *)dev->local)[i]);
89 printk(KERN_DEBUG "VLYNQ: remote %d: %08x\n",
90 i + 1, ((u32 *)dev->remote)[i]);
91 }
92}
93
94static void vlynq_dump_mem(u32 *base, int count)
95{
96 int i;
97
98 for (i = 0; i < (count + 3) / 4; i++) {
99 if (i % 4 == 0)
100 printk(KERN_DEBUG "\nMEM[0x%04x]:", i * 4);
101 printk(KERN_DEBUG " 0x%08x", *(base + i));
102 }
103 printk(KERN_DEBUG "\n");
104}
105#endif
106
107/* Check the VLYNQ link status with a given device */
108static int vlynq_linked(struct vlynq_device *dev)
109{
110 int i;
111
112 for (i = 0; i < 100; i++)
113 if (readl(&dev->local->status) & VLYNQ_STATUS_LINK)
114 return 1;
115 else
116 cpu_relax();
117
118 return 0;
119}
120
121static void vlynq_reset(struct vlynq_device *dev)
122{
123 writel(readl(&dev->local->control) | VLYNQ_CTRL_RESET,
124 &dev->local->control);
125
126 /* Wait for the devices to finish resetting */
127 msleep(5);
128
129 /* Remove reset bit */
130 writel(readl(&dev->local->control) & ~VLYNQ_CTRL_RESET,
131 &dev->local->control);
132
133 /* Give some time for the devices to settle */
134 msleep(5);
135}
136
137static void vlynq_irq_unmask(unsigned int irq)
138{
139 u32 val;
140 struct vlynq_device *dev = get_irq_chip_data(irq);
141 int virq;
142
143 BUG_ON(!dev);
144 virq = irq - dev->irq_start;
145 val = readl(&dev->remote->int_device[virq >> 2]);
146 val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq);
147 writel(val, &dev->remote->int_device[virq >> 2]);
148}
149
150static void vlynq_irq_mask(unsigned int irq)
151{
152 u32 val;
153 struct vlynq_device *dev = get_irq_chip_data(irq);
154 int virq;
155
156 BUG_ON(!dev);
157 virq = irq - dev->irq_start;
158 val = readl(&dev->remote->int_device[virq >> 2]);
159 val &= ~(VINT_ENABLE << VINT_OFFSET(virq));
160 writel(val, &dev->remote->int_device[virq >> 2]);
161}
162
163static int vlynq_irq_type(unsigned int irq, unsigned int flow_type)
164{
165 u32 val;
166 struct vlynq_device *dev = get_irq_chip_data(irq);
167 int virq;
168
169 BUG_ON(!dev);
170 virq = irq - dev->irq_start;
171 val = readl(&dev->remote->int_device[virq >> 2]);
172 switch (flow_type & IRQ_TYPE_SENSE_MASK) {
173 case IRQ_TYPE_EDGE_RISING:
174 case IRQ_TYPE_EDGE_FALLING:
175 case IRQ_TYPE_EDGE_BOTH:
176 val |= VINT_TYPE_EDGE << VINT_OFFSET(virq);
177 val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq));
178 break;
179 case IRQ_TYPE_LEVEL_HIGH:
180 val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq));
181 val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq));
182 break;
183 case IRQ_TYPE_LEVEL_LOW:
184 val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq));
185 val |= VINT_LEVEL_LOW << VINT_OFFSET(virq);
186 break;
187 default:
188 return -EINVAL;
189 }
190 writel(val, &dev->remote->int_device[virq >> 2]);
191 return 0;
192}
193
194static void vlynq_local_ack(unsigned int irq)
195{
196 struct vlynq_device *dev = get_irq_chip_data(irq);
197
198 u32 status = readl(&dev->local->status);
199
200 pr_debug("%s: local status: 0x%08x\n",
201 dev_name(&dev->dev), status);
202 writel(status, &dev->local->status);
203}
204
205static void vlynq_remote_ack(unsigned int irq)
206{
207 struct vlynq_device *dev = get_irq_chip_data(irq);
208
209 u32 status = readl(&dev->remote->status);
210
211 pr_debug("%s: remote status: 0x%08x\n",
212 dev_name(&dev->dev), status);
213 writel(status, &dev->remote->status);
214}
215
216static irqreturn_t vlynq_irq(int irq, void *dev_id)
217{
218 struct vlynq_device *dev = dev_id;
219 u32 status;
220 int virq = 0;
221
222 status = readl(&dev->local->int_status);
223 writel(status, &dev->local->int_status);
224
225 if (unlikely(!status))
226 spurious_interrupt();
227
228 while (status) {
229 if (status & 1)
230 do_IRQ(dev->irq_start + virq);
231 status >>= 1;
232 virq++;
233 }
234
235 return IRQ_HANDLED;
236}
237
238static struct irq_chip vlynq_irq_chip = {
239 .name = "vlynq",
240 .unmask = vlynq_irq_unmask,
241 .mask = vlynq_irq_mask,
242 .set_type = vlynq_irq_type,
243};
244
245static struct irq_chip vlynq_local_chip = {
246 .name = "vlynq local error",
247 .unmask = vlynq_irq_unmask,
248 .mask = vlynq_irq_mask,
249 .ack = vlynq_local_ack,
250};
251
252static struct irq_chip vlynq_remote_chip = {
253 .name = "vlynq local error",
254 .unmask = vlynq_irq_unmask,
255 .mask = vlynq_irq_mask,
256 .ack = vlynq_remote_ack,
257};
258
259static int vlynq_setup_irq(struct vlynq_device *dev)
260{
261 u32 val;
262 int i, virq;
263
264 if (dev->local_irq == dev->remote_irq) {
265 printk(KERN_ERR
266 "%s: local vlynq irq should be different from remote\n",
267 dev_name(&dev->dev));
268 return -EINVAL;
269 }
270
271 /* Clear local and remote error bits */
272 writel(readl(&dev->local->status), &dev->local->status);
273 writel(readl(&dev->remote->status), &dev->remote->status);
274
275 /* Now setup interrupts */
276 val = VLYNQ_CTRL_INT_VECTOR(dev->local_irq);
277 val |= VLYNQ_CTRL_INT_ENABLE | VLYNQ_CTRL_INT_LOCAL |
278 VLYNQ_CTRL_INT2CFG;
279 val |= readl(&dev->local->control);
280 writel(VLYNQ_INT_OFFSET, &dev->local->int_ptr);
281 writel(val, &dev->local->control);
282
283 val = VLYNQ_CTRL_INT_VECTOR(dev->remote_irq);
284 val |= VLYNQ_CTRL_INT_ENABLE;
285 val |= readl(&dev->remote->control);
286 writel(VLYNQ_INT_OFFSET, &dev->remote->int_ptr);
287 writel(val, &dev->remote->int_ptr);
288 writel(val, &dev->remote->control);
289
290 for (i = dev->irq_start; i <= dev->irq_end; i++) {
291 virq = i - dev->irq_start;
292 if (virq == dev->local_irq) {
293 set_irq_chip_and_handler(i, &vlynq_local_chip,
294 handle_level_irq);
295 set_irq_chip_data(i, dev);
296 } else if (virq == dev->remote_irq) {
297 set_irq_chip_and_handler(i, &vlynq_remote_chip,
298 handle_level_irq);
299 set_irq_chip_data(i, dev);
300 } else {
301 set_irq_chip_and_handler(i, &vlynq_irq_chip,
302 handle_simple_irq);
303 set_irq_chip_data(i, dev);
304 writel(0, &dev->remote->int_device[virq >> 2]);
305 }
306 }
307
308 if (request_irq(dev->irq, vlynq_irq, IRQF_SHARED, "vlynq", dev)) {
309 printk(KERN_ERR "%s: request_irq failed\n",
310 dev_name(&dev->dev));
311 return -EAGAIN;
312 }
313
314 return 0;
315}
316
317static void vlynq_device_release(struct device *dev)
318{
319 struct vlynq_device *vdev = to_vlynq_device(dev);
320 kfree(vdev);
321}
322
323static int vlynq_device_match(struct device *dev,
324 struct device_driver *drv)
325{
326 struct vlynq_device *vdev = to_vlynq_device(dev);
327 struct vlynq_driver *vdrv = to_vlynq_driver(drv);
328 struct vlynq_device_id *ids = vdrv->id_table;
329
330 while (ids->id) {
331 if (ids->id == vdev->dev_id) {
332 vdev->divisor = ids->divisor;
333 vlynq_set_drvdata(vdev, ids);
334 printk(KERN_INFO "Driver found for VLYNQ "
335 "device: %08x\n", vdev->dev_id);
336 return 1;
337 }
338 printk(KERN_DEBUG "Not using the %08x VLYNQ device's driver"
339 " for VLYNQ device: %08x\n", ids->id, vdev->dev_id);
340 ids++;
341 }
342 return 0;
343}
344
345static int vlynq_device_probe(struct device *dev)
346{
347 struct vlynq_device *vdev = to_vlynq_device(dev);
348 struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
349 struct vlynq_device_id *id = vlynq_get_drvdata(vdev);
350 int result = -ENODEV;
351
352 if (drv->probe)
353 result = drv->probe(vdev, id);
354 if (result)
355 put_device(dev);
356 return result;
357}
358
359static int vlynq_device_remove(struct device *dev)
360{
361 struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
362
363 if (drv->remove)
364 drv->remove(to_vlynq_device(dev));
365
366 return 0;
367}
368
369int __vlynq_register_driver(struct vlynq_driver *driver, struct module *owner)
370{
371 driver->driver.name = driver->name;
372 driver->driver.bus = &vlynq_bus_type;
373 return driver_register(&driver->driver);
374}
375EXPORT_SYMBOL(__vlynq_register_driver);
376
377void vlynq_unregister_driver(struct vlynq_driver *driver)
378{
379 driver_unregister(&driver->driver);
380}
381EXPORT_SYMBOL(vlynq_unregister_driver);
382
383/*
384 * A VLYNQ remote device can clock the VLYNQ bus master
385 * using a dedicated clock line. In that case, both the
386 * remove device and the bus master should have the same
387 * serial clock dividers configured. Iterate through the
388 * 8 possible dividers until we actually link with the
389 * device.
390 */
391static int __vlynq_try_remote(struct vlynq_device *dev)
392{
393 int i;
394
395 vlynq_reset(dev);
396 for (i = dev->dev_id ? vlynq_rdiv2 : vlynq_rdiv8; dev->dev_id ?
397 i <= vlynq_rdiv8 : i >= vlynq_rdiv2;
398 dev->dev_id ? i++ : i--) {
399
400 if (!vlynq_linked(dev))
401 break;
402
403 writel((readl(&dev->remote->control) &
404 ~VLYNQ_CTRL_CLOCK_MASK) |
405 VLYNQ_CTRL_CLOCK_INT |
406 VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1),
407 &dev->remote->control);
408 writel((readl(&dev->local->control)
409 & ~(VLYNQ_CTRL_CLOCK_INT |
410 VLYNQ_CTRL_CLOCK_MASK)) |
411 VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1),
412 &dev->local->control);
413
414 if (vlynq_linked(dev)) {
415 printk(KERN_DEBUG
416 "%s: using remote clock divisor %d\n",
417 dev_name(&dev->dev), i - vlynq_rdiv1 + 1);
418 dev->divisor = i;
419 return 0;
420 } else {
421 vlynq_reset(dev);
422 }
423 }
424
425 return -ENODEV;
426}
427
428/*
429 * A VLYNQ remote device can be clocked by the VLYNQ bus
430 * master using a dedicated clock line. In that case, only
431 * the bus master configures the serial clock divider.
432 * Iterate through the 8 possible dividers until we
433 * actually get a link with the device.
434 */
435static int __vlynq_try_local(struct vlynq_device *dev)
436{
437 int i;
438
439 vlynq_reset(dev);
440
441 for (i = dev->dev_id ? vlynq_ldiv2 : vlynq_ldiv8; dev->dev_id ?
442 i <= vlynq_ldiv8 : i >= vlynq_ldiv2;
443 dev->dev_id ? i++ : i--) {
444
445 writel((readl(&dev->local->control) &
446 ~VLYNQ_CTRL_CLOCK_MASK) |
447 VLYNQ_CTRL_CLOCK_INT |
448 VLYNQ_CTRL_CLOCK_DIV(i - vlynq_ldiv1),
449 &dev->local->control);
450
451 if (vlynq_linked(dev)) {
452 printk(KERN_DEBUG
453 "%s: using local clock divisor %d\n",
454 dev_name(&dev->dev), i - vlynq_ldiv1 + 1);
455 dev->divisor = i;
456 return 0;
457 } else {
458 vlynq_reset(dev);
459 }
460 }
461
462 return -ENODEV;
463}
464
465/*
466 * When using external clocking method, serial clock
467 * is supplied by an external oscillator, therefore we
468 * should mask the local clock bit in the clock control
469 * register for both the bus master and the remote device.
470 */
471static int __vlynq_try_external(struct vlynq_device *dev)
472{
473 vlynq_reset(dev);
474 if (!vlynq_linked(dev))
475 return -ENODEV;
476
477 writel((readl(&dev->remote->control) &
478 ~VLYNQ_CTRL_CLOCK_INT),
479 &dev->remote->control);
480
481 writel((readl(&dev->local->control) &
482 ~VLYNQ_CTRL_CLOCK_INT),
483 &dev->local->control);
484
485 if (vlynq_linked(dev)) {
486 printk(KERN_DEBUG "%s: using external clock\n",
487 dev_name(&dev->dev));
488 dev->divisor = vlynq_div_external;
489 return 0;
490 }
491
492 return -ENODEV;
493}
494
495static int __vlynq_enable_device(struct vlynq_device *dev)
496{
497 int result;
498 struct plat_vlynq_ops *ops = dev->dev.platform_data;
499
500 result = ops->on(dev);
501 if (result)
502 return result;
503
504 switch (dev->divisor) {
505 case vlynq_div_external:
506 case vlynq_div_auto:
507 /* When the device is brought from reset it should have clock
508 * generation negotiated by hardware.
509 * Check which device is generating clocks and perform setup
510 * accordingly */
511 if (vlynq_linked(dev) && readl(&dev->remote->control) &
512 VLYNQ_CTRL_CLOCK_INT) {
513 if (!__vlynq_try_remote(dev) ||
514 !__vlynq_try_local(dev) ||
515 !__vlynq_try_external(dev))
516 return 0;
517 } else {
518 if (!__vlynq_try_external(dev) ||
519 !__vlynq_try_local(dev) ||
520 !__vlynq_try_remote(dev))
521 return 0;
522 }
523 break;
524 case vlynq_ldiv1:
525 case vlynq_ldiv2:
526 case vlynq_ldiv3:
527 case vlynq_ldiv4:
528 case vlynq_ldiv5:
529 case vlynq_ldiv6:
530 case vlynq_ldiv7:
531 case vlynq_ldiv8:
532 writel(VLYNQ_CTRL_CLOCK_INT |
533 VLYNQ_CTRL_CLOCK_DIV(dev->divisor -
534 vlynq_ldiv1), &dev->local->control);
535 writel(0, &dev->remote->control);
536 if (vlynq_linked(dev)) {
537 printk(KERN_DEBUG
538 "%s: using local clock divisor %d\n",
539 dev_name(&dev->dev),
540 dev->divisor - vlynq_ldiv1 + 1);
541 return 0;
542 }
543 break;
544 case vlynq_rdiv1:
545 case vlynq_rdiv2:
546 case vlynq_rdiv3:
547 case vlynq_rdiv4:
548 case vlynq_rdiv5:
549 case vlynq_rdiv6:
550 case vlynq_rdiv7:
551 case vlynq_rdiv8:
552 writel(0, &dev->local->control);
553 writel(VLYNQ_CTRL_CLOCK_INT |
554 VLYNQ_CTRL_CLOCK_DIV(dev->divisor -
555 vlynq_rdiv1), &dev->remote->control);
556 if (vlynq_linked(dev)) {
557 printk(KERN_DEBUG
558 "%s: using remote clock divisor %d\n",
559 dev_name(&dev->dev),
560 dev->divisor - vlynq_rdiv1 + 1);
561 return 0;
562 }
563 break;
564 }
565
566 ops->off(dev);
567 return -ENODEV;
568}
569
570int vlynq_enable_device(struct vlynq_device *dev)
571{
572 struct plat_vlynq_ops *ops = dev->dev.platform_data;
573 int result = -ENODEV;
574
575 result = __vlynq_enable_device(dev);
576 if (result)
577 return result;
578
579 result = vlynq_setup_irq(dev);
580 if (result)
581 ops->off(dev);
582
583 dev->enabled = !result;
584 return result;
585}
586EXPORT_SYMBOL(vlynq_enable_device);
587
588
589void vlynq_disable_device(struct vlynq_device *dev)
590{
591 struct plat_vlynq_ops *ops = dev->dev.platform_data;
592
593 dev->enabled = 0;
594 free_irq(dev->irq, dev);
595 ops->off(dev);
596}
597EXPORT_SYMBOL(vlynq_disable_device);
598
599int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
600 struct vlynq_mapping *mapping)
601{
602 int i;
603
604 if (!dev->enabled)
605 return -ENXIO;
606
607 writel(tx_offset, &dev->local->tx_offset);
608 for (i = 0; i < 4; i++) {
609 writel(mapping[i].offset, &dev->local->rx_mapping[i].offset);
610 writel(mapping[i].size, &dev->local->rx_mapping[i].size);
611 }
612 return 0;
613}
614EXPORT_SYMBOL(vlynq_set_local_mapping);
615
616int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
617 struct vlynq_mapping *mapping)
618{
619 int i;
620
621 if (!dev->enabled)
622 return -ENXIO;
623
624 writel(tx_offset, &dev->remote->tx_offset);
625 for (i = 0; i < 4; i++) {
626 writel(mapping[i].offset, &dev->remote->rx_mapping[i].offset);
627 writel(mapping[i].size, &dev->remote->rx_mapping[i].size);
628 }
629 return 0;
630}
631EXPORT_SYMBOL(vlynq_set_remote_mapping);
632
633int vlynq_set_local_irq(struct vlynq_device *dev, int virq)
634{
635 int irq = dev->irq_start + virq;
636 if (dev->enabled)
637 return -EBUSY;
638
639 if ((irq < dev->irq_start) || (irq > dev->irq_end))
640 return -EINVAL;
641
642 if (virq == dev->remote_irq)
643 return -EINVAL;
644
645 dev->local_irq = virq;
646
647 return 0;
648}
649EXPORT_SYMBOL(vlynq_set_local_irq);
650
651int vlynq_set_remote_irq(struct vlynq_device *dev, int virq)
652{
653 int irq = dev->irq_start + virq;
654 if (dev->enabled)
655 return -EBUSY;
656
657 if ((irq < dev->irq_start) || (irq > dev->irq_end))
658 return -EINVAL;
659
660 if (virq == dev->local_irq)
661 return -EINVAL;
662
663 dev->remote_irq = virq;
664
665 return 0;
666}
667EXPORT_SYMBOL(vlynq_set_remote_irq);
668
669static int vlynq_probe(struct platform_device *pdev)
670{
671 struct vlynq_device *dev;
672 struct resource *regs_res, *mem_res, *irq_res;
673 int len, result;
674
675 regs_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
676 if (!regs_res)
677 return -ENODEV;
678
679 mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
680 if (!mem_res)
681 return -ENODEV;
682
683 irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "devirq");
684 if (!irq_res)
685 return -ENODEV;
686
687 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
688 if (!dev) {
689 printk(KERN_ERR
690 "vlynq: failed to allocate device structure\n");
691 return -ENOMEM;
692 }
693
694 dev->id = pdev->id;
695 dev->dev.bus = &vlynq_bus_type;
696 dev->dev.parent = &pdev->dev;
697 dev_set_name(&dev->dev, "vlynq%d", dev->id);
698 dev->dev.platform_data = pdev->dev.platform_data;
699 dev->dev.release = vlynq_device_release;
700
701 dev->regs_start = regs_res->start;
702 dev->regs_end = regs_res->end;
703 dev->mem_start = mem_res->start;
704 dev->mem_end = mem_res->end;
705
706 len = regs_res->end - regs_res->start;
707 if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) {
708 printk(KERN_ERR "%s: Can't request vlynq registers\n",
709 dev_name(&dev->dev));
710 result = -ENXIO;
711 goto fail_request;
712 }
713
714 dev->local = ioremap(regs_res->start, len);
715 if (!dev->local) {
716 printk(KERN_ERR "%s: Can't remap vlynq registers\n",
717 dev_name(&dev->dev));
718 result = -ENXIO;
719 goto fail_remap;
720 }
721
722 dev->remote = (struct vlynq_regs *)((void *)dev->local +
723 VLYNQ_REMOTE_OFFSET);
724
725 dev->irq = platform_get_irq_byname(pdev, "irq");
726 dev->irq_start = irq_res->start;
727 dev->irq_end = irq_res->end;
728 dev->local_irq = dev->irq_end - dev->irq_start;
729 dev->remote_irq = dev->local_irq - 1;
730
731 if (device_register(&dev->dev))
732 goto fail_register;
733 platform_set_drvdata(pdev, dev);
734
735 printk(KERN_INFO "%s: regs 0x%p, irq %d, mem 0x%p\n",
736 dev_name(&dev->dev), (void *)dev->regs_start, dev->irq,
737 (void *)dev->mem_start);
738
739 dev->dev_id = 0;
740 dev->divisor = vlynq_div_auto;
741 result = __vlynq_enable_device(dev);
742 if (result == 0) {
743 dev->dev_id = readl(&dev->remote->chip);
744 ((struct plat_vlynq_ops *)(dev->dev.platform_data))->off(dev);
745 }
746 if (dev->dev_id)
747 printk(KERN_INFO "Found a VLYNQ device: %08x\n", dev->dev_id);
748
749 return 0;
750
751fail_register:
752 iounmap(dev->local);
753fail_remap:
754fail_request:
755 release_mem_region(regs_res->start, len);
756 kfree(dev);
757 return result;
758}
759
760static int vlynq_remove(struct platform_device *pdev)
761{
762 struct vlynq_device *dev = platform_get_drvdata(pdev);
763
764 device_unregister(&dev->dev);
765 iounmap(dev->local);
766 release_mem_region(dev->regs_start, dev->regs_end - dev->regs_start);
767
768 kfree(dev);
769
770 return 0;
771}
772
773static struct platform_driver vlynq_platform_driver = {
774 .driver.name = "vlynq",
775 .probe = vlynq_probe,
776 .remove = __devexit_p(vlynq_remove),
777};
778
779struct bus_type vlynq_bus_type = {
780 .name = "vlynq",
781 .match = vlynq_device_match,
782 .probe = vlynq_device_probe,
783 .remove = vlynq_device_remove,
784};
785EXPORT_SYMBOL(vlynq_bus_type);
786
787static int __devinit vlynq_init(void)
788{
789 int res = 0;
790
791 res = bus_register(&vlynq_bus_type);
792 if (res)
793 goto fail_bus;
794
795 res = platform_driver_register(&vlynq_platform_driver);
796 if (res)
797 goto fail_platform;
798
799 return 0;
800
801fail_platform:
802 bus_unregister(&vlynq_bus_type);
803fail_bus:
804 return res;
805}
806
807static void __devexit vlynq_exit(void)
808{
809 platform_driver_unregister(&vlynq_platform_driver);
810 bus_unregister(&vlynq_bus_type);
811}
812
813module_init(vlynq_init);
814module_exit(vlynq_exit);